InterpAsm-armv5te.S revision b0a0541b59d1126ff77c88de742b4a74579fe296
1/* 2 * This file was generated automatically by gen-mterp.py for 'armv5te'. 3 * 4 * --> DO NOT EDIT <-- 5 */ 6 7/* File: armv5te/header.S */ 8/* 9 * Copyright (C) 2008 The Android Open Source Project 10 * 11 * Licensed under the Apache License, Version 2.0 (the "License"); 12 * you may not use this file except in compliance with the License. 13 * You may obtain a copy of the License at 14 * 15 * http://www.apache.org/licenses/LICENSE-2.0 16 * 17 * Unless required by applicable law or agreed to in writing, software 18 * distributed under the License is distributed on an "AS IS" BASIS, 19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 * See the License for the specific language governing permissions and 21 * limitations under the License. 22 */ 23/* 24 * ARMv5 definitions and declarations. 25 */ 26 27/* 28ARM EABI general notes: 29 30r0-r3 hold first 4 args to a method; they are not preserved across method calls 31r4-r8 are available for general use 32r9 is given special treatment in some situations, but not for us 33r10 (sl) seems to be generally available 34r11 (fp) is used by gcc (unless -fomit-frame-pointer is set) 35r12 (ip) is scratch -- not preserved across method calls 36r13 (sp) should be managed carefully in case a signal arrives 37r14 (lr) must be preserved 38r15 (pc) can be tinkered with directly 39 40r0 holds returns of <= 4 bytes 41r0-r1 hold returns of 8 bytes, low word in r0 42 43Callee must save/restore r4+ (except r12) if it modifies them. If VFP 44is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved, 45s0-s15 (d0-d7, q0-a3) do not need to be. 46 47Stack is "full descending". Only the arguments that don't fit in the first 4 48registers are placed on the stack. "sp" points at the first stacked argument 49(i.e. the 5th arg). 50 51VFP: single-precision results in s0, double-precision results in d0. 52 53In the EABI, "sp" must be 64-bit aligned on entry to a function, and any 5464-bit quantities (long long, double) must be 64-bit aligned. 55*/ 56 57/* 58Mterp and ARM notes: 59 60The following registers have fixed assignments: 61 62 reg nick purpose 63 r4 rPC interpreted program counter, used for fetching instructions 64 r5 rFP interpreted frame pointer, used for accessing locals and args 65 r6 rGLUE MterpGlue pointer 66 r7 rINST first 16-bit code unit of current instruction 67 r8 rIBASE interpreted instruction base pointer, used for computed goto 68 69Macros are provided for common operations. Each macro MUST emit only 70one instruction to make instruction-counting easier. They MUST NOT alter 71unspecified registers or condition codes. 72*/ 73 74/* single-purpose registers, given names for clarity */ 75#define rPC r4 76#define rFP r5 77#define rGLUE r6 78#define rINST r7 79#define rIBASE r8 80 81/* save/restore the PC and/or FP from the glue struct */ 82#define LOAD_PC_FROM_GLUE() ldr rPC, [rGLUE, #offGlue_pc] 83#define SAVE_PC_TO_GLUE() str rPC, [rGLUE, #offGlue_pc] 84#define LOAD_FP_FROM_GLUE() ldr rFP, [rGLUE, #offGlue_fp] 85#define SAVE_FP_TO_GLUE() str rFP, [rGLUE, #offGlue_fp] 86#define LOAD_PC_FP_FROM_GLUE() ldmia rGLUE, {rPC, rFP} 87#define SAVE_PC_FP_TO_GLUE() stmia rGLUE, {rPC, rFP} 88 89/* 90 * "export" the PC to the stack frame, f/b/o future exception objects. Must 91 * be done *before* something calls dvmThrowException. 92 * 93 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e. 94 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc) 95 * 96 * It's okay to do this more than once. 97 */ 98#define EXPORT_PC() \ 99 str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)] 100 101/* 102 * Given a frame pointer, find the stack save area. 103 * 104 * In C this is "((StackSaveArea*)(_fp) -1)". 105 */ 106#define SAVEAREA_FROM_FP(_reg, _fpreg) \ 107 sub _reg, _fpreg, #sizeofStackSaveArea 108 109/* 110 * Fetch the next instruction from rPC into rINST. Does not advance rPC. 111 */ 112#define FETCH_INST() ldrh rINST, [rPC] 113 114/* 115 * Fetch the next instruction from the specified offset. Advances rPC 116 * to point to the next instruction. "_count" is in 16-bit code units. 117 * 118 * Because of the limited size of immediate constants on ARM, this is only 119 * suitable for small forward movements (i.e. don't try to implement "goto" 120 * with this). 121 * 122 * This must come AFTER anything that can throw an exception, or the 123 * exception catch may miss. (This also implies that it must come after 124 * EXPORT_PC().) 125 */ 126#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]! 127 128/* 129 * The operation performed here is similar to FETCH_ADVANCE_INST, except the 130 * src and dest registers are parameterized (not hard-wired to rPC and rINST). 131 */ 132#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \ 133 ldrh _dreg, [_sreg, #(_count*2)]! 134 135/* 136 * Fetch the next instruction from an offset specified by _reg. Updates 137 * rPC to point to the next instruction. "_reg" must specify the distance 138 * in bytes, *not* 16-bit code units, and may be a signed value. 139 * 140 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the 141 * bits that hold the shift distance are used for the half/byte/sign flags. 142 * In some cases we can pre-double _reg for free, so we require a byte offset 143 * here. 144 */ 145#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]! 146 147/* 148 * Fetch a half-word code unit from an offset past the current PC. The 149 * "_count" value is in 16-bit code units. Does not advance rPC. 150 * 151 * The "_S" variant works the same but treats the value as signed. 152 */ 153#define FETCH(_reg, _count) ldrh _reg, [rPC, #(_count*2)] 154#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #(_count*2)] 155 156/* 157 * Fetch one byte from an offset past the current PC. Pass in the same 158 * "_count" as you would for FETCH, and an additional 0/1 indicating which 159 * byte of the halfword you want (lo/hi). 160 */ 161#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)] 162 163/* 164 * Put the instruction's opcode field into the specified register. 165 */ 166#define GET_INST_OPCODE(_reg) and _reg, rINST, #255 167 168/* 169 * Put the prefetched instruction's opcode field into the specified register. 170 */ 171#define GET_PREFETCHED_OPCODE(_oreg, _ireg) and _oreg, _ireg, #255 172 173/* 174 * Begin executing the opcode in _reg. Because this only jumps within the 175 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork. 176 */ 177#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6 178#define GOTO_OPCODE_IFEQ(_reg) addeq pc, rIBASE, _reg, lsl #6 179#define GOTO_OPCODE_IFNE(_reg) addne pc, rIBASE, _reg, lsl #6 180 181/* 182 * Get/set the 32-bit value from a Dalvik register. 183 */ 184#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2] 185#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2] 186 187#if defined(WITH_JIT) 188#define GET_JIT_ENABLED(_reg) ldr _reg,[rGLUE,#offGlue_jitEnabled] 189#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable] 190#define GET_JIT_THRESHOLD(_reg) ldr _reg,[rGLUE,#offGlue_jitThreshold] 191#endif 192 193/* 194 * Convert a virtual register index into an address. 195 */ 196#define VREG_INDEX_TO_ADDR(_reg, _vreg) \ 197 add _reg, rFP, _vreg, lsl #2 198 199/* 200 * This is a #include, not a %include, because we want the C pre-processor 201 * to expand the macros into assembler assignment statements. 202 */ 203#include "../common/asm-constants.h" 204 205 206/* File: armv5te/platform.S */ 207/* 208 * =========================================================================== 209 * CPU-version-specific defines 210 * =========================================================================== 211 */ 212 213/* 214 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5. Essentially a 215 * one-way branch. 216 * 217 * May modify IP. Does not modify LR. 218 */ 219.macro LDR_PC source 220 ldr pc, \source 221.endm 222 223/* 224 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5. 225 * Jump to subroutine. 226 * 227 * May modify IP and LR. 228 */ 229.macro LDR_PC_LR source 230 mov lr, pc 231 ldr pc, \source 232.endm 233 234/* 235 * Macro for "LDMFD SP!, {...regs...,PC}". 236 * 237 * May modify IP and LR. 238 */ 239.macro LDMFD_PC regs 240 ldmfd sp!, {\regs,pc} 241.endm 242 243 244/* File: armv5te/entry.S */ 245/* 246 * Copyright (C) 2008 The Android Open Source Project 247 * 248 * Licensed under the Apache License, Version 2.0 (the "License"); 249 * you may not use this file except in compliance with the License. 250 * You may obtain a copy of the License at 251 * 252 * http://www.apache.org/licenses/LICENSE-2.0 253 * 254 * Unless required by applicable law or agreed to in writing, software 255 * distributed under the License is distributed on an "AS IS" BASIS, 256 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 257 * See the License for the specific language governing permissions and 258 * limitations under the License. 259 */ 260/* 261 * Interpreter entry point. 262 */ 263 264/* 265 * We don't have formal stack frames, so gdb scans upward in the code 266 * to find the start of the function (a label with the %function type), 267 * and then looks at the next few instructions to figure out what 268 * got pushed onto the stack. From this it figures out how to restore 269 * the registers, including PC, for the previous stack frame. If gdb 270 * sees a non-function label, it stops scanning, so either we need to 271 * have nothing but assembler-local labels between the entry point and 272 * the break, or we need to fake it out. 273 * 274 * When this is defined, we add some stuff to make gdb less confused. 275 */ 276#define ASSIST_DEBUGGER 1 277 278 .text 279 .align 2 280 .global dvmMterpStdRun 281 .type dvmMterpStdRun, %function 282 283/* 284 * On entry: 285 * r0 MterpGlue* glue 286 * 287 * This function returns a boolean "changeInterp" value. The return comes 288 * via a call to dvmMterpStdBail(). 289 */ 290dvmMterpStdRun: 291#define MTERP_ENTRY1 \ 292 .save {r4-r10,fp,lr}; \ 293 stmfd sp!, {r4-r10,fp,lr} @ save 9 regs 294#define MTERP_ENTRY2 \ 295 .pad #4; \ 296 sub sp, sp, #4 @ align 64 297 298 .fnstart 299 MTERP_ENTRY1 300 MTERP_ENTRY2 301 302 /* save stack pointer, add magic word for debuggerd */ 303 str sp, [r0, #offGlue_bailPtr] @ save SP for eventual return 304 305 /* set up "named" registers, figure out entry point */ 306 mov rGLUE, r0 @ set rGLUE 307 ldrb r1, [r0, #offGlue_entryPoint] @ InterpEntry enum is char 308 LOAD_PC_FP_FROM_GLUE() @ load rPC and rFP from "glue" 309 adr rIBASE, dvmAsmInstructionStart @ set rIBASE 310 cmp r1, #kInterpEntryInstr @ usual case? 311 bne .Lnot_instr @ no, handle it 312 313#if defined(WITH_JIT) 314.Lno_singleStep: 315 /* Entry is always a possible trace start */ 316 GET_JIT_PROF_TABLE(r0) 317 FETCH_INST() 318 cmp r0,#0 319 bne common_updateProfile 320 GET_INST_OPCODE(ip) 321 GOTO_OPCODE(ip) 322#else 323 /* start executing the instruction at rPC */ 324 FETCH_INST() @ load rINST from rPC 325 GET_INST_OPCODE(ip) @ extract opcode from rINST 326 GOTO_OPCODE(ip) @ jump to next instruction 327#endif 328 329.Lnot_instr: 330 cmp r1, #kInterpEntryReturn @ were we returning from a method? 331 beq common_returnFromMethod 332 333.Lnot_return: 334 cmp r1, #kInterpEntryThrow @ were we throwing an exception? 335 beq common_exceptionThrown 336 337#if defined(WITH_JIT) 338.Lnot_throw: 339 ldr r0,[rGLUE, #offGlue_jitResume] 340 ldr r2,[rGLUE, #offGlue_jitResumePC] 341 cmp r1, #kInterpEntryResume @ resuming after Jit single-step? 342 bne .Lbad_arg 343 cmp rPC,r2 344 bne .Lno_singleStep @ must have branched, don't resume 345 mov r1, #kInterpEntryInstr 346 strb r1, [rGLUE, #offGlue_entryPoint] 347 ldr rINST, .LdvmCompilerTemplate 348 bx r0 @ re-enter the translation 349.LdvmCompilerTemplate: 350 .word dvmCompilerTemplateStart 351#endif 352 353.Lbad_arg: 354 ldr r0, strBadEntryPoint 355 @ r1 holds value of entryPoint 356 bl printf 357 bl dvmAbort 358 .fnend 359 360 361 .global dvmMterpStdBail 362 .type dvmMterpStdBail, %function 363 364/* 365 * Restore the stack pointer and PC from the save point established on entry. 366 * This is essentially the same as a longjmp, but should be cheaper. The 367 * last instruction causes us to return to whoever called dvmMterpStdRun. 368 * 369 * We pushed some registers on the stack in dvmMterpStdRun, then saved 370 * SP and LR. Here we restore SP, restore the registers, and then restore 371 * LR to PC. 372 * 373 * On entry: 374 * r0 MterpGlue* glue 375 * r1 bool changeInterp 376 */ 377dvmMterpStdBail: 378 ldr sp, [r0, #offGlue_bailPtr] @ sp<- saved SP 379 mov r0, r1 @ return the changeInterp value 380 add sp, sp, #4 @ un-align 64 381 LDMFD_PC "r4-r10,fp" @ restore 9 regs and return 382 383 384/* 385 * String references. 386 */ 387strBadEntryPoint: 388 .word .LstrBadEntryPoint 389 390 391 392 .global dvmAsmInstructionStart 393 .type dvmAsmInstructionStart, %function 394dvmAsmInstructionStart = .L_OP_NOP 395 .text 396 397/* ------------------------------ */ 398 .balign 64 399.L_OP_NOP: /* 0x00 */ 400/* File: armv5te/OP_NOP.S */ 401 FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST 402 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 403 GOTO_OPCODE(ip) @ execute it 404 405#ifdef ASSIST_DEBUGGER 406 /* insert fake function header to help gdb find the stack frame */ 407 .type dalvik_inst, %function 408dalvik_inst: 409 .fnstart 410 MTERP_ENTRY1 411 MTERP_ENTRY2 412 .fnend 413#endif 414 415 416/* ------------------------------ */ 417 .balign 64 418.L_OP_MOVE: /* 0x01 */ 419/* File: armv5te/OP_MOVE.S */ 420 /* for move, move-object, long-to-int */ 421 /* op vA, vB */ 422 mov r1, rINST, lsr #12 @ r1<- B from 15:12 423 mov r0, rINST, lsr #8 @ r0<- A from 11:8 424 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 425 GET_VREG(r2, r1) @ r2<- fp[B] 426 and r0, r0, #15 427 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 428 SET_VREG(r2, r0) @ fp[A]<- r2 429 GOTO_OPCODE(ip) @ execute next instruction 430 431 432/* ------------------------------ */ 433 .balign 64 434.L_OP_MOVE_FROM16: /* 0x02 */ 435/* File: armv5te/OP_MOVE_FROM16.S */ 436 /* for: move/from16, move-object/from16 */ 437 /* op vAA, vBBBB */ 438 FETCH(r1, 1) @ r1<- BBBB 439 mov r0, rINST, lsr #8 @ r0<- AA 440 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 441 GET_VREG(r2, r1) @ r2<- fp[BBBB] 442 GET_INST_OPCODE(ip) @ extract opcode from rINST 443 SET_VREG(r2, r0) @ fp[AA]<- r2 444 GOTO_OPCODE(ip) @ jump to next instruction 445 446 447/* ------------------------------ */ 448 .balign 64 449.L_OP_MOVE_16: /* 0x03 */ 450/* File: armv5te/OP_MOVE_16.S */ 451 /* for: move/16, move-object/16 */ 452 /* op vAAAA, vBBBB */ 453 FETCH(r1, 2) @ r1<- BBBB 454 FETCH(r0, 1) @ r0<- AAAA 455 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 456 GET_VREG(r2, r1) @ r2<- fp[BBBB] 457 GET_INST_OPCODE(ip) @ extract opcode from rINST 458 SET_VREG(r2, r0) @ fp[AAAA]<- r2 459 GOTO_OPCODE(ip) @ jump to next instruction 460 461 462/* ------------------------------ */ 463 .balign 64 464.L_OP_MOVE_WIDE: /* 0x04 */ 465/* File: armv5te/OP_MOVE_WIDE.S */ 466 /* move-wide vA, vB */ 467 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 468 mov r2, rINST, lsr #8 @ r2<- A(+) 469 mov r3, rINST, lsr #12 @ r3<- B 470 and r2, r2, #15 471 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 472 add r2, rFP, r2, lsl #2 @ r2<- &fp[A] 473 ldmia r3, {r0-r1} @ r0/r1<- fp[B] 474 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 475 GET_INST_OPCODE(ip) @ extract opcode from rINST 476 stmia r2, {r0-r1} @ fp[A]<- r0/r1 477 GOTO_OPCODE(ip) @ jump to next instruction 478 479 480/* ------------------------------ */ 481 .balign 64 482.L_OP_MOVE_WIDE_FROM16: /* 0x05 */ 483/* File: armv5te/OP_MOVE_WIDE_FROM16.S */ 484 /* move-wide/from16 vAA, vBBBB */ 485 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 486 FETCH(r3, 1) @ r3<- BBBB 487 mov r2, rINST, lsr #8 @ r2<- AA 488 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 489 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 490 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 491 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 492 GET_INST_OPCODE(ip) @ extract opcode from rINST 493 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 494 GOTO_OPCODE(ip) @ jump to next instruction 495 496 497/* ------------------------------ */ 498 .balign 64 499.L_OP_MOVE_WIDE_16: /* 0x06 */ 500/* File: armv5te/OP_MOVE_WIDE_16.S */ 501 /* move-wide/16 vAAAA, vBBBB */ 502 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 503 FETCH(r3, 2) @ r3<- BBBB 504 FETCH(r2, 1) @ r2<- AAAA 505 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 506 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 507 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 508 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 509 GET_INST_OPCODE(ip) @ extract opcode from rINST 510 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 511 GOTO_OPCODE(ip) @ jump to next instruction 512 513 514/* ------------------------------ */ 515 .balign 64 516.L_OP_MOVE_OBJECT: /* 0x07 */ 517/* File: armv5te/OP_MOVE_OBJECT.S */ 518/* File: armv5te/OP_MOVE.S */ 519 /* for move, move-object, long-to-int */ 520 /* op vA, vB */ 521 mov r1, rINST, lsr #12 @ r1<- B from 15:12 522 mov r0, rINST, lsr #8 @ r0<- A from 11:8 523 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 524 GET_VREG(r2, r1) @ r2<- fp[B] 525 and r0, r0, #15 526 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 527 SET_VREG(r2, r0) @ fp[A]<- r2 528 GOTO_OPCODE(ip) @ execute next instruction 529 530 531 532/* ------------------------------ */ 533 .balign 64 534.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 535/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 536/* File: armv5te/OP_MOVE_FROM16.S */ 537 /* for: move/from16, move-object/from16 */ 538 /* op vAA, vBBBB */ 539 FETCH(r1, 1) @ r1<- BBBB 540 mov r0, rINST, lsr #8 @ r0<- AA 541 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 542 GET_VREG(r2, r1) @ r2<- fp[BBBB] 543 GET_INST_OPCODE(ip) @ extract opcode from rINST 544 SET_VREG(r2, r0) @ fp[AA]<- r2 545 GOTO_OPCODE(ip) @ jump to next instruction 546 547 548 549/* ------------------------------ */ 550 .balign 64 551.L_OP_MOVE_OBJECT_16: /* 0x09 */ 552/* File: armv5te/OP_MOVE_OBJECT_16.S */ 553/* File: armv5te/OP_MOVE_16.S */ 554 /* for: move/16, move-object/16 */ 555 /* op vAAAA, vBBBB */ 556 FETCH(r1, 2) @ r1<- BBBB 557 FETCH(r0, 1) @ r0<- AAAA 558 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 559 GET_VREG(r2, r1) @ r2<- fp[BBBB] 560 GET_INST_OPCODE(ip) @ extract opcode from rINST 561 SET_VREG(r2, r0) @ fp[AAAA]<- r2 562 GOTO_OPCODE(ip) @ jump to next instruction 563 564 565 566/* ------------------------------ */ 567 .balign 64 568.L_OP_MOVE_RESULT: /* 0x0a */ 569/* File: armv5te/OP_MOVE_RESULT.S */ 570 /* for: move-result, move-result-object */ 571 /* op vAA */ 572 mov r2, rINST, lsr #8 @ r2<- AA 573 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 574 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 575 GET_INST_OPCODE(ip) @ extract opcode from rINST 576 SET_VREG(r0, r2) @ fp[AA]<- r0 577 GOTO_OPCODE(ip) @ jump to next instruction 578 579 580/* ------------------------------ */ 581 .balign 64 582.L_OP_MOVE_RESULT_WIDE: /* 0x0b */ 583/* File: armv5te/OP_MOVE_RESULT_WIDE.S */ 584 /* move-result-wide vAA */ 585 mov r2, rINST, lsr #8 @ r2<- AA 586 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 587 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 588 ldmia r3, {r0-r1} @ r0/r1<- retval.j 589 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 590 GET_INST_OPCODE(ip) @ extract opcode from rINST 591 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 592 GOTO_OPCODE(ip) @ jump to next instruction 593 594 595/* ------------------------------ */ 596 .balign 64 597.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */ 598/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */ 599/* File: armv5te/OP_MOVE_RESULT.S */ 600 /* for: move-result, move-result-object */ 601 /* op vAA */ 602 mov r2, rINST, lsr #8 @ r2<- AA 603 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 604 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 605 GET_INST_OPCODE(ip) @ extract opcode from rINST 606 SET_VREG(r0, r2) @ fp[AA]<- r0 607 GOTO_OPCODE(ip) @ jump to next instruction 608 609 610 611/* ------------------------------ */ 612 .balign 64 613.L_OP_MOVE_EXCEPTION: /* 0x0d */ 614/* File: armv5te/OP_MOVE_EXCEPTION.S */ 615 /* move-exception vAA */ 616 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 617 mov r2, rINST, lsr #8 @ r2<- AA 618 ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass 619 mov r1, #0 @ r1<- 0 620 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 621 SET_VREG(r3, r2) @ fp[AA]<- exception obj 622 GET_INST_OPCODE(ip) @ extract opcode from rINST 623 str r1, [r0, #offThread_exception] @ dvmClearException bypass 624 GOTO_OPCODE(ip) @ jump to next instruction 625 626 627/* ------------------------------ */ 628 .balign 64 629.L_OP_RETURN_VOID: /* 0x0e */ 630/* File: armv5te/OP_RETURN_VOID.S */ 631 b common_returnFromMethod 632 633 634/* ------------------------------ */ 635 .balign 64 636.L_OP_RETURN: /* 0x0f */ 637/* File: armv5te/OP_RETURN.S */ 638 /* 639 * Return a 32-bit value. Copies the return value into the "glue" 640 * structure, then jumps to the return handler. 641 * 642 * for: return, return-object 643 */ 644 /* op vAA */ 645 mov r2, rINST, lsr #8 @ r2<- AA 646 GET_VREG(r0, r2) @ r0<- vAA 647 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 648 b common_returnFromMethod 649 650 651/* ------------------------------ */ 652 .balign 64 653.L_OP_RETURN_WIDE: /* 0x10 */ 654/* File: armv5te/OP_RETURN_WIDE.S */ 655 /* 656 * Return a 64-bit value. Copies the return value into the "glue" 657 * structure, then jumps to the return handler. 658 */ 659 /* return-wide vAA */ 660 mov r2, rINST, lsr #8 @ r2<- AA 661 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 662 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 663 ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 664 stmia r3, {r0-r1} @ retval<- r0/r1 665 b common_returnFromMethod 666 667 668/* ------------------------------ */ 669 .balign 64 670.L_OP_RETURN_OBJECT: /* 0x11 */ 671/* File: armv5te/OP_RETURN_OBJECT.S */ 672/* File: armv5te/OP_RETURN.S */ 673 /* 674 * Return a 32-bit value. Copies the return value into the "glue" 675 * structure, then jumps to the return handler. 676 * 677 * for: return, return-object 678 */ 679 /* op vAA */ 680 mov r2, rINST, lsr #8 @ r2<- AA 681 GET_VREG(r0, r2) @ r0<- vAA 682 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 683 b common_returnFromMethod 684 685 686 687/* ------------------------------ */ 688 .balign 64 689.L_OP_CONST_4: /* 0x12 */ 690/* File: armv5te/OP_CONST_4.S */ 691 /* const/4 vA, #+B */ 692 mov r1, rINST, lsl #16 @ r1<- Bxxx0000 693 mov r0, rINST, lsr #8 @ r0<- A+ 694 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 695 mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) 696 and r0, r0, #15 697 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 698 SET_VREG(r1, r0) @ fp[A]<- r1 699 GOTO_OPCODE(ip) @ execute next instruction 700 701 702/* ------------------------------ */ 703 .balign 64 704.L_OP_CONST_16: /* 0x13 */ 705/* File: armv5te/OP_CONST_16.S */ 706 /* const/16 vAA, #+BBBB */ 707 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 708 mov r3, rINST, lsr #8 @ r3<- AA 709 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 710 SET_VREG(r0, r3) @ vAA<- r0 711 GET_INST_OPCODE(ip) @ extract opcode from rINST 712 GOTO_OPCODE(ip) @ jump to next instruction 713 714 715/* ------------------------------ */ 716 .balign 64 717.L_OP_CONST: /* 0x14 */ 718/* File: armv5te/OP_CONST.S */ 719 /* const vAA, #+BBBBbbbb */ 720 mov r3, rINST, lsr #8 @ r3<- AA 721 FETCH(r0, 1) @ r0<- bbbb (low) 722 FETCH(r1, 2) @ r1<- BBBB (high) 723 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 724 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 725 GET_INST_OPCODE(ip) @ extract opcode from rINST 726 SET_VREG(r0, r3) @ vAA<- r0 727 GOTO_OPCODE(ip) @ jump to next instruction 728 729 730/* ------------------------------ */ 731 .balign 64 732.L_OP_CONST_HIGH16: /* 0x15 */ 733/* File: armv5te/OP_CONST_HIGH16.S */ 734 /* const/high16 vAA, #+BBBB0000 */ 735 FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended) 736 mov r3, rINST, lsr #8 @ r3<- AA 737 mov r0, r0, lsl #16 @ r0<- BBBB0000 738 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 739 SET_VREG(r0, r3) @ vAA<- r0 740 GET_INST_OPCODE(ip) @ extract opcode from rINST 741 GOTO_OPCODE(ip) @ jump to next instruction 742 743 744/* ------------------------------ */ 745 .balign 64 746.L_OP_CONST_WIDE_16: /* 0x16 */ 747/* File: armv5te/OP_CONST_WIDE_16.S */ 748 /* const-wide/16 vAA, #+BBBB */ 749 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 750 mov r3, rINST, lsr #8 @ r3<- AA 751 mov r1, r0, asr #31 @ r1<- ssssssss 752 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 753 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 754 GET_INST_OPCODE(ip) @ extract opcode from rINST 755 stmia r3, {r0-r1} @ vAA<- r0/r1 756 GOTO_OPCODE(ip) @ jump to next instruction 757 758 759/* ------------------------------ */ 760 .balign 64 761.L_OP_CONST_WIDE_32: /* 0x17 */ 762/* File: armv5te/OP_CONST_WIDE_32.S */ 763 /* const-wide/32 vAA, #+BBBBbbbb */ 764 FETCH(r0, 1) @ r0<- 0000bbbb (low) 765 mov r3, rINST, lsr #8 @ r3<- AA 766 FETCH_S(r2, 2) @ r2<- ssssBBBB (high) 767 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 768 orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb 769 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 770 mov r1, r0, asr #31 @ r1<- ssssssss 771 GET_INST_OPCODE(ip) @ extract opcode from rINST 772 stmia r3, {r0-r1} @ vAA<- r0/r1 773 GOTO_OPCODE(ip) @ jump to next instruction 774 775 776/* ------------------------------ */ 777 .balign 64 778.L_OP_CONST_WIDE: /* 0x18 */ 779/* File: armv5te/OP_CONST_WIDE.S */ 780 /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ 781 FETCH(r0, 1) @ r0<- bbbb (low) 782 FETCH(r1, 2) @ r1<- BBBB (low middle) 783 FETCH(r2, 3) @ r2<- hhhh (high middle) 784 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word) 785 FETCH(r3, 4) @ r3<- HHHH (high) 786 mov r9, rINST, lsr #8 @ r9<- AA 787 orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) 788 FETCH_ADVANCE_INST(5) @ advance rPC, load rINST 789 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 790 GET_INST_OPCODE(ip) @ extract opcode from rINST 791 stmia r9, {r0-r1} @ vAA<- r0/r1 792 GOTO_OPCODE(ip) @ jump to next instruction 793 794 795/* ------------------------------ */ 796 .balign 64 797.L_OP_CONST_WIDE_HIGH16: /* 0x19 */ 798/* File: armv5te/OP_CONST_WIDE_HIGH16.S */ 799 /* const-wide/high16 vAA, #+BBBB000000000000 */ 800 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended) 801 mov r3, rINST, lsr #8 @ r3<- AA 802 mov r0, #0 @ r0<- 00000000 803 mov r1, r1, lsl #16 @ r1<- BBBB0000 804 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 805 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 806 GET_INST_OPCODE(ip) @ extract opcode from rINST 807 stmia r3, {r0-r1} @ vAA<- r0/r1 808 GOTO_OPCODE(ip) @ jump to next instruction 809 810 811/* ------------------------------ */ 812 .balign 64 813.L_OP_CONST_STRING: /* 0x1a */ 814/* File: armv5te/OP_CONST_STRING.S */ 815 /* const/string vAA, String@BBBB */ 816 FETCH(r1, 1) @ r1<- BBBB 817 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 818 mov r9, rINST, lsr #8 @ r9<- AA 819 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 820 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 821 cmp r0, #0 @ not yet resolved? 822 beq .LOP_CONST_STRING_resolve 823 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 824 GET_INST_OPCODE(ip) @ extract opcode from rINST 825 SET_VREG(r0, r9) @ vAA<- r0 826 GOTO_OPCODE(ip) @ jump to next instruction 827 828/* ------------------------------ */ 829 .balign 64 830.L_OP_CONST_STRING_JUMBO: /* 0x1b */ 831/* File: armv5te/OP_CONST_STRING_JUMBO.S */ 832 /* const/string vAA, String@BBBBBBBB */ 833 FETCH(r0, 1) @ r0<- bbbb (low) 834 FETCH(r1, 2) @ r1<- BBBB (high) 835 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 836 mov r9, rINST, lsr #8 @ r9<- AA 837 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 838 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 839 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 840 cmp r0, #0 841 beq .LOP_CONST_STRING_JUMBO_resolve 842 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 843 GET_INST_OPCODE(ip) @ extract opcode from rINST 844 SET_VREG(r0, r9) @ vAA<- r0 845 GOTO_OPCODE(ip) @ jump to next instruction 846 847/* ------------------------------ */ 848 .balign 64 849.L_OP_CONST_CLASS: /* 0x1c */ 850/* File: armv5te/OP_CONST_CLASS.S */ 851 /* const/class vAA, Class@BBBB */ 852 FETCH(r1, 1) @ r1<- BBBB 853 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 854 mov r9, rINST, lsr #8 @ r9<- AA 855 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses 856 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB] 857 cmp r0, #0 @ not yet resolved? 858 beq .LOP_CONST_CLASS_resolve 859 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 860 GET_INST_OPCODE(ip) @ extract opcode from rINST 861 SET_VREG(r0, r9) @ vAA<- r0 862 GOTO_OPCODE(ip) @ jump to next instruction 863 864/* ------------------------------ */ 865 .balign 64 866.L_OP_MONITOR_ENTER: /* 0x1d */ 867/* File: armv5te/OP_MONITOR_ENTER.S */ 868 /* 869 * Synchronize on an object. 870 */ 871 /* monitor-enter vAA */ 872 mov r2, rINST, lsr #8 @ r2<- AA 873 GET_VREG(r1, r2) @ r1<- vAA (object) 874 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 875 cmp r1, #0 @ null object? 876 EXPORT_PC() @ need for precise GC, MONITOR_TRACKING 877 beq common_errNullObject @ null object, throw an exception 878 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 879 bl dvmLockObject @ call(self, obj) 880#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */ 881 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 882 ldr r1, [r0, #offThread_exception] @ check for exception 883 cmp r1, #0 884 bne common_exceptionThrown @ exception raised, bail out 885#endif 886 GET_INST_OPCODE(ip) @ extract opcode from rINST 887 GOTO_OPCODE(ip) @ jump to next instruction 888 889 890/* ------------------------------ */ 891 .balign 64 892.L_OP_MONITOR_EXIT: /* 0x1e */ 893/* File: armv5te/OP_MONITOR_EXIT.S */ 894 /* 895 * Unlock an object. 896 * 897 * Exceptions that occur when unlocking a monitor need to appear as 898 * if they happened at the following instruction. See the Dalvik 899 * instruction spec. 900 */ 901 /* monitor-exit vAA */ 902 mov r2, rINST, lsr #8 @ r2<- AA 903 EXPORT_PC() @ before fetch: export the PC 904 GET_VREG(r1, r2) @ r1<- vAA (object) 905 cmp r1, #0 @ null object? 906 beq common_errNullObject @ yes 907 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 908 bl dvmUnlockObject @ r0<- success for unlock(self, obj) 909 cmp r0, #0 @ failed? 910 beq common_exceptionThrown @ yes, exception is pending 911 FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST 912 GET_INST_OPCODE(ip) @ extract opcode from rINST 913 GOTO_OPCODE(ip) @ jump to next instruction 914 915 916/* ------------------------------ */ 917 .balign 64 918.L_OP_CHECK_CAST: /* 0x1f */ 919/* File: armv5te/OP_CHECK_CAST.S */ 920 /* 921 * Check to see if a cast from one class to another is allowed. 922 */ 923 /* check-cast vAA, class@BBBB */ 924 mov r3, rINST, lsr #8 @ r3<- AA 925 FETCH(r2, 1) @ r2<- BBBB 926 GET_VREG(r9, r3) @ r9<- object 927 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex 928 cmp r9, #0 @ is object null? 929 ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses 930 beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds 931 ldr r1, [r0, r2, lsl #2] @ r1<- resolved class 932 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 933 cmp r1, #0 @ have we resolved this before? 934 beq .LOP_CHECK_CAST_resolve @ not resolved, do it now 935.LOP_CHECK_CAST_resolved: 936 cmp r0, r1 @ same class (trivial success)? 937 bne .LOP_CHECK_CAST_fullcheck @ no, do full check 938.LOP_CHECK_CAST_okay: 939 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 940 GET_INST_OPCODE(ip) @ extract opcode from rINST 941 GOTO_OPCODE(ip) @ jump to next instruction 942 943/* ------------------------------ */ 944 .balign 64 945.L_OP_INSTANCE_OF: /* 0x20 */ 946/* File: armv5te/OP_INSTANCE_OF.S */ 947 /* 948 * Check to see if an object reference is an instance of a class. 949 * 950 * Most common situation is a non-null object, being compared against 951 * an already-resolved class. 952 */ 953 /* instance-of vA, vB, class@CCCC */ 954 mov r3, rINST, lsr #12 @ r3<- B 955 mov r9, rINST, lsr #8 @ r9<- A+ 956 GET_VREG(r0, r3) @ r0<- vB (object) 957 and r9, r9, #15 @ r9<- A 958 cmp r0, #0 @ is object null? 959 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex 960 beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0 961 FETCH(r3, 1) @ r3<- CCCC 962 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses 963 ldr r1, [r2, r3, lsl #2] @ r1<- resolved class 964 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 965 cmp r1, #0 @ have we resolved this before? 966 beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now 967.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class 968 cmp r0, r1 @ same class (trivial success)? 969 beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish 970 b .LOP_INSTANCE_OF_fullcheck @ no, do full check 971 972/* ------------------------------ */ 973 .balign 64 974.L_OP_ARRAY_LENGTH: /* 0x21 */ 975/* File: armv5te/OP_ARRAY_LENGTH.S */ 976 /* 977 * Return the length of an array. 978 */ 979 mov r1, rINST, lsr #12 @ r1<- B 980 mov r2, rINST, lsr #8 @ r2<- A+ 981 GET_VREG(r0, r1) @ r0<- vB (object ref) 982 and r2, r2, #15 @ r2<- A 983 cmp r0, #0 @ is object null? 984 beq common_errNullObject @ yup, fail 985 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 986 ldr r3, [r0, #offArrayObject_length] @ r3<- array length 987 GET_INST_OPCODE(ip) @ extract opcode from rINST 988 SET_VREG(r3, r2) @ vB<- length 989 GOTO_OPCODE(ip) @ jump to next instruction 990 991 992/* ------------------------------ */ 993 .balign 64 994.L_OP_NEW_INSTANCE: /* 0x22 */ 995/* File: armv5te/OP_NEW_INSTANCE.S */ 996 /* 997 * Create a new instance of a class. 998 */ 999 /* new-instance vAA, class@BBBB */ 1000 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1001 FETCH(r1, 1) @ r1<- BBBB 1002 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1003 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1004 EXPORT_PC() @ req'd for init, resolve, alloc 1005 cmp r0, #0 @ already resolved? 1006 beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now 1007.LOP_NEW_INSTANCE_resolved: @ r0=class 1008 ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum 1009 cmp r1, #CLASS_INITIALIZED @ has class been initialized? 1010 bne .LOP_NEW_INSTANCE_needinit @ no, init class now 1011.LOP_NEW_INSTANCE_initialized: @ r0=class 1012 mov r1, #ALLOC_DONT_TRACK @ flags for alloc call 1013 bl dvmAllocObject @ r0<- new object 1014 b .LOP_NEW_INSTANCE_finish @ continue 1015 1016/* ------------------------------ */ 1017 .balign 64 1018.L_OP_NEW_ARRAY: /* 0x23 */ 1019/* File: armv5te/OP_NEW_ARRAY.S */ 1020 /* 1021 * Allocate an array of objects, specified with the array class 1022 * and a count. 1023 * 1024 * The verifier guarantees that this is an array class, so we don't 1025 * check for it here. 1026 */ 1027 /* new-array vA, vB, class@CCCC */ 1028 mov r0, rINST, lsr #12 @ r0<- B 1029 FETCH(r2, 1) @ r2<- CCCC 1030 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1031 GET_VREG(r1, r0) @ r1<- vB (array length) 1032 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1033 cmp r1, #0 @ check length 1034 ldr r0, [r3, r2, lsl #2] @ r0<- resolved class 1035 bmi common_errNegativeArraySize @ negative length, bail 1036 cmp r0, #0 @ already resolved? 1037 EXPORT_PC() @ req'd for resolve, alloc 1038 bne .LOP_NEW_ARRAY_finish @ resolved, continue 1039 b .LOP_NEW_ARRAY_resolve @ do resolve now 1040 1041/* ------------------------------ */ 1042 .balign 64 1043.L_OP_FILLED_NEW_ARRAY: /* 0x24 */ 1044/* File: armv5te/OP_FILLED_NEW_ARRAY.S */ 1045 /* 1046 * Create a new array with elements filled from registers. 1047 * 1048 * for: filled-new-array, filled-new-array/range 1049 */ 1050 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 1051 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */ 1052 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1053 FETCH(r1, 1) @ r1<- BBBB 1054 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1055 EXPORT_PC() @ need for resolve and alloc 1056 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1057 mov r10, rINST, lsr #8 @ r10<- AA or BA 1058 cmp r0, #0 @ already resolved? 1059 bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on 10608: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 1061 mov r2, #0 @ r2<- false 1062 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 1063 bl dvmResolveClass @ r0<- call(clazz, ref) 1064 cmp r0, #0 @ got null? 1065 beq common_exceptionThrown @ yes, handle exception 1066 b .LOP_FILLED_NEW_ARRAY_continue 1067 1068/* ------------------------------ */ 1069 .balign 64 1070.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */ 1071/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */ 1072/* File: armv5te/OP_FILLED_NEW_ARRAY.S */ 1073 /* 1074 * Create a new array with elements filled from registers. 1075 * 1076 * for: filled-new-array, filled-new-array/range 1077 */ 1078 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 1079 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */ 1080 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1081 FETCH(r1, 1) @ r1<- BBBB 1082 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1083 EXPORT_PC() @ need for resolve and alloc 1084 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1085 mov r10, rINST, lsr #8 @ r10<- AA or BA 1086 cmp r0, #0 @ already resolved? 1087 bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on 10888: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 1089 mov r2, #0 @ r2<- false 1090 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 1091 bl dvmResolveClass @ r0<- call(clazz, ref) 1092 cmp r0, #0 @ got null? 1093 beq common_exceptionThrown @ yes, handle exception 1094 b .LOP_FILLED_NEW_ARRAY_RANGE_continue 1095 1096 1097/* ------------------------------ */ 1098 .balign 64 1099.L_OP_FILL_ARRAY_DATA: /* 0x26 */ 1100/* File: armv5te/OP_FILL_ARRAY_DATA.S */ 1101 /* fill-array-data vAA, +BBBBBBBB */ 1102 FETCH(r0, 1) @ r0<- bbbb (lo) 1103 FETCH(r1, 2) @ r1<- BBBB (hi) 1104 mov r3, rINST, lsr #8 @ r3<- AA 1105 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 1106 GET_VREG(r0, r3) @ r0<- vAA (array object) 1107 add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.) 1108 EXPORT_PC(); 1109 bl dvmInterpHandleFillArrayData@ fill the array with predefined data 1110 cmp r0, #0 @ 0 means an exception is thrown 1111 beq common_exceptionThrown @ has exception 1112 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 1113 GET_INST_OPCODE(ip) @ extract opcode from rINST 1114 GOTO_OPCODE(ip) @ jump to next instruction 1115 1116/* ------------------------------ */ 1117 .balign 64 1118.L_OP_THROW: /* 0x27 */ 1119/* File: armv5te/OP_THROW.S */ 1120 /* 1121 * Throw an exception object in the current thread. 1122 */ 1123 /* throw vAA */ 1124 mov r2, rINST, lsr #8 @ r2<- AA 1125 GET_VREG(r1, r2) @ r1<- vAA (exception object) 1126 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 1127 cmp r1, #0 @ null object? 1128 beq common_errNullObject @ yes, throw an NPE instead 1129 @ bypass dvmSetException, just store it 1130 str r1, [r0, #offThread_exception] @ thread->exception<- obj 1131 b common_exceptionThrown 1132 1133 1134/* ------------------------------ */ 1135 .balign 64 1136.L_OP_GOTO: /* 0x28 */ 1137/* File: armv5te/OP_GOTO.S */ 1138 /* 1139 * Unconditional branch, 8-bit offset. 1140 * 1141 * The branch distance is a signed code-unit offset, which we need to 1142 * double to get a byte offset. 1143 */ 1144 /* goto +AA */ 1145 mov r0, rINST, lsl #16 @ r0<- AAxx0000 1146 movs r9, r0, asr #24 @ r9<- ssssssAA (sign-extended) 1147 mov r9, r9, lsl #1 @ r9<- byte offset 1148 bmi common_backwardBranch @ backward branch, do periodic checks 1149#if defined(WITH_JIT) 1150 GET_JIT_PROF_TABLE(r0) 1151 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1152 cmp r0,#0 1153 bne common_updateProfile 1154 GET_INST_OPCODE(ip) @ extract opcode from rINST 1155 GOTO_OPCODE(ip) @ jump to next instruction 1156#else 1157 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1158 GET_INST_OPCODE(ip) @ extract opcode from rINST 1159 GOTO_OPCODE(ip) @ jump to next instruction 1160#endif 1161 1162/* ------------------------------ */ 1163 .balign 64 1164.L_OP_GOTO_16: /* 0x29 */ 1165/* File: armv5te/OP_GOTO_16.S */ 1166 /* 1167 * Unconditional branch, 16-bit offset. 1168 * 1169 * The branch distance is a signed code-unit offset, which we need to 1170 * double to get a byte offset. 1171 */ 1172 /* goto/16 +AAAA */ 1173 FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended) 1174 movs r9, r0, asl #1 @ r9<- byte offset, check sign 1175 bmi common_backwardBranch @ backward branch, do periodic checks 1176#if defined(WITH_JIT) 1177 GET_JIT_PROF_TABLE(r0) 1178 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1179 cmp r0,#0 1180 bne common_updateProfile 1181 GET_INST_OPCODE(ip) @ extract opcode from rINST 1182 GOTO_OPCODE(ip) @ jump to next instruction 1183#else 1184 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1185 GET_INST_OPCODE(ip) @ extract opcode from rINST 1186 GOTO_OPCODE(ip) @ jump to next instruction 1187#endif 1188 1189 1190/* ------------------------------ */ 1191 .balign 64 1192.L_OP_GOTO_32: /* 0x2a */ 1193/* File: armv5te/OP_GOTO_32.S */ 1194 /* 1195 * Unconditional branch, 32-bit offset. 1196 * 1197 * The branch distance is a signed code-unit offset, which we need to 1198 * double to get a byte offset. 1199 * 1200 * Unlike most opcodes, this one is allowed to branch to itself, so 1201 * our "backward branch" test must be "<=0" instead of "<0". The ORRS 1202 * instruction doesn't affect the V flag, so we need to clear it 1203 * explicitly. 1204 */ 1205 /* goto/32 +AAAAAAAA */ 1206 FETCH(r0, 1) @ r0<- aaaa (lo) 1207 FETCH(r1, 2) @ r1<- AAAA (hi) 1208 cmp ip, ip @ (clear V flag during stall) 1209 orrs r0, r0, r1, lsl #16 @ r0<- AAAAaaaa, check sign 1210 mov r9, r0, asl #1 @ r9<- byte offset 1211 ble common_backwardBranch @ backward branch, do periodic checks 1212#if defined(WITH_JIT) 1213 GET_JIT_PROF_TABLE(r0) 1214 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1215 cmp r0,#0 1216 bne common_updateProfile 1217 GET_INST_OPCODE(ip) @ extract opcode from rINST 1218 GOTO_OPCODE(ip) @ jump to next instruction 1219#else 1220 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1221 GET_INST_OPCODE(ip) @ extract opcode from rINST 1222 GOTO_OPCODE(ip) @ jump to next instruction 1223#endif 1224 1225/* ------------------------------ */ 1226 .balign 64 1227.L_OP_PACKED_SWITCH: /* 0x2b */ 1228/* File: armv5te/OP_PACKED_SWITCH.S */ 1229 /* 1230 * Handle a packed-switch or sparse-switch instruction. In both cases 1231 * we decode it and hand it off to a helper function. 1232 * 1233 * We don't really expect backward branches in a switch statement, but 1234 * they're perfectly legal, so we check for them here. 1235 * 1236 * for: packed-switch, sparse-switch 1237 */ 1238 /* op vAA, +BBBB */ 1239 FETCH(r0, 1) @ r0<- bbbb (lo) 1240 FETCH(r1, 2) @ r1<- BBBB (hi) 1241 mov r3, rINST, lsr #8 @ r3<- AA 1242 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 1243 GET_VREG(r1, r3) @ r1<- vAA 1244 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2 1245 bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset 1246 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign 1247 bmi common_backwardBranch @ backward branch, do periodic checks 1248 beq common_backwardBranch @ (want to use BLE but V is unknown) 1249#if defined(WITH_JIT) 1250 GET_JIT_PROF_TABLE(r0) 1251 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1252 cmp r0,#0 1253 bne common_updateProfile 1254 GET_INST_OPCODE(ip) @ extract opcode from rINST 1255 GOTO_OPCODE(ip) @ jump to next instruction 1256#else 1257 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1258 GET_INST_OPCODE(ip) @ extract opcode from rINST 1259 GOTO_OPCODE(ip) @ jump to next instruction 1260#endif 1261 1262 1263/* ------------------------------ */ 1264 .balign 64 1265.L_OP_SPARSE_SWITCH: /* 0x2c */ 1266/* File: armv5te/OP_SPARSE_SWITCH.S */ 1267/* File: armv5te/OP_PACKED_SWITCH.S */ 1268 /* 1269 * Handle a packed-switch or sparse-switch instruction. In both cases 1270 * we decode it and hand it off to a helper function. 1271 * 1272 * We don't really expect backward branches in a switch statement, but 1273 * they're perfectly legal, so we check for them here. 1274 * 1275 * for: packed-switch, sparse-switch 1276 */ 1277 /* op vAA, +BBBB */ 1278 FETCH(r0, 1) @ r0<- bbbb (lo) 1279 FETCH(r1, 2) @ r1<- BBBB (hi) 1280 mov r3, rINST, lsr #8 @ r3<- AA 1281 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 1282 GET_VREG(r1, r3) @ r1<- vAA 1283 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2 1284 bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset 1285 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign 1286 bmi common_backwardBranch @ backward branch, do periodic checks 1287 beq common_backwardBranch @ (want to use BLE but V is unknown) 1288#if defined(WITH_JIT) 1289 GET_JIT_PROF_TABLE(r0) 1290 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1291 cmp r0,#0 1292 bne common_updateProfile 1293 GET_INST_OPCODE(ip) @ extract opcode from rINST 1294 GOTO_OPCODE(ip) @ jump to next instruction 1295#else 1296 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1297 GET_INST_OPCODE(ip) @ extract opcode from rINST 1298 GOTO_OPCODE(ip) @ jump to next instruction 1299#endif 1300 1301 1302 1303/* ------------------------------ */ 1304 .balign 64 1305.L_OP_CMPL_FLOAT: /* 0x2d */ 1306/* File: armv5te/OP_CMPL_FLOAT.S */ 1307 /* 1308 * Compare two floating-point values. Puts 0, 1, or -1 into the 1309 * destination register based on the results of the comparison. 1310 * 1311 * Provide a "naninst" instruction that puts 1 or -1 into r1 depending 1312 * on what value we'd like to return when one of the operands is NaN. 1313 * 1314 * The operation we're implementing is: 1315 * if (x == y) 1316 * return 0; 1317 * else if (x < y) 1318 * return -1; 1319 * else if (x > y) 1320 * return 1; 1321 * else 1322 * return {-1,1}; // one or both operands was NaN 1323 * 1324 * The straightforward implementation requires 3 calls to functions 1325 * that return a result in r0. We can do it with two calls if our 1326 * EABI library supports __aeabi_cfcmple (only one if we want to check 1327 * for NaN directly): 1328 * check x <= y 1329 * if <, return -1 1330 * if ==, return 0 1331 * check y <= x 1332 * if <, return 1 1333 * return {-1,1} 1334 * 1335 * for: cmpl-float, cmpg-float 1336 */ 1337 /* op vAA, vBB, vCC */ 1338 FETCH(r0, 1) @ r0<- CCBB 1339 and r2, r0, #255 @ r2<- BB 1340 mov r3, r0, lsr #8 @ r3<- CC 1341 GET_VREG(r9, r2) @ r9<- vBB 1342 GET_VREG(r10, r3) @ r10<- vCC 1343 mov r0, r9 @ copy to arg registers 1344 mov r1, r10 1345 bl __aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq 1346 bhi .LOP_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate 1347 mvncc r1, #0 @ (less than) r1<- -1 1348 moveq r1, #0 @ (equal) r1<- 0, trumps less than 1349.LOP_CMPL_FLOAT_finish: 1350 mov r3, rINST, lsr #8 @ r3<- AA 1351 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1352 SET_VREG(r1, r3) @ vAA<- r1 1353 GET_INST_OPCODE(ip) @ extract opcode from rINST 1354 GOTO_OPCODE(ip) @ jump to next instruction 1355 1356/* ------------------------------ */ 1357 .balign 64 1358.L_OP_CMPG_FLOAT: /* 0x2e */ 1359/* File: armv5te/OP_CMPG_FLOAT.S */ 1360/* File: armv5te/OP_CMPL_FLOAT.S */ 1361 /* 1362 * Compare two floating-point values. Puts 0, 1, or -1 into the 1363 * destination register based on the results of the comparison. 1364 * 1365 * Provide a "naninst" instruction that puts 1 or -1 into r1 depending 1366 * on what value we'd like to return when one of the operands is NaN. 1367 * 1368 * The operation we're implementing is: 1369 * if (x == y) 1370 * return 0; 1371 * else if (x < y) 1372 * return -1; 1373 * else if (x > y) 1374 * return 1; 1375 * else 1376 * return {-1,1}; // one or both operands was NaN 1377 * 1378 * The straightforward implementation requires 3 calls to functions 1379 * that return a result in r0. We can do it with two calls if our 1380 * EABI library supports __aeabi_cfcmple (only one if we want to check 1381 * for NaN directly): 1382 * check x <= y 1383 * if <, return -1 1384 * if ==, return 0 1385 * check y <= x 1386 * if <, return 1 1387 * return {-1,1} 1388 * 1389 * for: cmpl-float, cmpg-float 1390 */ 1391 /* op vAA, vBB, vCC */ 1392 FETCH(r0, 1) @ r0<- CCBB 1393 and r2, r0, #255 @ r2<- BB 1394 mov r3, r0, lsr #8 @ r3<- CC 1395 GET_VREG(r9, r2) @ r9<- vBB 1396 GET_VREG(r10, r3) @ r10<- vCC 1397 mov r0, r9 @ copy to arg registers 1398 mov r1, r10 1399 bl __aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq 1400 bhi .LOP_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate 1401 mvncc r1, #0 @ (less than) r1<- -1 1402 moveq r1, #0 @ (equal) r1<- 0, trumps less than 1403.LOP_CMPG_FLOAT_finish: 1404 mov r3, rINST, lsr #8 @ r3<- AA 1405 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1406 SET_VREG(r1, r3) @ vAA<- r1 1407 GET_INST_OPCODE(ip) @ extract opcode from rINST 1408 GOTO_OPCODE(ip) @ jump to next instruction 1409 1410 1411/* ------------------------------ */ 1412 .balign 64 1413.L_OP_CMPL_DOUBLE: /* 0x2f */ 1414/* File: armv5te/OP_CMPL_DOUBLE.S */ 1415 /* 1416 * Compare two floating-point values. Puts 0, 1, or -1 into the 1417 * destination register based on the results of the comparison. 1418 * 1419 * Provide a "naninst" instruction that puts 1 or -1 into r1 depending 1420 * on what value we'd like to return when one of the operands is NaN. 1421 * 1422 * See OP_CMPL_FLOAT for an explanation. 1423 * 1424 * For: cmpl-double, cmpg-double 1425 */ 1426 /* op vAA, vBB, vCC */ 1427 FETCH(r0, 1) @ r0<- CCBB 1428 and r9, r0, #255 @ r9<- BB 1429 mov r10, r0, lsr #8 @ r10<- CC 1430 add r9, rFP, r9, lsl #2 @ r9<- &fp[BB] 1431 add r10, rFP, r10, lsl #2 @ r10<- &fp[CC] 1432 ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1 1433 ldmia r10, {r2-r3} @ r2/r3<- vCC/vCC+1 1434 bl __aeabi_cdcmple @ cmp <=: C clear if <, Z set if eq 1435 bhi .LOP_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate 1436 mvncc r1, #0 @ (less than) r1<- -1 1437 moveq r1, #0 @ (equal) r1<- 0, trumps less than 1438.LOP_CMPL_DOUBLE_finish: 1439 mov r3, rINST, lsr #8 @ r3<- AA 1440 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1441 SET_VREG(r1, r3) @ vAA<- r1 1442 GET_INST_OPCODE(ip) @ extract opcode from rINST 1443 GOTO_OPCODE(ip) @ jump to next instruction 1444 1445/* ------------------------------ */ 1446 .balign 64 1447.L_OP_CMPG_DOUBLE: /* 0x30 */ 1448/* File: armv5te/OP_CMPG_DOUBLE.S */ 1449/* File: armv5te/OP_CMPL_DOUBLE.S */ 1450 /* 1451 * Compare two floating-point values. Puts 0, 1, or -1 into the 1452 * destination register based on the results of the comparison. 1453 * 1454 * Provide a "naninst" instruction that puts 1 or -1 into r1 depending 1455 * on what value we'd like to return when one of the operands is NaN. 1456 * 1457 * See OP_CMPL_FLOAT for an explanation. 1458 * 1459 * For: cmpl-double, cmpg-double 1460 */ 1461 /* op vAA, vBB, vCC */ 1462 FETCH(r0, 1) @ r0<- CCBB 1463 and r9, r0, #255 @ r9<- BB 1464 mov r10, r0, lsr #8 @ r10<- CC 1465 add r9, rFP, r9, lsl #2 @ r9<- &fp[BB] 1466 add r10, rFP, r10, lsl #2 @ r10<- &fp[CC] 1467 ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1 1468 ldmia r10, {r2-r3} @ r2/r3<- vCC/vCC+1 1469 bl __aeabi_cdcmple @ cmp <=: C clear if <, Z set if eq 1470 bhi .LOP_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate 1471 mvncc r1, #0 @ (less than) r1<- -1 1472 moveq r1, #0 @ (equal) r1<- 0, trumps less than 1473.LOP_CMPG_DOUBLE_finish: 1474 mov r3, rINST, lsr #8 @ r3<- AA 1475 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1476 SET_VREG(r1, r3) @ vAA<- r1 1477 GET_INST_OPCODE(ip) @ extract opcode from rINST 1478 GOTO_OPCODE(ip) @ jump to next instruction 1479 1480 1481/* ------------------------------ */ 1482 .balign 64 1483.L_OP_CMP_LONG: /* 0x31 */ 1484/* File: armv5te/OP_CMP_LONG.S */ 1485 /* 1486 * Compare two 64-bit values. Puts 0, 1, or -1 into the destination 1487 * register based on the results of the comparison. 1488 * 1489 * We load the full values with LDM, but in practice many values could 1490 * be resolved by only looking at the high word. This could be made 1491 * faster or slower by splitting the LDM into a pair of LDRs. 1492 * 1493 * If we just wanted to set condition flags, we could do this: 1494 * subs ip, r0, r2 1495 * sbcs ip, r1, r3 1496 * subeqs ip, r0, r2 1497 * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific 1498 * integer value, which we can do with 2 conditional mov/mvn instructions 1499 * (set 1, set -1; if they're equal we already have 0 in ip), giving 1500 * us a constant 5-cycle path plus a branch at the end to the 1501 * instruction epilogue code. The multi-compare approach below needs 1502 * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch 1503 * in the worst case (the 64-bit values are equal). 1504 */ 1505 /* cmp-long vAA, vBB, vCC */ 1506 FETCH(r0, 1) @ r0<- CCBB 1507 mov r9, rINST, lsr #8 @ r9<- AA 1508 and r2, r0, #255 @ r2<- BB 1509 mov r3, r0, lsr #8 @ r3<- CC 1510 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 1511 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 1512 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 1513 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 1514 cmp r1, r3 @ compare (vBB+1, vCC+1) 1515 blt .LOP_CMP_LONG_less @ signed compare on high part 1516 bgt .LOP_CMP_LONG_greater 1517 subs r1, r0, r2 @ r1<- r0 - r2 1518 bhi .LOP_CMP_LONG_greater @ unsigned compare on low part 1519 bne .LOP_CMP_LONG_less 1520 b .LOP_CMP_LONG_finish @ equal; r1 already holds 0 1521 1522/* ------------------------------ */ 1523 .balign 64 1524.L_OP_IF_EQ: /* 0x32 */ 1525/* File: armv5te/OP_IF_EQ.S */ 1526/* File: armv5te/bincmp.S */ 1527 /* 1528 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1529 * fragment that specifies the *reverse* comparison to perform, e.g. 1530 * for "if-le" you would use "gt". 1531 * 1532 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1533 */ 1534 /* if-cmp vA, vB, +CCCC */ 1535 mov r0, rINST, lsr #8 @ r0<- A+ 1536 mov r1, rINST, lsr #12 @ r1<- B 1537 and r0, r0, #15 1538 GET_VREG(r3, r1) @ r3<- vB 1539 GET_VREG(r2, r0) @ r2<- vA 1540 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1541 cmp r2, r3 @ compare (vA, vB) 1542 bne 1f @ branch to 1 if comparison failed 1543 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1544 movs r9, r9, asl #1 @ convert to bytes, check sign 1545 bmi common_backwardBranch @ yes, do periodic checks 15461: 1547#if defined(WITH_JIT) 1548 GET_JIT_PROF_TABLE(r0) 1549 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1550 b common_testUpdateProfile 1551#else 1552 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1553 GET_INST_OPCODE(ip) @ extract opcode from rINST 1554 GOTO_OPCODE(ip) @ jump to next instruction 1555#endif 1556 1557 1558 1559/* ------------------------------ */ 1560 .balign 64 1561.L_OP_IF_NE: /* 0x33 */ 1562/* File: armv5te/OP_IF_NE.S */ 1563/* File: armv5te/bincmp.S */ 1564 /* 1565 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1566 * fragment that specifies the *reverse* comparison to perform, e.g. 1567 * for "if-le" you would use "gt". 1568 * 1569 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1570 */ 1571 /* if-cmp vA, vB, +CCCC */ 1572 mov r0, rINST, lsr #8 @ r0<- A+ 1573 mov r1, rINST, lsr #12 @ r1<- B 1574 and r0, r0, #15 1575 GET_VREG(r3, r1) @ r3<- vB 1576 GET_VREG(r2, r0) @ r2<- vA 1577 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1578 cmp r2, r3 @ compare (vA, vB) 1579 beq 1f @ branch to 1 if comparison failed 1580 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1581 movs r9, r9, asl #1 @ convert to bytes, check sign 1582 bmi common_backwardBranch @ yes, do periodic checks 15831: 1584#if defined(WITH_JIT) 1585 GET_JIT_PROF_TABLE(r0) 1586 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1587 b common_testUpdateProfile 1588#else 1589 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1590 GET_INST_OPCODE(ip) @ extract opcode from rINST 1591 GOTO_OPCODE(ip) @ jump to next instruction 1592#endif 1593 1594 1595 1596/* ------------------------------ */ 1597 .balign 64 1598.L_OP_IF_LT: /* 0x34 */ 1599/* File: armv5te/OP_IF_LT.S */ 1600/* File: armv5te/bincmp.S */ 1601 /* 1602 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1603 * fragment that specifies the *reverse* comparison to perform, e.g. 1604 * for "if-le" you would use "gt". 1605 * 1606 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1607 */ 1608 /* if-cmp vA, vB, +CCCC */ 1609 mov r0, rINST, lsr #8 @ r0<- A+ 1610 mov r1, rINST, lsr #12 @ r1<- B 1611 and r0, r0, #15 1612 GET_VREG(r3, r1) @ r3<- vB 1613 GET_VREG(r2, r0) @ r2<- vA 1614 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1615 cmp r2, r3 @ compare (vA, vB) 1616 bge 1f @ branch to 1 if comparison failed 1617 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1618 movs r9, r9, asl #1 @ convert to bytes, check sign 1619 bmi common_backwardBranch @ yes, do periodic checks 16201: 1621#if defined(WITH_JIT) 1622 GET_JIT_PROF_TABLE(r0) 1623 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1624 b common_testUpdateProfile 1625#else 1626 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1627 GET_INST_OPCODE(ip) @ extract opcode from rINST 1628 GOTO_OPCODE(ip) @ jump to next instruction 1629#endif 1630 1631 1632 1633/* ------------------------------ */ 1634 .balign 64 1635.L_OP_IF_GE: /* 0x35 */ 1636/* File: armv5te/OP_IF_GE.S */ 1637/* File: armv5te/bincmp.S */ 1638 /* 1639 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1640 * fragment that specifies the *reverse* comparison to perform, e.g. 1641 * for "if-le" you would use "gt". 1642 * 1643 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1644 */ 1645 /* if-cmp vA, vB, +CCCC */ 1646 mov r0, rINST, lsr #8 @ r0<- A+ 1647 mov r1, rINST, lsr #12 @ r1<- B 1648 and r0, r0, #15 1649 GET_VREG(r3, r1) @ r3<- vB 1650 GET_VREG(r2, r0) @ r2<- vA 1651 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1652 cmp r2, r3 @ compare (vA, vB) 1653 blt 1f @ branch to 1 if comparison failed 1654 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1655 movs r9, r9, asl #1 @ convert to bytes, check sign 1656 bmi common_backwardBranch @ yes, do periodic checks 16571: 1658#if defined(WITH_JIT) 1659 GET_JIT_PROF_TABLE(r0) 1660 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1661 b common_testUpdateProfile 1662#else 1663 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1664 GET_INST_OPCODE(ip) @ extract opcode from rINST 1665 GOTO_OPCODE(ip) @ jump to next instruction 1666#endif 1667 1668 1669 1670/* ------------------------------ */ 1671 .balign 64 1672.L_OP_IF_GT: /* 0x36 */ 1673/* File: armv5te/OP_IF_GT.S */ 1674/* File: armv5te/bincmp.S */ 1675 /* 1676 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1677 * fragment that specifies the *reverse* comparison to perform, e.g. 1678 * for "if-le" you would use "gt". 1679 * 1680 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1681 */ 1682 /* if-cmp vA, vB, +CCCC */ 1683 mov r0, rINST, lsr #8 @ r0<- A+ 1684 mov r1, rINST, lsr #12 @ r1<- B 1685 and r0, r0, #15 1686 GET_VREG(r3, r1) @ r3<- vB 1687 GET_VREG(r2, r0) @ r2<- vA 1688 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1689 cmp r2, r3 @ compare (vA, vB) 1690 ble 1f @ branch to 1 if comparison failed 1691 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1692 movs r9, r9, asl #1 @ convert to bytes, check sign 1693 bmi common_backwardBranch @ yes, do periodic checks 16941: 1695#if defined(WITH_JIT) 1696 GET_JIT_PROF_TABLE(r0) 1697 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1698 b common_testUpdateProfile 1699#else 1700 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1701 GET_INST_OPCODE(ip) @ extract opcode from rINST 1702 GOTO_OPCODE(ip) @ jump to next instruction 1703#endif 1704 1705 1706 1707/* ------------------------------ */ 1708 .balign 64 1709.L_OP_IF_LE: /* 0x37 */ 1710/* File: armv5te/OP_IF_LE.S */ 1711/* File: armv5te/bincmp.S */ 1712 /* 1713 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1714 * fragment that specifies the *reverse* comparison to perform, e.g. 1715 * for "if-le" you would use "gt". 1716 * 1717 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1718 */ 1719 /* if-cmp vA, vB, +CCCC */ 1720 mov r0, rINST, lsr #8 @ r0<- A+ 1721 mov r1, rINST, lsr #12 @ r1<- B 1722 and r0, r0, #15 1723 GET_VREG(r3, r1) @ r3<- vB 1724 GET_VREG(r2, r0) @ r2<- vA 1725 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1726 cmp r2, r3 @ compare (vA, vB) 1727 bgt 1f @ branch to 1 if comparison failed 1728 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1729 movs r9, r9, asl #1 @ convert to bytes, check sign 1730 bmi common_backwardBranch @ yes, do periodic checks 17311: 1732#if defined(WITH_JIT) 1733 GET_JIT_PROF_TABLE(r0) 1734 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1735 b common_testUpdateProfile 1736#else 1737 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1738 GET_INST_OPCODE(ip) @ extract opcode from rINST 1739 GOTO_OPCODE(ip) @ jump to next instruction 1740#endif 1741 1742 1743 1744/* ------------------------------ */ 1745 .balign 64 1746.L_OP_IF_EQZ: /* 0x38 */ 1747/* File: armv5te/OP_IF_EQZ.S */ 1748/* File: armv5te/zcmp.S */ 1749 /* 1750 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1751 * fragment that specifies the *reverse* comparison to perform, e.g. 1752 * for "if-le" you would use "gt". 1753 * 1754 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1755 */ 1756 /* if-cmp vAA, +BBBB */ 1757 mov r0, rINST, lsr #8 @ r0<- AA 1758 GET_VREG(r2, r0) @ r2<- vAA 1759 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1760 cmp r2, #0 @ compare (vA, 0) 1761 bne 1f @ branch to 1 if comparison failed 1762 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1763 movs r9, r9, asl #1 @ convert to bytes, check sign 1764 bmi common_backwardBranch @ backward branch, do periodic checks 17651: 1766#if defined(WITH_JIT) 1767 GET_JIT_PROF_TABLE(r0) 1768 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1769 cmp r0,#0 1770 bne common_updateProfile 1771 GET_INST_OPCODE(ip) @ extract opcode from rINST 1772 GOTO_OPCODE(ip) @ jump to next instruction 1773#else 1774 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1775 GET_INST_OPCODE(ip) @ extract opcode from rINST 1776 GOTO_OPCODE(ip) @ jump to next instruction 1777#endif 1778 1779 1780 1781/* ------------------------------ */ 1782 .balign 64 1783.L_OP_IF_NEZ: /* 0x39 */ 1784/* File: armv5te/OP_IF_NEZ.S */ 1785/* File: armv5te/zcmp.S */ 1786 /* 1787 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1788 * fragment that specifies the *reverse* comparison to perform, e.g. 1789 * for "if-le" you would use "gt". 1790 * 1791 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1792 */ 1793 /* if-cmp vAA, +BBBB */ 1794 mov r0, rINST, lsr #8 @ r0<- AA 1795 GET_VREG(r2, r0) @ r2<- vAA 1796 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1797 cmp r2, #0 @ compare (vA, 0) 1798 beq 1f @ branch to 1 if comparison failed 1799 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1800 movs r9, r9, asl #1 @ convert to bytes, check sign 1801 bmi common_backwardBranch @ backward branch, do periodic checks 18021: 1803#if defined(WITH_JIT) 1804 GET_JIT_PROF_TABLE(r0) 1805 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1806 cmp r0,#0 1807 bne common_updateProfile 1808 GET_INST_OPCODE(ip) @ extract opcode from rINST 1809 GOTO_OPCODE(ip) @ jump to next instruction 1810#else 1811 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1812 GET_INST_OPCODE(ip) @ extract opcode from rINST 1813 GOTO_OPCODE(ip) @ jump to next instruction 1814#endif 1815 1816 1817 1818/* ------------------------------ */ 1819 .balign 64 1820.L_OP_IF_LTZ: /* 0x3a */ 1821/* File: armv5te/OP_IF_LTZ.S */ 1822/* File: armv5te/zcmp.S */ 1823 /* 1824 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1825 * fragment that specifies the *reverse* comparison to perform, e.g. 1826 * for "if-le" you would use "gt". 1827 * 1828 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1829 */ 1830 /* if-cmp vAA, +BBBB */ 1831 mov r0, rINST, lsr #8 @ r0<- AA 1832 GET_VREG(r2, r0) @ r2<- vAA 1833 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1834 cmp r2, #0 @ compare (vA, 0) 1835 bge 1f @ branch to 1 if comparison failed 1836 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1837 movs r9, r9, asl #1 @ convert to bytes, check sign 1838 bmi common_backwardBranch @ backward branch, do periodic checks 18391: 1840#if defined(WITH_JIT) 1841 GET_JIT_PROF_TABLE(r0) 1842 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1843 cmp r0,#0 1844 bne common_updateProfile 1845 GET_INST_OPCODE(ip) @ extract opcode from rINST 1846 GOTO_OPCODE(ip) @ jump to next instruction 1847#else 1848 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1849 GET_INST_OPCODE(ip) @ extract opcode from rINST 1850 GOTO_OPCODE(ip) @ jump to next instruction 1851#endif 1852 1853 1854 1855/* ------------------------------ */ 1856 .balign 64 1857.L_OP_IF_GEZ: /* 0x3b */ 1858/* File: armv5te/OP_IF_GEZ.S */ 1859/* File: armv5te/zcmp.S */ 1860 /* 1861 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1862 * fragment that specifies the *reverse* comparison to perform, e.g. 1863 * for "if-le" you would use "gt". 1864 * 1865 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1866 */ 1867 /* if-cmp vAA, +BBBB */ 1868 mov r0, rINST, lsr #8 @ r0<- AA 1869 GET_VREG(r2, r0) @ r2<- vAA 1870 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1871 cmp r2, #0 @ compare (vA, 0) 1872 blt 1f @ branch to 1 if comparison failed 1873 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1874 movs r9, r9, asl #1 @ convert to bytes, check sign 1875 bmi common_backwardBranch @ backward branch, do periodic checks 18761: 1877#if defined(WITH_JIT) 1878 GET_JIT_PROF_TABLE(r0) 1879 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1880 cmp r0,#0 1881 bne common_updateProfile 1882 GET_INST_OPCODE(ip) @ extract opcode from rINST 1883 GOTO_OPCODE(ip) @ jump to next instruction 1884#else 1885 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1886 GET_INST_OPCODE(ip) @ extract opcode from rINST 1887 GOTO_OPCODE(ip) @ jump to next instruction 1888#endif 1889 1890 1891 1892/* ------------------------------ */ 1893 .balign 64 1894.L_OP_IF_GTZ: /* 0x3c */ 1895/* File: armv5te/OP_IF_GTZ.S */ 1896/* File: armv5te/zcmp.S */ 1897 /* 1898 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1899 * fragment that specifies the *reverse* comparison to perform, e.g. 1900 * for "if-le" you would use "gt". 1901 * 1902 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1903 */ 1904 /* if-cmp vAA, +BBBB */ 1905 mov r0, rINST, lsr #8 @ r0<- AA 1906 GET_VREG(r2, r0) @ r2<- vAA 1907 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1908 cmp r2, #0 @ compare (vA, 0) 1909 ble 1f @ branch to 1 if comparison failed 1910 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1911 movs r9, r9, asl #1 @ convert to bytes, check sign 1912 bmi common_backwardBranch @ backward branch, do periodic checks 19131: 1914#if defined(WITH_JIT) 1915 GET_JIT_PROF_TABLE(r0) 1916 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1917 cmp r0,#0 1918 bne common_updateProfile 1919 GET_INST_OPCODE(ip) @ extract opcode from rINST 1920 GOTO_OPCODE(ip) @ jump to next instruction 1921#else 1922 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1923 GET_INST_OPCODE(ip) @ extract opcode from rINST 1924 GOTO_OPCODE(ip) @ jump to next instruction 1925#endif 1926 1927 1928 1929/* ------------------------------ */ 1930 .balign 64 1931.L_OP_IF_LEZ: /* 0x3d */ 1932/* File: armv5te/OP_IF_LEZ.S */ 1933/* File: armv5te/zcmp.S */ 1934 /* 1935 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1936 * fragment that specifies the *reverse* comparison to perform, e.g. 1937 * for "if-le" you would use "gt". 1938 * 1939 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1940 */ 1941 /* if-cmp vAA, +BBBB */ 1942 mov r0, rINST, lsr #8 @ r0<- AA 1943 GET_VREG(r2, r0) @ r2<- vAA 1944 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1945 cmp r2, #0 @ compare (vA, 0) 1946 bgt 1f @ branch to 1 if comparison failed 1947 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1948 movs r9, r9, asl #1 @ convert to bytes, check sign 1949 bmi common_backwardBranch @ backward branch, do periodic checks 19501: 1951#if defined(WITH_JIT) 1952 GET_JIT_PROF_TABLE(r0) 1953 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1954 cmp r0,#0 1955 bne common_updateProfile 1956 GET_INST_OPCODE(ip) @ extract opcode from rINST 1957 GOTO_OPCODE(ip) @ jump to next instruction 1958#else 1959 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1960 GET_INST_OPCODE(ip) @ extract opcode from rINST 1961 GOTO_OPCODE(ip) @ jump to next instruction 1962#endif 1963 1964 1965 1966/* ------------------------------ */ 1967 .balign 64 1968.L_OP_UNUSED_3E: /* 0x3e */ 1969/* File: armv5te/OP_UNUSED_3E.S */ 1970/* File: armv5te/unused.S */ 1971 bl common_abort 1972 1973 1974 1975/* ------------------------------ */ 1976 .balign 64 1977.L_OP_UNUSED_3F: /* 0x3f */ 1978/* File: armv5te/OP_UNUSED_3F.S */ 1979/* File: armv5te/unused.S */ 1980 bl common_abort 1981 1982 1983 1984/* ------------------------------ */ 1985 .balign 64 1986.L_OP_UNUSED_40: /* 0x40 */ 1987/* File: armv5te/OP_UNUSED_40.S */ 1988/* File: armv5te/unused.S */ 1989 bl common_abort 1990 1991 1992 1993/* ------------------------------ */ 1994 .balign 64 1995.L_OP_UNUSED_41: /* 0x41 */ 1996/* File: armv5te/OP_UNUSED_41.S */ 1997/* File: armv5te/unused.S */ 1998 bl common_abort 1999 2000 2001 2002/* ------------------------------ */ 2003 .balign 64 2004.L_OP_UNUSED_42: /* 0x42 */ 2005/* File: armv5te/OP_UNUSED_42.S */ 2006/* File: armv5te/unused.S */ 2007 bl common_abort 2008 2009 2010 2011/* ------------------------------ */ 2012 .balign 64 2013.L_OP_UNUSED_43: /* 0x43 */ 2014/* File: armv5te/OP_UNUSED_43.S */ 2015/* File: armv5te/unused.S */ 2016 bl common_abort 2017 2018 2019 2020/* ------------------------------ */ 2021 .balign 64 2022.L_OP_AGET: /* 0x44 */ 2023/* File: armv5te/OP_AGET.S */ 2024 /* 2025 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2026 * 2027 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2028 * instructions. We use a pair of FETCH_Bs instead. 2029 * 2030 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2031 */ 2032 /* op vAA, vBB, vCC */ 2033 FETCH_B(r2, 1, 0) @ r2<- BB 2034 mov r9, rINST, lsr #8 @ r9<- AA 2035 FETCH_B(r3, 1, 1) @ r3<- CC 2036 GET_VREG(r0, r2) @ r0<- vBB (array object) 2037 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2038 cmp r0, #0 @ null array object? 2039 beq common_errNullObject @ yes, bail 2040 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2041 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2042 cmp r1, r3 @ compare unsigned index, length 2043 bcs common_errArrayIndex @ index >= length, bail 2044 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2045 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2046 GET_INST_OPCODE(ip) @ extract opcode from rINST 2047 SET_VREG(r2, r9) @ vAA<- r2 2048 GOTO_OPCODE(ip) @ jump to next instruction 2049 2050 2051/* ------------------------------ */ 2052 .balign 64 2053.L_OP_AGET_WIDE: /* 0x45 */ 2054/* File: armv5te/OP_AGET_WIDE.S */ 2055 /* 2056 * Array get, 64 bits. vAA <- vBB[vCC]. 2057 * 2058 * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD. 2059 */ 2060 /* aget-wide vAA, vBB, vCC */ 2061 FETCH(r0, 1) @ r0<- CCBB 2062 mov r9, rINST, lsr #8 @ r9<- AA 2063 and r2, r0, #255 @ r2<- BB 2064 mov r3, r0, lsr #8 @ r3<- CC 2065 GET_VREG(r0, r2) @ r0<- vBB (array object) 2066 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2067 cmp r0, #0 @ null array object? 2068 beq common_errNullObject @ yes, bail 2069 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2070 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2071 cmp r1, r3 @ compare unsigned index, length 2072 bcc .LOP_AGET_WIDE_finish @ okay, continue below 2073 b common_errArrayIndex @ index >= length, bail 2074 @ May want to swap the order of these two branches depending on how the 2075 @ branch prediction (if any) handles conditional forward branches vs. 2076 @ unconditional forward branches. 2077 2078/* ------------------------------ */ 2079 .balign 64 2080.L_OP_AGET_OBJECT: /* 0x46 */ 2081/* File: armv5te/OP_AGET_OBJECT.S */ 2082/* File: armv5te/OP_AGET.S */ 2083 /* 2084 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2085 * 2086 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2087 * instructions. We use a pair of FETCH_Bs instead. 2088 * 2089 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2090 */ 2091 /* op vAA, vBB, vCC */ 2092 FETCH_B(r2, 1, 0) @ r2<- BB 2093 mov r9, rINST, lsr #8 @ r9<- AA 2094 FETCH_B(r3, 1, 1) @ r3<- CC 2095 GET_VREG(r0, r2) @ r0<- vBB (array object) 2096 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2097 cmp r0, #0 @ null array object? 2098 beq common_errNullObject @ yes, bail 2099 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2100 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2101 cmp r1, r3 @ compare unsigned index, length 2102 bcs common_errArrayIndex @ index >= length, bail 2103 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2104 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2105 GET_INST_OPCODE(ip) @ extract opcode from rINST 2106 SET_VREG(r2, r9) @ vAA<- r2 2107 GOTO_OPCODE(ip) @ jump to next instruction 2108 2109 2110 2111/* ------------------------------ */ 2112 .balign 64 2113.L_OP_AGET_BOOLEAN: /* 0x47 */ 2114/* File: armv5te/OP_AGET_BOOLEAN.S */ 2115/* File: armv5te/OP_AGET.S */ 2116 /* 2117 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2118 * 2119 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2120 * instructions. We use a pair of FETCH_Bs instead. 2121 * 2122 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2123 */ 2124 /* op vAA, vBB, vCC */ 2125 FETCH_B(r2, 1, 0) @ r2<- BB 2126 mov r9, rINST, lsr #8 @ r9<- AA 2127 FETCH_B(r3, 1, 1) @ r3<- CC 2128 GET_VREG(r0, r2) @ r0<- vBB (array object) 2129 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2130 cmp r0, #0 @ null array object? 2131 beq common_errNullObject @ yes, bail 2132 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2133 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2134 cmp r1, r3 @ compare unsigned index, length 2135 bcs common_errArrayIndex @ index >= length, bail 2136 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2137 ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2138 GET_INST_OPCODE(ip) @ extract opcode from rINST 2139 SET_VREG(r2, r9) @ vAA<- r2 2140 GOTO_OPCODE(ip) @ jump to next instruction 2141 2142 2143 2144/* ------------------------------ */ 2145 .balign 64 2146.L_OP_AGET_BYTE: /* 0x48 */ 2147/* File: armv5te/OP_AGET_BYTE.S */ 2148/* File: armv5te/OP_AGET.S */ 2149 /* 2150 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2151 * 2152 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2153 * instructions. We use a pair of FETCH_Bs instead. 2154 * 2155 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2156 */ 2157 /* op vAA, vBB, vCC */ 2158 FETCH_B(r2, 1, 0) @ r2<- BB 2159 mov r9, rINST, lsr #8 @ r9<- AA 2160 FETCH_B(r3, 1, 1) @ r3<- CC 2161 GET_VREG(r0, r2) @ r0<- vBB (array object) 2162 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2163 cmp r0, #0 @ null array object? 2164 beq common_errNullObject @ yes, bail 2165 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2166 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2167 cmp r1, r3 @ compare unsigned index, length 2168 bcs common_errArrayIndex @ index >= length, bail 2169 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2170 ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2171 GET_INST_OPCODE(ip) @ extract opcode from rINST 2172 SET_VREG(r2, r9) @ vAA<- r2 2173 GOTO_OPCODE(ip) @ jump to next instruction 2174 2175 2176 2177/* ------------------------------ */ 2178 .balign 64 2179.L_OP_AGET_CHAR: /* 0x49 */ 2180/* File: armv5te/OP_AGET_CHAR.S */ 2181/* File: armv5te/OP_AGET.S */ 2182 /* 2183 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2184 * 2185 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2186 * instructions. We use a pair of FETCH_Bs instead. 2187 * 2188 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2189 */ 2190 /* op vAA, vBB, vCC */ 2191 FETCH_B(r2, 1, 0) @ r2<- BB 2192 mov r9, rINST, lsr #8 @ r9<- AA 2193 FETCH_B(r3, 1, 1) @ r3<- CC 2194 GET_VREG(r0, r2) @ r0<- vBB (array object) 2195 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2196 cmp r0, #0 @ null array object? 2197 beq common_errNullObject @ yes, bail 2198 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2199 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2200 cmp r1, r3 @ compare unsigned index, length 2201 bcs common_errArrayIndex @ index >= length, bail 2202 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2203 ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2204 GET_INST_OPCODE(ip) @ extract opcode from rINST 2205 SET_VREG(r2, r9) @ vAA<- r2 2206 GOTO_OPCODE(ip) @ jump to next instruction 2207 2208 2209 2210/* ------------------------------ */ 2211 .balign 64 2212.L_OP_AGET_SHORT: /* 0x4a */ 2213/* File: armv5te/OP_AGET_SHORT.S */ 2214/* File: armv5te/OP_AGET.S */ 2215 /* 2216 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2217 * 2218 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2219 * instructions. We use a pair of FETCH_Bs instead. 2220 * 2221 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2222 */ 2223 /* op vAA, vBB, vCC */ 2224 FETCH_B(r2, 1, 0) @ r2<- BB 2225 mov r9, rINST, lsr #8 @ r9<- AA 2226 FETCH_B(r3, 1, 1) @ r3<- CC 2227 GET_VREG(r0, r2) @ r0<- vBB (array object) 2228 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2229 cmp r0, #0 @ null array object? 2230 beq common_errNullObject @ yes, bail 2231 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2232 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2233 cmp r1, r3 @ compare unsigned index, length 2234 bcs common_errArrayIndex @ index >= length, bail 2235 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2236 ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2237 GET_INST_OPCODE(ip) @ extract opcode from rINST 2238 SET_VREG(r2, r9) @ vAA<- r2 2239 GOTO_OPCODE(ip) @ jump to next instruction 2240 2241 2242 2243/* ------------------------------ */ 2244 .balign 64 2245.L_OP_APUT: /* 0x4b */ 2246/* File: armv5te/OP_APUT.S */ 2247 /* 2248 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2249 * 2250 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2251 * instructions. We use a pair of FETCH_Bs instead. 2252 * 2253 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2254 */ 2255 /* op vAA, vBB, vCC */ 2256 FETCH_B(r2, 1, 0) @ r2<- BB 2257 mov r9, rINST, lsr #8 @ r9<- AA 2258 FETCH_B(r3, 1, 1) @ r3<- CC 2259 GET_VREG(r0, r2) @ r0<- vBB (array object) 2260 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2261 cmp r0, #0 @ null array object? 2262 beq common_errNullObject @ yes, bail 2263 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2264 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2265 cmp r1, r3 @ compare unsigned index, length 2266 bcs common_errArrayIndex @ index >= length, bail 2267 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2268 GET_VREG(r2, r9) @ r2<- vAA 2269 GET_INST_OPCODE(ip) @ extract opcode from rINST 2270 str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2271 GOTO_OPCODE(ip) @ jump to next instruction 2272 2273 2274/* ------------------------------ */ 2275 .balign 64 2276.L_OP_APUT_WIDE: /* 0x4c */ 2277/* File: armv5te/OP_APUT_WIDE.S */ 2278 /* 2279 * Array put, 64 bits. vBB[vCC] <- vAA. 2280 * 2281 * Arrays of long/double are 64-bit aligned, so it's okay to use STRD. 2282 */ 2283 /* aput-wide vAA, vBB, vCC */ 2284 FETCH(r0, 1) @ r0<- CCBB 2285 mov r9, rINST, lsr #8 @ r9<- AA 2286 and r2, r0, #255 @ r2<- BB 2287 mov r3, r0, lsr #8 @ r3<- CC 2288 GET_VREG(r0, r2) @ r0<- vBB (array object) 2289 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2290 cmp r0, #0 @ null array object? 2291 beq common_errNullObject @ yes, bail 2292 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2293 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2294 cmp r1, r3 @ compare unsigned index, length 2295 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2296 bcc .LOP_APUT_WIDE_finish @ okay, continue below 2297 b common_errArrayIndex @ index >= length, bail 2298 @ May want to swap the order of these two branches depending on how the 2299 @ branch prediction (if any) handles conditional forward branches vs. 2300 @ unconditional forward branches. 2301 2302/* ------------------------------ */ 2303 .balign 64 2304.L_OP_APUT_OBJECT: /* 0x4d */ 2305/* File: armv5te/OP_APUT_OBJECT.S */ 2306 /* 2307 * Store an object into an array. vBB[vCC] <- vAA. 2308 * 2309 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2310 * instructions. We use a pair of FETCH_Bs instead. 2311 */ 2312 /* op vAA, vBB, vCC */ 2313 FETCH(r0, 1) @ r0<- CCBB 2314 mov r9, rINST, lsr #8 @ r9<- AA 2315 and r2, r0, #255 @ r2<- BB 2316 mov r3, r0, lsr #8 @ r3<- CC 2317 GET_VREG(r1, r2) @ r1<- vBB (array object) 2318 GET_VREG(r0, r3) @ r0<- vCC (requested index) 2319 cmp r1, #0 @ null array object? 2320 GET_VREG(r9, r9) @ r9<- vAA 2321 beq common_errNullObject @ yes, bail 2322 ldr r3, [r1, #offArrayObject_length] @ r3<- arrayObj->length 2323 add r10, r1, r0, lsl #2 @ r10<- arrayObj + index*width 2324 cmp r0, r3 @ compare unsigned index, length 2325 bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on 2326 b common_errArrayIndex @ index >= length, bail 2327 2328 2329/* ------------------------------ */ 2330 .balign 64 2331.L_OP_APUT_BOOLEAN: /* 0x4e */ 2332/* File: armv5te/OP_APUT_BOOLEAN.S */ 2333/* File: armv5te/OP_APUT.S */ 2334 /* 2335 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2336 * 2337 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2338 * instructions. We use a pair of FETCH_Bs instead. 2339 * 2340 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2341 */ 2342 /* op vAA, vBB, vCC */ 2343 FETCH_B(r2, 1, 0) @ r2<- BB 2344 mov r9, rINST, lsr #8 @ r9<- AA 2345 FETCH_B(r3, 1, 1) @ r3<- CC 2346 GET_VREG(r0, r2) @ r0<- vBB (array object) 2347 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2348 cmp r0, #0 @ null array object? 2349 beq common_errNullObject @ yes, bail 2350 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2351 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2352 cmp r1, r3 @ compare unsigned index, length 2353 bcs common_errArrayIndex @ index >= length, bail 2354 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2355 GET_VREG(r2, r9) @ r2<- vAA 2356 GET_INST_OPCODE(ip) @ extract opcode from rINST 2357 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2358 GOTO_OPCODE(ip) @ jump to next instruction 2359 2360 2361 2362/* ------------------------------ */ 2363 .balign 64 2364.L_OP_APUT_BYTE: /* 0x4f */ 2365/* File: armv5te/OP_APUT_BYTE.S */ 2366/* File: armv5te/OP_APUT.S */ 2367 /* 2368 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2369 * 2370 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2371 * instructions. We use a pair of FETCH_Bs instead. 2372 * 2373 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2374 */ 2375 /* op vAA, vBB, vCC */ 2376 FETCH_B(r2, 1, 0) @ r2<- BB 2377 mov r9, rINST, lsr #8 @ r9<- AA 2378 FETCH_B(r3, 1, 1) @ r3<- CC 2379 GET_VREG(r0, r2) @ r0<- vBB (array object) 2380 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2381 cmp r0, #0 @ null array object? 2382 beq common_errNullObject @ yes, bail 2383 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2384 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2385 cmp r1, r3 @ compare unsigned index, length 2386 bcs common_errArrayIndex @ index >= length, bail 2387 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2388 GET_VREG(r2, r9) @ r2<- vAA 2389 GET_INST_OPCODE(ip) @ extract opcode from rINST 2390 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2391 GOTO_OPCODE(ip) @ jump to next instruction 2392 2393 2394 2395/* ------------------------------ */ 2396 .balign 64 2397.L_OP_APUT_CHAR: /* 0x50 */ 2398/* File: armv5te/OP_APUT_CHAR.S */ 2399/* File: armv5te/OP_APUT.S */ 2400 /* 2401 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2402 * 2403 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2404 * instructions. We use a pair of FETCH_Bs instead. 2405 * 2406 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2407 */ 2408 /* op vAA, vBB, vCC */ 2409 FETCH_B(r2, 1, 0) @ r2<- BB 2410 mov r9, rINST, lsr #8 @ r9<- AA 2411 FETCH_B(r3, 1, 1) @ r3<- CC 2412 GET_VREG(r0, r2) @ r0<- vBB (array object) 2413 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2414 cmp r0, #0 @ null array object? 2415 beq common_errNullObject @ yes, bail 2416 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2417 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2418 cmp r1, r3 @ compare unsigned index, length 2419 bcs common_errArrayIndex @ index >= length, bail 2420 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2421 GET_VREG(r2, r9) @ r2<- vAA 2422 GET_INST_OPCODE(ip) @ extract opcode from rINST 2423 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2424 GOTO_OPCODE(ip) @ jump to next instruction 2425 2426 2427 2428/* ------------------------------ */ 2429 .balign 64 2430.L_OP_APUT_SHORT: /* 0x51 */ 2431/* File: armv5te/OP_APUT_SHORT.S */ 2432/* File: armv5te/OP_APUT.S */ 2433 /* 2434 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2435 * 2436 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2437 * instructions. We use a pair of FETCH_Bs instead. 2438 * 2439 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2440 */ 2441 /* op vAA, vBB, vCC */ 2442 FETCH_B(r2, 1, 0) @ r2<- BB 2443 mov r9, rINST, lsr #8 @ r9<- AA 2444 FETCH_B(r3, 1, 1) @ r3<- CC 2445 GET_VREG(r0, r2) @ r0<- vBB (array object) 2446 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2447 cmp r0, #0 @ null array object? 2448 beq common_errNullObject @ yes, bail 2449 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2450 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2451 cmp r1, r3 @ compare unsigned index, length 2452 bcs common_errArrayIndex @ index >= length, bail 2453 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2454 GET_VREG(r2, r9) @ r2<- vAA 2455 GET_INST_OPCODE(ip) @ extract opcode from rINST 2456 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2457 GOTO_OPCODE(ip) @ jump to next instruction 2458 2459 2460 2461/* ------------------------------ */ 2462 .balign 64 2463.L_OP_IGET: /* 0x52 */ 2464/* File: armv5te/OP_IGET.S */ 2465 /* 2466 * General 32-bit instance field get. 2467 * 2468 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2469 */ 2470 /* op vA, vB, field@CCCC */ 2471 mov r0, rINST, lsr #12 @ r0<- B 2472 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2473 FETCH(r1, 1) @ r1<- field ref CCCC 2474 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2475 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2476 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2477 cmp r0, #0 @ is resolved entry null? 2478 bne .LOP_IGET_finish @ no, already resolved 24798: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2480 EXPORT_PC() @ resolve() could throw 2481 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2482 bl dvmResolveInstField @ r0<- resolved InstField ptr 2483 cmp r0, #0 2484 bne .LOP_IGET_finish 2485 b common_exceptionThrown 2486 2487/* ------------------------------ */ 2488 .balign 64 2489.L_OP_IGET_WIDE: /* 0x53 */ 2490/* File: armv5te/OP_IGET_WIDE.S */ 2491 /* 2492 * Wide 32-bit instance field get. 2493 */ 2494 /* iget-wide vA, vB, field@CCCC */ 2495 mov r0, rINST, lsr #12 @ r0<- B 2496 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2497 FETCH(r1, 1) @ r1<- field ref CCCC 2498 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2499 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2500 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2501 cmp r0, #0 @ is resolved entry null? 2502 bne .LOP_IGET_WIDE_finish @ no, already resolved 25038: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2504 EXPORT_PC() @ resolve() could throw 2505 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2506 bl dvmResolveInstField @ r0<- resolved InstField ptr 2507 cmp r0, #0 2508 bne .LOP_IGET_WIDE_finish 2509 b common_exceptionThrown 2510 2511/* ------------------------------ */ 2512 .balign 64 2513.L_OP_IGET_OBJECT: /* 0x54 */ 2514/* File: armv5te/OP_IGET_OBJECT.S */ 2515/* File: armv5te/OP_IGET.S */ 2516 /* 2517 * General 32-bit instance field get. 2518 * 2519 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2520 */ 2521 /* op vA, vB, field@CCCC */ 2522 mov r0, rINST, lsr #12 @ r0<- B 2523 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2524 FETCH(r1, 1) @ r1<- field ref CCCC 2525 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2526 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2527 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2528 cmp r0, #0 @ is resolved entry null? 2529 bne .LOP_IGET_OBJECT_finish @ no, already resolved 25308: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2531 EXPORT_PC() @ resolve() could throw 2532 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2533 bl dvmResolveInstField @ r0<- resolved InstField ptr 2534 cmp r0, #0 2535 bne .LOP_IGET_OBJECT_finish 2536 b common_exceptionThrown 2537 2538 2539/* ------------------------------ */ 2540 .balign 64 2541.L_OP_IGET_BOOLEAN: /* 0x55 */ 2542/* File: armv5te/OP_IGET_BOOLEAN.S */ 2543@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" } 2544/* File: armv5te/OP_IGET.S */ 2545 /* 2546 * General 32-bit instance field get. 2547 * 2548 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2549 */ 2550 /* op vA, vB, field@CCCC */ 2551 mov r0, rINST, lsr #12 @ r0<- B 2552 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2553 FETCH(r1, 1) @ r1<- field ref CCCC 2554 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2555 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2556 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2557 cmp r0, #0 @ is resolved entry null? 2558 bne .LOP_IGET_BOOLEAN_finish @ no, already resolved 25598: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2560 EXPORT_PC() @ resolve() could throw 2561 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2562 bl dvmResolveInstField @ r0<- resolved InstField ptr 2563 cmp r0, #0 2564 bne .LOP_IGET_BOOLEAN_finish 2565 b common_exceptionThrown 2566 2567 2568/* ------------------------------ */ 2569 .balign 64 2570.L_OP_IGET_BYTE: /* 0x56 */ 2571/* File: armv5te/OP_IGET_BYTE.S */ 2572@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" } 2573/* File: armv5te/OP_IGET.S */ 2574 /* 2575 * General 32-bit instance field get. 2576 * 2577 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2578 */ 2579 /* op vA, vB, field@CCCC */ 2580 mov r0, rINST, lsr #12 @ r0<- B 2581 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2582 FETCH(r1, 1) @ r1<- field ref CCCC 2583 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2584 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2585 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2586 cmp r0, #0 @ is resolved entry null? 2587 bne .LOP_IGET_BYTE_finish @ no, already resolved 25888: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2589 EXPORT_PC() @ resolve() could throw 2590 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2591 bl dvmResolveInstField @ r0<- resolved InstField ptr 2592 cmp r0, #0 2593 bne .LOP_IGET_BYTE_finish 2594 b common_exceptionThrown 2595 2596 2597/* ------------------------------ */ 2598 .balign 64 2599.L_OP_IGET_CHAR: /* 0x57 */ 2600/* File: armv5te/OP_IGET_CHAR.S */ 2601@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" } 2602/* File: armv5te/OP_IGET.S */ 2603 /* 2604 * General 32-bit instance field get. 2605 * 2606 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2607 */ 2608 /* op vA, vB, field@CCCC */ 2609 mov r0, rINST, lsr #12 @ r0<- B 2610 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2611 FETCH(r1, 1) @ r1<- field ref CCCC 2612 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2613 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2614 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2615 cmp r0, #0 @ is resolved entry null? 2616 bne .LOP_IGET_CHAR_finish @ no, already resolved 26178: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2618 EXPORT_PC() @ resolve() could throw 2619 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2620 bl dvmResolveInstField @ r0<- resolved InstField ptr 2621 cmp r0, #0 2622 bne .LOP_IGET_CHAR_finish 2623 b common_exceptionThrown 2624 2625 2626/* ------------------------------ */ 2627 .balign 64 2628.L_OP_IGET_SHORT: /* 0x58 */ 2629/* File: armv5te/OP_IGET_SHORT.S */ 2630@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" } 2631/* File: armv5te/OP_IGET.S */ 2632 /* 2633 * General 32-bit instance field get. 2634 * 2635 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2636 */ 2637 /* op vA, vB, field@CCCC */ 2638 mov r0, rINST, lsr #12 @ r0<- B 2639 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2640 FETCH(r1, 1) @ r1<- field ref CCCC 2641 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2642 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2643 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2644 cmp r0, #0 @ is resolved entry null? 2645 bne .LOP_IGET_SHORT_finish @ no, already resolved 26468: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2647 EXPORT_PC() @ resolve() could throw 2648 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2649 bl dvmResolveInstField @ r0<- resolved InstField ptr 2650 cmp r0, #0 2651 bne .LOP_IGET_SHORT_finish 2652 b common_exceptionThrown 2653 2654 2655/* ------------------------------ */ 2656 .balign 64 2657.L_OP_IPUT: /* 0x59 */ 2658/* File: armv5te/OP_IPUT.S */ 2659 /* 2660 * General 32-bit instance field put. 2661 * 2662 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2663 */ 2664 /* op vA, vB, field@CCCC */ 2665 mov r0, rINST, lsr #12 @ r0<- B 2666 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2667 FETCH(r1, 1) @ r1<- field ref CCCC 2668 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2669 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2670 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2671 cmp r0, #0 @ is resolved entry null? 2672 bne .LOP_IPUT_finish @ no, already resolved 26738: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2674 EXPORT_PC() @ resolve() could throw 2675 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2676 bl dvmResolveInstField @ r0<- resolved InstField ptr 2677 cmp r0, #0 @ success? 2678 bne .LOP_IPUT_finish @ yes, finish up 2679 b common_exceptionThrown 2680 2681/* ------------------------------ */ 2682 .balign 64 2683.L_OP_IPUT_WIDE: /* 0x5a */ 2684/* File: armv5te/OP_IPUT_WIDE.S */ 2685 /* iput-wide vA, vB, field@CCCC */ 2686 mov r0, rINST, lsr #12 @ r0<- B 2687 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2688 FETCH(r1, 1) @ r1<- field ref CCCC 2689 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2690 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2691 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2692 cmp r0, #0 @ is resolved entry null? 2693 bne .LOP_IPUT_WIDE_finish @ no, already resolved 26948: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2695 EXPORT_PC() @ resolve() could throw 2696 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2697 bl dvmResolveInstField @ r0<- resolved InstField ptr 2698 cmp r0, #0 @ success? 2699 bne .LOP_IPUT_WIDE_finish @ yes, finish up 2700 b common_exceptionThrown 2701 2702/* ------------------------------ */ 2703 .balign 64 2704.L_OP_IPUT_OBJECT: /* 0x5b */ 2705/* File: armv5te/OP_IPUT_OBJECT.S */ 2706/* File: armv5te/OP_IPUT.S */ 2707 /* 2708 * General 32-bit instance field put. 2709 * 2710 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2711 */ 2712 /* op vA, vB, field@CCCC */ 2713 mov r0, rINST, lsr #12 @ r0<- B 2714 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2715 FETCH(r1, 1) @ r1<- field ref CCCC 2716 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2717 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2718 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2719 cmp r0, #0 @ is resolved entry null? 2720 bne .LOP_IPUT_OBJECT_finish @ no, already resolved 27218: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2722 EXPORT_PC() @ resolve() could throw 2723 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2724 bl dvmResolveInstField @ r0<- resolved InstField ptr 2725 cmp r0, #0 @ success? 2726 bne .LOP_IPUT_OBJECT_finish @ yes, finish up 2727 b common_exceptionThrown 2728 2729 2730/* ------------------------------ */ 2731 .balign 64 2732.L_OP_IPUT_BOOLEAN: /* 0x5c */ 2733/* File: armv5te/OP_IPUT_BOOLEAN.S */ 2734@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" } 2735/* File: armv5te/OP_IPUT.S */ 2736 /* 2737 * General 32-bit instance field put. 2738 * 2739 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2740 */ 2741 /* op vA, vB, field@CCCC */ 2742 mov r0, rINST, lsr #12 @ r0<- B 2743 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2744 FETCH(r1, 1) @ r1<- field ref CCCC 2745 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2746 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2747 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2748 cmp r0, #0 @ is resolved entry null? 2749 bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved 27508: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2751 EXPORT_PC() @ resolve() could throw 2752 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2753 bl dvmResolveInstField @ r0<- resolved InstField ptr 2754 cmp r0, #0 @ success? 2755 bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up 2756 b common_exceptionThrown 2757 2758 2759/* ------------------------------ */ 2760 .balign 64 2761.L_OP_IPUT_BYTE: /* 0x5d */ 2762/* File: armv5te/OP_IPUT_BYTE.S */ 2763@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" } 2764/* File: armv5te/OP_IPUT.S */ 2765 /* 2766 * General 32-bit instance field put. 2767 * 2768 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2769 */ 2770 /* op vA, vB, field@CCCC */ 2771 mov r0, rINST, lsr #12 @ r0<- B 2772 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2773 FETCH(r1, 1) @ r1<- field ref CCCC 2774 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2775 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2776 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2777 cmp r0, #0 @ is resolved entry null? 2778 bne .LOP_IPUT_BYTE_finish @ no, already resolved 27798: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2780 EXPORT_PC() @ resolve() could throw 2781 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2782 bl dvmResolveInstField @ r0<- resolved InstField ptr 2783 cmp r0, #0 @ success? 2784 bne .LOP_IPUT_BYTE_finish @ yes, finish up 2785 b common_exceptionThrown 2786 2787 2788/* ------------------------------ */ 2789 .balign 64 2790.L_OP_IPUT_CHAR: /* 0x5e */ 2791/* File: armv5te/OP_IPUT_CHAR.S */ 2792@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" } 2793/* File: armv5te/OP_IPUT.S */ 2794 /* 2795 * General 32-bit instance field put. 2796 * 2797 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2798 */ 2799 /* op vA, vB, field@CCCC */ 2800 mov r0, rINST, lsr #12 @ r0<- B 2801 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2802 FETCH(r1, 1) @ r1<- field ref CCCC 2803 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2804 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2805 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2806 cmp r0, #0 @ is resolved entry null? 2807 bne .LOP_IPUT_CHAR_finish @ no, already resolved 28088: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2809 EXPORT_PC() @ resolve() could throw 2810 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2811 bl dvmResolveInstField @ r0<- resolved InstField ptr 2812 cmp r0, #0 @ success? 2813 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2814 b common_exceptionThrown 2815 2816 2817/* ------------------------------ */ 2818 .balign 64 2819.L_OP_IPUT_SHORT: /* 0x5f */ 2820/* File: armv5te/OP_IPUT_SHORT.S */ 2821@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2822/* File: armv5te/OP_IPUT.S */ 2823 /* 2824 * General 32-bit instance field put. 2825 * 2826 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2827 */ 2828 /* op vA, vB, field@CCCC */ 2829 mov r0, rINST, lsr #12 @ r0<- B 2830 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2831 FETCH(r1, 1) @ r1<- field ref CCCC 2832 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2833 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2834 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2835 cmp r0, #0 @ is resolved entry null? 2836 bne .LOP_IPUT_SHORT_finish @ no, already resolved 28378: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2838 EXPORT_PC() @ resolve() could throw 2839 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2840 bl dvmResolveInstField @ r0<- resolved InstField ptr 2841 cmp r0, #0 @ success? 2842 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2843 b common_exceptionThrown 2844 2845 2846/* ------------------------------ */ 2847 .balign 64 2848.L_OP_SGET: /* 0x60 */ 2849/* File: armv5te/OP_SGET.S */ 2850 /* 2851 * General 32-bit SGET handler. 2852 * 2853 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2854 */ 2855 /* op vAA, field@BBBB */ 2856 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2857 FETCH(r1, 1) @ r1<- field ref BBBB 2858 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2859 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2860 cmp r0, #0 @ is resolved entry null? 2861 beq .LOP_SGET_resolve @ yes, do resolve 2862.LOP_SGET_finish: @ field ptr in r0 2863 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2864 mov r2, rINST, lsr #8 @ r2<- AA 2865 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2866 SET_VREG(r1, r2) @ fp[AA]<- r1 2867 GET_INST_OPCODE(ip) @ extract opcode from rINST 2868 GOTO_OPCODE(ip) @ jump to next instruction 2869 2870/* ------------------------------ */ 2871 .balign 64 2872.L_OP_SGET_WIDE: /* 0x61 */ 2873/* File: armv5te/OP_SGET_WIDE.S */ 2874 /* 2875 * 64-bit SGET handler. 2876 */ 2877 /* sget-wide vAA, field@BBBB */ 2878 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2879 FETCH(r1, 1) @ r1<- field ref BBBB 2880 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2881 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2882 cmp r0, #0 @ is resolved entry null? 2883 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2884.LOP_SGET_WIDE_finish: 2885 mov r1, rINST, lsr #8 @ r1<- AA 2886 ldrd r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned) 2887 add r1, rFP, r1, lsl #2 @ r1<- &fp[AA] 2888 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2889 stmia r1, {r2-r3} @ vAA/vAA+1<- r2/r3 2890 GET_INST_OPCODE(ip) @ extract opcode from rINST 2891 GOTO_OPCODE(ip) @ jump to next instruction 2892 2893/* ------------------------------ */ 2894 .balign 64 2895.L_OP_SGET_OBJECT: /* 0x62 */ 2896/* File: armv5te/OP_SGET_OBJECT.S */ 2897/* File: armv5te/OP_SGET.S */ 2898 /* 2899 * General 32-bit SGET handler. 2900 * 2901 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2902 */ 2903 /* op vAA, field@BBBB */ 2904 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2905 FETCH(r1, 1) @ r1<- field ref BBBB 2906 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2907 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2908 cmp r0, #0 @ is resolved entry null? 2909 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2910.LOP_SGET_OBJECT_finish: @ field ptr in r0 2911 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2912 mov r2, rINST, lsr #8 @ r2<- AA 2913 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2914 SET_VREG(r1, r2) @ fp[AA]<- r1 2915 GET_INST_OPCODE(ip) @ extract opcode from rINST 2916 GOTO_OPCODE(ip) @ jump to next instruction 2917 2918 2919/* ------------------------------ */ 2920 .balign 64 2921.L_OP_SGET_BOOLEAN: /* 0x63 */ 2922/* File: armv5te/OP_SGET_BOOLEAN.S */ 2923/* File: armv5te/OP_SGET.S */ 2924 /* 2925 * General 32-bit SGET handler. 2926 * 2927 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2928 */ 2929 /* op vAA, field@BBBB */ 2930 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2931 FETCH(r1, 1) @ r1<- field ref BBBB 2932 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2933 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2934 cmp r0, #0 @ is resolved entry null? 2935 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2936.LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2937 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2938 mov r2, rINST, lsr #8 @ r2<- AA 2939 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2940 SET_VREG(r1, r2) @ fp[AA]<- r1 2941 GET_INST_OPCODE(ip) @ extract opcode from rINST 2942 GOTO_OPCODE(ip) @ jump to next instruction 2943 2944 2945/* ------------------------------ */ 2946 .balign 64 2947.L_OP_SGET_BYTE: /* 0x64 */ 2948/* File: armv5te/OP_SGET_BYTE.S */ 2949/* File: armv5te/OP_SGET.S */ 2950 /* 2951 * General 32-bit SGET handler. 2952 * 2953 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2954 */ 2955 /* op vAA, field@BBBB */ 2956 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2957 FETCH(r1, 1) @ r1<- field ref BBBB 2958 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2959 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2960 cmp r0, #0 @ is resolved entry null? 2961 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2962.LOP_SGET_BYTE_finish: @ field ptr in r0 2963 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2964 mov r2, rINST, lsr #8 @ r2<- AA 2965 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2966 SET_VREG(r1, r2) @ fp[AA]<- r1 2967 GET_INST_OPCODE(ip) @ extract opcode from rINST 2968 GOTO_OPCODE(ip) @ jump to next instruction 2969 2970 2971/* ------------------------------ */ 2972 .balign 64 2973.L_OP_SGET_CHAR: /* 0x65 */ 2974/* File: armv5te/OP_SGET_CHAR.S */ 2975/* File: armv5te/OP_SGET.S */ 2976 /* 2977 * General 32-bit SGET handler. 2978 * 2979 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2980 */ 2981 /* op vAA, field@BBBB */ 2982 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2983 FETCH(r1, 1) @ r1<- field ref BBBB 2984 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2985 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2986 cmp r0, #0 @ is resolved entry null? 2987 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2988.LOP_SGET_CHAR_finish: @ field ptr in r0 2989 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2990 mov r2, rINST, lsr #8 @ r2<- AA 2991 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2992 SET_VREG(r1, r2) @ fp[AA]<- r1 2993 GET_INST_OPCODE(ip) @ extract opcode from rINST 2994 GOTO_OPCODE(ip) @ jump to next instruction 2995 2996 2997/* ------------------------------ */ 2998 .balign 64 2999.L_OP_SGET_SHORT: /* 0x66 */ 3000/* File: armv5te/OP_SGET_SHORT.S */ 3001/* File: armv5te/OP_SGET.S */ 3002 /* 3003 * General 32-bit SGET handler. 3004 * 3005 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 3006 */ 3007 /* op vAA, field@BBBB */ 3008 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3009 FETCH(r1, 1) @ r1<- field ref BBBB 3010 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3011 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3012 cmp r0, #0 @ is resolved entry null? 3013 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 3014.LOP_SGET_SHORT_finish: @ field ptr in r0 3015 ldr r1, [r0, #offStaticField_value] @ r1<- field value 3016 mov r2, rINST, lsr #8 @ r2<- AA 3017 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3018 SET_VREG(r1, r2) @ fp[AA]<- r1 3019 GET_INST_OPCODE(ip) @ extract opcode from rINST 3020 GOTO_OPCODE(ip) @ jump to next instruction 3021 3022 3023/* ------------------------------ */ 3024 .balign 64 3025.L_OP_SPUT: /* 0x67 */ 3026/* File: armv5te/OP_SPUT.S */ 3027 /* 3028 * General 32-bit SPUT handler. 3029 * 3030 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3031 */ 3032 /* op vAA, field@BBBB */ 3033 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3034 FETCH(r1, 1) @ r1<- field ref BBBB 3035 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3036 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3037 cmp r0, #0 @ is resolved entry null? 3038 beq .LOP_SPUT_resolve @ yes, do resolve 3039.LOP_SPUT_finish: @ field ptr in r0 3040 mov r2, rINST, lsr #8 @ r2<- AA 3041 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3042 GET_VREG(r1, r2) @ r1<- fp[AA] 3043 GET_INST_OPCODE(ip) @ extract opcode from rINST 3044 str r1, [r0, #offStaticField_value] @ field<- vAA 3045 GOTO_OPCODE(ip) @ jump to next instruction 3046 3047/* ------------------------------ */ 3048 .balign 64 3049.L_OP_SPUT_WIDE: /* 0x68 */ 3050/* File: armv5te/OP_SPUT_WIDE.S */ 3051 /* 3052 * 64-bit SPUT handler. 3053 */ 3054 /* sput-wide vAA, field@BBBB */ 3055 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3056 FETCH(r1, 1) @ r1<- field ref BBBB 3057 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3058 mov r9, rINST, lsr #8 @ r9<- AA 3059 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3060 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 3061 cmp r0, #0 @ is resolved entry null? 3062 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 3063.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9 3064 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3065 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 3066 GET_INST_OPCODE(ip) @ extract opcode from rINST 3067 strd r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1 3068 GOTO_OPCODE(ip) @ jump to next instruction 3069 3070/* ------------------------------ */ 3071 .balign 64 3072.L_OP_SPUT_OBJECT: /* 0x69 */ 3073/* File: armv5te/OP_SPUT_OBJECT.S */ 3074/* File: armv5te/OP_SPUT.S */ 3075 /* 3076 * General 32-bit SPUT handler. 3077 * 3078 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3079 */ 3080 /* op vAA, field@BBBB */ 3081 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3082 FETCH(r1, 1) @ r1<- field ref BBBB 3083 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3084 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3085 cmp r0, #0 @ is resolved entry null? 3086 beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve 3087.LOP_SPUT_OBJECT_finish: @ field ptr in r0 3088 mov r2, rINST, lsr #8 @ r2<- AA 3089 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3090 GET_VREG(r1, r2) @ r1<- fp[AA] 3091 GET_INST_OPCODE(ip) @ extract opcode from rINST 3092 str r1, [r0, #offStaticField_value] @ field<- vAA 3093 GOTO_OPCODE(ip) @ jump to next instruction 3094 3095 3096/* ------------------------------ */ 3097 .balign 64 3098.L_OP_SPUT_BOOLEAN: /* 0x6a */ 3099/* File: armv5te/OP_SPUT_BOOLEAN.S */ 3100/* File: armv5te/OP_SPUT.S */ 3101 /* 3102 * General 32-bit SPUT handler. 3103 * 3104 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3105 */ 3106 /* op vAA, field@BBBB */ 3107 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3108 FETCH(r1, 1) @ r1<- field ref BBBB 3109 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3110 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3111 cmp r0, #0 @ is resolved entry null? 3112 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3113.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3114 mov r2, rINST, lsr #8 @ r2<- AA 3115 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3116 GET_VREG(r1, r2) @ r1<- fp[AA] 3117 GET_INST_OPCODE(ip) @ extract opcode from rINST 3118 str r1, [r0, #offStaticField_value] @ field<- vAA 3119 GOTO_OPCODE(ip) @ jump to next instruction 3120 3121 3122/* ------------------------------ */ 3123 .balign 64 3124.L_OP_SPUT_BYTE: /* 0x6b */ 3125/* File: armv5te/OP_SPUT_BYTE.S */ 3126/* File: armv5te/OP_SPUT.S */ 3127 /* 3128 * General 32-bit SPUT handler. 3129 * 3130 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3131 */ 3132 /* op vAA, field@BBBB */ 3133 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3134 FETCH(r1, 1) @ r1<- field ref BBBB 3135 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3136 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3137 cmp r0, #0 @ is resolved entry null? 3138 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3139.LOP_SPUT_BYTE_finish: @ field ptr in r0 3140 mov r2, rINST, lsr #8 @ r2<- AA 3141 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3142 GET_VREG(r1, r2) @ r1<- fp[AA] 3143 GET_INST_OPCODE(ip) @ extract opcode from rINST 3144 str r1, [r0, #offStaticField_value] @ field<- vAA 3145 GOTO_OPCODE(ip) @ jump to next instruction 3146 3147 3148/* ------------------------------ */ 3149 .balign 64 3150.L_OP_SPUT_CHAR: /* 0x6c */ 3151/* File: armv5te/OP_SPUT_CHAR.S */ 3152/* File: armv5te/OP_SPUT.S */ 3153 /* 3154 * General 32-bit SPUT handler. 3155 * 3156 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3157 */ 3158 /* op vAA, field@BBBB */ 3159 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3160 FETCH(r1, 1) @ r1<- field ref BBBB 3161 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3162 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3163 cmp r0, #0 @ is resolved entry null? 3164 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3165.LOP_SPUT_CHAR_finish: @ field ptr in r0 3166 mov r2, rINST, lsr #8 @ r2<- AA 3167 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3168 GET_VREG(r1, r2) @ r1<- fp[AA] 3169 GET_INST_OPCODE(ip) @ extract opcode from rINST 3170 str r1, [r0, #offStaticField_value] @ field<- vAA 3171 GOTO_OPCODE(ip) @ jump to next instruction 3172 3173 3174/* ------------------------------ */ 3175 .balign 64 3176.L_OP_SPUT_SHORT: /* 0x6d */ 3177/* File: armv5te/OP_SPUT_SHORT.S */ 3178/* File: armv5te/OP_SPUT.S */ 3179 /* 3180 * General 32-bit SPUT handler. 3181 * 3182 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3183 */ 3184 /* op vAA, field@BBBB */ 3185 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3186 FETCH(r1, 1) @ r1<- field ref BBBB 3187 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3188 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3189 cmp r0, #0 @ is resolved entry null? 3190 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3191.LOP_SPUT_SHORT_finish: @ field ptr in r0 3192 mov r2, rINST, lsr #8 @ r2<- AA 3193 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3194 GET_VREG(r1, r2) @ r1<- fp[AA] 3195 GET_INST_OPCODE(ip) @ extract opcode from rINST 3196 str r1, [r0, #offStaticField_value] @ field<- vAA 3197 GOTO_OPCODE(ip) @ jump to next instruction 3198 3199 3200/* ------------------------------ */ 3201 .balign 64 3202.L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3203/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3204 /* 3205 * Handle a virtual method call. 3206 * 3207 * for: invoke-virtual, invoke-virtual/range 3208 */ 3209 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3210 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3211 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3212 FETCH(r1, 1) @ r1<- BBBB 3213 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3214 FETCH(r10, 2) @ r10<- GFED or CCCC 3215 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3216 .if (!0) 3217 and r10, r10, #15 @ r10<- D (or stays CCCC) 3218 .endif 3219 cmp r0, #0 @ already resolved? 3220 EXPORT_PC() @ must export for invoke 3221 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3222 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3223 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3224 mov r2, #METHOD_VIRTUAL @ resolver method type 3225 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3226 cmp r0, #0 @ got null? 3227 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue 3228 b common_exceptionThrown @ yes, handle exception 3229 3230/* ------------------------------ */ 3231 .balign 64 3232.L_OP_INVOKE_SUPER: /* 0x6f */ 3233/* File: armv5te/OP_INVOKE_SUPER.S */ 3234 /* 3235 * Handle a "super" method call. 3236 * 3237 * for: invoke-super, invoke-super/range 3238 */ 3239 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3240 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3241 FETCH(r10, 2) @ r10<- GFED or CCCC 3242 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3243 .if (!0) 3244 and r10, r10, #15 @ r10<- D (or stays CCCC) 3245 .endif 3246 FETCH(r1, 1) @ r1<- BBBB 3247 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3248 GET_VREG(r2, r10) @ r2<- "this" ptr 3249 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3250 cmp r2, #0 @ null "this"? 3251 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3252 beq common_errNullObject @ null "this", throw exception 3253 cmp r0, #0 @ already resolved? 3254 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3255 EXPORT_PC() @ must export for invoke 3256 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on 3257 b .LOP_INVOKE_SUPER_resolve @ do resolve now 3258 3259/* ------------------------------ */ 3260 .balign 64 3261.L_OP_INVOKE_DIRECT: /* 0x70 */ 3262/* File: armv5te/OP_INVOKE_DIRECT.S */ 3263 /* 3264 * Handle a direct method call. 3265 * 3266 * (We could defer the "is 'this' pointer null" test to the common 3267 * method invocation code, and use a flag to indicate that static 3268 * calls don't count. If we do this as part of copying the arguments 3269 * out we could avoiding loading the first arg twice.) 3270 * 3271 * for: invoke-direct, invoke-direct/range 3272 */ 3273 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3274 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3275 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3276 FETCH(r1, 1) @ r1<- BBBB 3277 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3278 FETCH(r10, 2) @ r10<- GFED or CCCC 3279 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3280 .if (!0) 3281 and r10, r10, #15 @ r10<- D (or stays CCCC) 3282 .endif 3283 cmp r0, #0 @ already resolved? 3284 EXPORT_PC() @ must export for invoke 3285 GET_VREG(r2, r10) @ r2<- "this" ptr 3286 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now 3287.LOP_INVOKE_DIRECT_finish: 3288 cmp r2, #0 @ null "this" ref? 3289 bne common_invokeMethodNoRange @ no, continue on 3290 b common_errNullObject @ yes, throw exception 3291 3292/* ------------------------------ */ 3293 .balign 64 3294.L_OP_INVOKE_STATIC: /* 0x71 */ 3295/* File: armv5te/OP_INVOKE_STATIC.S */ 3296 /* 3297 * Handle a static method call. 3298 * 3299 * for: invoke-static, invoke-static/range 3300 */ 3301 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3302 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3303 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3304 FETCH(r1, 1) @ r1<- BBBB 3305 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3306 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3307 cmp r0, #0 @ already resolved? 3308 EXPORT_PC() @ must export for invoke 3309 bne common_invokeMethodNoRange @ yes, continue on 33100: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3311 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3312 mov r2, #METHOD_STATIC @ resolver method type 3313 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3314 cmp r0, #0 @ got null? 3315 bne common_invokeMethodNoRange @ no, continue 3316 b common_exceptionThrown @ yes, handle exception 3317 3318 3319/* ------------------------------ */ 3320 .balign 64 3321.L_OP_INVOKE_INTERFACE: /* 0x72 */ 3322/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3323 /* 3324 * Handle an interface method call. 3325 * 3326 * for: invoke-interface, invoke-interface/range 3327 */ 3328 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3329 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3330 FETCH(r2, 2) @ r2<- FEDC or CCCC 3331 FETCH(r1, 1) @ r1<- BBBB 3332 .if (!0) 3333 and r2, r2, #15 @ r2<- C (or stays CCCC) 3334 .endif 3335 EXPORT_PC() @ must export for invoke 3336 GET_VREG(r0, r2) @ r0<- first arg ("this") 3337 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3338 cmp r0, #0 @ null obj? 3339 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3340 beq common_errNullObject @ yes, fail 3341 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3342 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3343 cmp r0, #0 @ failed? 3344 beq common_exceptionThrown @ yes, handle exception 3345 b common_invokeMethodNoRange @ jump to common handler 3346 3347 3348/* ------------------------------ */ 3349 .balign 64 3350.L_OP_UNUSED_73: /* 0x73 */ 3351/* File: armv5te/OP_UNUSED_73.S */ 3352/* File: armv5te/unused.S */ 3353 bl common_abort 3354 3355 3356 3357/* ------------------------------ */ 3358 .balign 64 3359.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3360/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3361/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3362 /* 3363 * Handle a virtual method call. 3364 * 3365 * for: invoke-virtual, invoke-virtual/range 3366 */ 3367 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3368 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3369 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3370 FETCH(r1, 1) @ r1<- BBBB 3371 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3372 FETCH(r10, 2) @ r10<- GFED or CCCC 3373 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3374 .if (!1) 3375 and r10, r10, #15 @ r10<- D (or stays CCCC) 3376 .endif 3377 cmp r0, #0 @ already resolved? 3378 EXPORT_PC() @ must export for invoke 3379 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3380 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3381 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3382 mov r2, #METHOD_VIRTUAL @ resolver method type 3383 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3384 cmp r0, #0 @ got null? 3385 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3386 b common_exceptionThrown @ yes, handle exception 3387 3388 3389/* ------------------------------ */ 3390 .balign 64 3391.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3392/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3393/* File: armv5te/OP_INVOKE_SUPER.S */ 3394 /* 3395 * Handle a "super" method call. 3396 * 3397 * for: invoke-super, invoke-super/range 3398 */ 3399 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3400 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3401 FETCH(r10, 2) @ r10<- GFED or CCCC 3402 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3403 .if (!1) 3404 and r10, r10, #15 @ r10<- D (or stays CCCC) 3405 .endif 3406 FETCH(r1, 1) @ r1<- BBBB 3407 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3408 GET_VREG(r2, r10) @ r2<- "this" ptr 3409 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3410 cmp r2, #0 @ null "this"? 3411 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3412 beq common_errNullObject @ null "this", throw exception 3413 cmp r0, #0 @ already resolved? 3414 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3415 EXPORT_PC() @ must export for invoke 3416 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on 3417 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now 3418 3419 3420/* ------------------------------ */ 3421 .balign 64 3422.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */ 3423/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */ 3424/* File: armv5te/OP_INVOKE_DIRECT.S */ 3425 /* 3426 * Handle a direct method call. 3427 * 3428 * (We could defer the "is 'this' pointer null" test to the common 3429 * method invocation code, and use a flag to indicate that static 3430 * calls don't count. If we do this as part of copying the arguments 3431 * out we could avoiding loading the first arg twice.) 3432 * 3433 * for: invoke-direct, invoke-direct/range 3434 */ 3435 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3436 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3437 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3438 FETCH(r1, 1) @ r1<- BBBB 3439 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3440 FETCH(r10, 2) @ r10<- GFED or CCCC 3441 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3442 .if (!1) 3443 and r10, r10, #15 @ r10<- D (or stays CCCC) 3444 .endif 3445 cmp r0, #0 @ already resolved? 3446 EXPORT_PC() @ must export for invoke 3447 GET_VREG(r2, r10) @ r2<- "this" ptr 3448 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now 3449.LOP_INVOKE_DIRECT_RANGE_finish: 3450 cmp r2, #0 @ null "this" ref? 3451 bne common_invokeMethodRange @ no, continue on 3452 b common_errNullObject @ yes, throw exception 3453 3454 3455/* ------------------------------ */ 3456 .balign 64 3457.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */ 3458/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */ 3459/* File: armv5te/OP_INVOKE_STATIC.S */ 3460 /* 3461 * Handle a static method call. 3462 * 3463 * for: invoke-static, invoke-static/range 3464 */ 3465 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3466 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3467 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3468 FETCH(r1, 1) @ r1<- BBBB 3469 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3470 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3471 cmp r0, #0 @ already resolved? 3472 EXPORT_PC() @ must export for invoke 3473 bne common_invokeMethodRange @ yes, continue on 34740: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3475 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3476 mov r2, #METHOD_STATIC @ resolver method type 3477 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3478 cmp r0, #0 @ got null? 3479 bne common_invokeMethodRange @ no, continue 3480 b common_exceptionThrown @ yes, handle exception 3481 3482 3483 3484/* ------------------------------ */ 3485 .balign 64 3486.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */ 3487/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */ 3488/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3489 /* 3490 * Handle an interface method call. 3491 * 3492 * for: invoke-interface, invoke-interface/range 3493 */ 3494 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3495 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3496 FETCH(r2, 2) @ r2<- FEDC or CCCC 3497 FETCH(r1, 1) @ r1<- BBBB 3498 .if (!1) 3499 and r2, r2, #15 @ r2<- C (or stays CCCC) 3500 .endif 3501 EXPORT_PC() @ must export for invoke 3502 GET_VREG(r0, r2) @ r0<- first arg ("this") 3503 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3504 cmp r0, #0 @ null obj? 3505 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3506 beq common_errNullObject @ yes, fail 3507 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3508 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3509 cmp r0, #0 @ failed? 3510 beq common_exceptionThrown @ yes, handle exception 3511 b common_invokeMethodRange @ jump to common handler 3512 3513 3514 3515/* ------------------------------ */ 3516 .balign 64 3517.L_OP_UNUSED_79: /* 0x79 */ 3518/* File: armv5te/OP_UNUSED_79.S */ 3519/* File: armv5te/unused.S */ 3520 bl common_abort 3521 3522 3523 3524/* ------------------------------ */ 3525 .balign 64 3526.L_OP_UNUSED_7A: /* 0x7a */ 3527/* File: armv5te/OP_UNUSED_7A.S */ 3528/* File: armv5te/unused.S */ 3529 bl common_abort 3530 3531 3532 3533/* ------------------------------ */ 3534 .balign 64 3535.L_OP_NEG_INT: /* 0x7b */ 3536/* File: armv5te/OP_NEG_INT.S */ 3537/* File: armv5te/unop.S */ 3538 /* 3539 * Generic 32-bit unary operation. Provide an "instr" line that 3540 * specifies an instruction that performs "result = op r0". 3541 * This could be an ARM instruction or a function call. 3542 * 3543 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3544 * int-to-byte, int-to-char, int-to-short 3545 */ 3546 /* unop vA, vB */ 3547 mov r3, rINST, lsr #12 @ r3<- B 3548 mov r9, rINST, lsr #8 @ r9<- A+ 3549 GET_VREG(r0, r3) @ r0<- vB 3550 and r9, r9, #15 3551 @ optional op; may set condition codes 3552 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3553 rsb r0, r0, #0 @ r0<- op, r0-r3 changed 3554 GET_INST_OPCODE(ip) @ extract opcode from rINST 3555 SET_VREG(r0, r9) @ vAA<- r0 3556 GOTO_OPCODE(ip) @ jump to next instruction 3557 /* 9-10 instructions */ 3558 3559 3560/* ------------------------------ */ 3561 .balign 64 3562.L_OP_NOT_INT: /* 0x7c */ 3563/* File: armv5te/OP_NOT_INT.S */ 3564/* File: armv5te/unop.S */ 3565 /* 3566 * Generic 32-bit unary operation. Provide an "instr" line that 3567 * specifies an instruction that performs "result = op r0". 3568 * This could be an ARM instruction or a function call. 3569 * 3570 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3571 * int-to-byte, int-to-char, int-to-short 3572 */ 3573 /* unop vA, vB */ 3574 mov r3, rINST, lsr #12 @ r3<- B 3575 mov r9, rINST, lsr #8 @ r9<- A+ 3576 GET_VREG(r0, r3) @ r0<- vB 3577 and r9, r9, #15 3578 @ optional op; may set condition codes 3579 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3580 mvn r0, r0 @ r0<- op, r0-r3 changed 3581 GET_INST_OPCODE(ip) @ extract opcode from rINST 3582 SET_VREG(r0, r9) @ vAA<- r0 3583 GOTO_OPCODE(ip) @ jump to next instruction 3584 /* 9-10 instructions */ 3585 3586 3587/* ------------------------------ */ 3588 .balign 64 3589.L_OP_NEG_LONG: /* 0x7d */ 3590/* File: armv5te/OP_NEG_LONG.S */ 3591/* File: armv5te/unopWide.S */ 3592 /* 3593 * Generic 64-bit unary operation. Provide an "instr" line that 3594 * specifies an instruction that performs "result = op r0/r1". 3595 * This could be an ARM instruction or a function call. 3596 * 3597 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3598 */ 3599 /* unop vA, vB */ 3600 mov r9, rINST, lsr #8 @ r9<- A+ 3601 mov r3, rINST, lsr #12 @ r3<- B 3602 and r9, r9, #15 3603 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3604 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3605 ldmia r3, {r0-r1} @ r0/r1<- vAA 3606 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3607 rsbs r0, r0, #0 @ optional op; may set condition codes 3608 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed 3609 GET_INST_OPCODE(ip) @ extract opcode from rINST 3610 stmia r9, {r0-r1} @ vAA<- r0/r1 3611 GOTO_OPCODE(ip) @ jump to next instruction 3612 /* 12-13 instructions */ 3613 3614 3615 3616/* ------------------------------ */ 3617 .balign 64 3618.L_OP_NOT_LONG: /* 0x7e */ 3619/* File: armv5te/OP_NOT_LONG.S */ 3620/* File: armv5te/unopWide.S */ 3621 /* 3622 * Generic 64-bit unary operation. Provide an "instr" line that 3623 * specifies an instruction that performs "result = op r0/r1". 3624 * This could be an ARM instruction or a function call. 3625 * 3626 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3627 */ 3628 /* unop vA, vB */ 3629 mov r9, rINST, lsr #8 @ r9<- A+ 3630 mov r3, rINST, lsr #12 @ r3<- B 3631 and r9, r9, #15 3632 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3633 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3634 ldmia r3, {r0-r1} @ r0/r1<- vAA 3635 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3636 mvn r0, r0 @ optional op; may set condition codes 3637 mvn r1, r1 @ r0/r1<- op, r2-r3 changed 3638 GET_INST_OPCODE(ip) @ extract opcode from rINST 3639 stmia r9, {r0-r1} @ vAA<- r0/r1 3640 GOTO_OPCODE(ip) @ jump to next instruction 3641 /* 12-13 instructions */ 3642 3643 3644 3645/* ------------------------------ */ 3646 .balign 64 3647.L_OP_NEG_FLOAT: /* 0x7f */ 3648/* File: armv5te/OP_NEG_FLOAT.S */ 3649/* File: armv5te/unop.S */ 3650 /* 3651 * Generic 32-bit unary operation. Provide an "instr" line that 3652 * specifies an instruction that performs "result = op r0". 3653 * This could be an ARM instruction or a function call. 3654 * 3655 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3656 * int-to-byte, int-to-char, int-to-short 3657 */ 3658 /* unop vA, vB */ 3659 mov r3, rINST, lsr #12 @ r3<- B 3660 mov r9, rINST, lsr #8 @ r9<- A+ 3661 GET_VREG(r0, r3) @ r0<- vB 3662 and r9, r9, #15 3663 @ optional op; may set condition codes 3664 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3665 add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed 3666 GET_INST_OPCODE(ip) @ extract opcode from rINST 3667 SET_VREG(r0, r9) @ vAA<- r0 3668 GOTO_OPCODE(ip) @ jump to next instruction 3669 /* 9-10 instructions */ 3670 3671 3672/* ------------------------------ */ 3673 .balign 64 3674.L_OP_NEG_DOUBLE: /* 0x80 */ 3675/* File: armv5te/OP_NEG_DOUBLE.S */ 3676/* File: armv5te/unopWide.S */ 3677 /* 3678 * Generic 64-bit unary operation. Provide an "instr" line that 3679 * specifies an instruction that performs "result = op r0/r1". 3680 * This could be an ARM instruction or a function call. 3681 * 3682 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3683 */ 3684 /* unop vA, vB */ 3685 mov r9, rINST, lsr #8 @ r9<- A+ 3686 mov r3, rINST, lsr #12 @ r3<- B 3687 and r9, r9, #15 3688 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3689 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3690 ldmia r3, {r0-r1} @ r0/r1<- vAA 3691 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3692 @ optional op; may set condition codes 3693 add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed 3694 GET_INST_OPCODE(ip) @ extract opcode from rINST 3695 stmia r9, {r0-r1} @ vAA<- r0/r1 3696 GOTO_OPCODE(ip) @ jump to next instruction 3697 /* 12-13 instructions */ 3698 3699 3700 3701/* ------------------------------ */ 3702 .balign 64 3703.L_OP_INT_TO_LONG: /* 0x81 */ 3704/* File: armv5te/OP_INT_TO_LONG.S */ 3705/* File: armv5te/unopWider.S */ 3706 /* 3707 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3708 * that specifies an instruction that performs "result = op r0", where 3709 * "result" is a 64-bit quantity in r0/r1. 3710 * 3711 * For: int-to-long, int-to-double, float-to-long, float-to-double 3712 */ 3713 /* unop vA, vB */ 3714 mov r9, rINST, lsr #8 @ r9<- A+ 3715 mov r3, rINST, lsr #12 @ r3<- B 3716 and r9, r9, #15 3717 GET_VREG(r0, r3) @ r0<- vB 3718 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3719 @ optional op; may set condition codes 3720 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3721 mov r1, r0, asr #31 @ r0<- op, r0-r3 changed 3722 GET_INST_OPCODE(ip) @ extract opcode from rINST 3723 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3724 GOTO_OPCODE(ip) @ jump to next instruction 3725 /* 10-11 instructions */ 3726 3727 3728/* ------------------------------ */ 3729 .balign 64 3730.L_OP_INT_TO_FLOAT: /* 0x82 */ 3731/* File: armv5te/OP_INT_TO_FLOAT.S */ 3732/* File: armv5te/unop.S */ 3733 /* 3734 * Generic 32-bit unary operation. Provide an "instr" line that 3735 * specifies an instruction that performs "result = op r0". 3736 * This could be an ARM instruction or a function call. 3737 * 3738 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3739 * int-to-byte, int-to-char, int-to-short 3740 */ 3741 /* unop vA, vB */ 3742 mov r3, rINST, lsr #12 @ r3<- B 3743 mov r9, rINST, lsr #8 @ r9<- A+ 3744 GET_VREG(r0, r3) @ r0<- vB 3745 and r9, r9, #15 3746 @ optional op; may set condition codes 3747 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3748 bl __aeabi_i2f @ r0<- op, r0-r3 changed 3749 GET_INST_OPCODE(ip) @ extract opcode from rINST 3750 SET_VREG(r0, r9) @ vAA<- r0 3751 GOTO_OPCODE(ip) @ jump to next instruction 3752 /* 9-10 instructions */ 3753 3754 3755/* ------------------------------ */ 3756 .balign 64 3757.L_OP_INT_TO_DOUBLE: /* 0x83 */ 3758/* File: armv5te/OP_INT_TO_DOUBLE.S */ 3759/* File: armv5te/unopWider.S */ 3760 /* 3761 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3762 * that specifies an instruction that performs "result = op r0", where 3763 * "result" is a 64-bit quantity in r0/r1. 3764 * 3765 * For: int-to-long, int-to-double, float-to-long, float-to-double 3766 */ 3767 /* unop vA, vB */ 3768 mov r9, rINST, lsr #8 @ r9<- A+ 3769 mov r3, rINST, lsr #12 @ r3<- B 3770 and r9, r9, #15 3771 GET_VREG(r0, r3) @ r0<- vB 3772 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3773 @ optional op; may set condition codes 3774 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3775 bl __aeabi_i2d @ r0<- op, r0-r3 changed 3776 GET_INST_OPCODE(ip) @ extract opcode from rINST 3777 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3778 GOTO_OPCODE(ip) @ jump to next instruction 3779 /* 10-11 instructions */ 3780 3781 3782/* ------------------------------ */ 3783 .balign 64 3784.L_OP_LONG_TO_INT: /* 0x84 */ 3785/* File: armv5te/OP_LONG_TO_INT.S */ 3786/* we ignore the high word, making this equivalent to a 32-bit reg move */ 3787/* File: armv5te/OP_MOVE.S */ 3788 /* for move, move-object, long-to-int */ 3789 /* op vA, vB */ 3790 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3791 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3792 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3793 GET_VREG(r2, r1) @ r2<- fp[B] 3794 and r0, r0, #15 3795 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3796 SET_VREG(r2, r0) @ fp[A]<- r2 3797 GOTO_OPCODE(ip) @ execute next instruction 3798 3799 3800 3801/* ------------------------------ */ 3802 .balign 64 3803.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3804/* File: armv5te/OP_LONG_TO_FLOAT.S */ 3805/* File: armv5te/unopNarrower.S */ 3806 /* 3807 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3808 * that specifies an instruction that performs "result = op r0/r1", where 3809 * "result" is a 32-bit quantity in r0. 3810 * 3811 * For: long-to-float, double-to-int, double-to-float 3812 * 3813 * (This would work for long-to-int, but that instruction is actually 3814 * an exact match for OP_MOVE.) 3815 */ 3816 /* unop vA, vB */ 3817 mov r3, rINST, lsr #12 @ r3<- B 3818 mov r9, rINST, lsr #8 @ r9<- A+ 3819 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3820 and r9, r9, #15 3821 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3822 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3823 @ optional op; may set condition codes 3824 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3825 GET_INST_OPCODE(ip) @ extract opcode from rINST 3826 SET_VREG(r0, r9) @ vA<- r0 3827 GOTO_OPCODE(ip) @ jump to next instruction 3828 /* 10-11 instructions */ 3829 3830 3831/* ------------------------------ */ 3832 .balign 64 3833.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3834/* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3835/* File: armv5te/unopWide.S */ 3836 /* 3837 * Generic 64-bit unary operation. Provide an "instr" line that 3838 * specifies an instruction that performs "result = op r0/r1". 3839 * This could be an ARM instruction or a function call. 3840 * 3841 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3842 */ 3843 /* unop vA, vB */ 3844 mov r9, rINST, lsr #8 @ r9<- A+ 3845 mov r3, rINST, lsr #12 @ r3<- B 3846 and r9, r9, #15 3847 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3848 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3849 ldmia r3, {r0-r1} @ r0/r1<- vAA 3850 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3851 @ optional op; may set condition codes 3852 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3853 GET_INST_OPCODE(ip) @ extract opcode from rINST 3854 stmia r9, {r0-r1} @ vAA<- r0/r1 3855 GOTO_OPCODE(ip) @ jump to next instruction 3856 /* 12-13 instructions */ 3857 3858 3859 3860/* ------------------------------ */ 3861 .balign 64 3862.L_OP_FLOAT_TO_INT: /* 0x87 */ 3863/* File: armv5te/OP_FLOAT_TO_INT.S */ 3864/* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3865/* File: armv5te/unop.S */ 3866 /* 3867 * Generic 32-bit unary operation. Provide an "instr" line that 3868 * specifies an instruction that performs "result = op r0". 3869 * This could be an ARM instruction or a function call. 3870 * 3871 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3872 * int-to-byte, int-to-char, int-to-short 3873 */ 3874 /* unop vA, vB */ 3875 mov r3, rINST, lsr #12 @ r3<- B 3876 mov r9, rINST, lsr #8 @ r9<- A+ 3877 GET_VREG(r0, r3) @ r0<- vB 3878 and r9, r9, #15 3879 @ optional op; may set condition codes 3880 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3881 bl __aeabi_f2iz @ r0<- op, r0-r3 changed 3882 GET_INST_OPCODE(ip) @ extract opcode from rINST 3883 SET_VREG(r0, r9) @ vAA<- r0 3884 GOTO_OPCODE(ip) @ jump to next instruction 3885 /* 9-10 instructions */ 3886 3887 3888#if 0 3889@include "armv5te/unop.S" {"instr":"bl f2i_doconv"} 3890@break 3891/* 3892 * Convert the float in r0 to an int in r0. 3893 * 3894 * We have to clip values to int min/max per the specification. The 3895 * expected common case is a "reasonable" value that converts directly 3896 * to modest integer. The EABI convert function isn't doing this for us. 3897 */ 3898f2i_doconv: 3899 stmfd sp!, {r4, lr} 3900 mov r1, #0x4f000000 @ (float)maxint 3901 mov r4, r0 3902 bl __aeabi_fcmpge @ is arg >= maxint? 3903 cmp r0, #0 @ nonzero == yes 3904 mvnne r0, #0x80000000 @ return maxint (7fffffff) 3905 ldmnefd sp!, {r4, pc} 3906 3907 mov r0, r4 @ recover arg 3908 mov r1, #0xcf000000 @ (float)minint 3909 bl __aeabi_fcmple @ is arg <= minint? 3910 cmp r0, #0 @ nonzero == yes 3911 movne r0, #0x80000000 @ return minint (80000000) 3912 ldmnefd sp!, {r4, pc} 3913 3914 mov r0, r4 @ recover arg 3915 mov r1, r4 3916 bl __aeabi_fcmpeq @ is arg == self? 3917 cmp r0, #0 @ zero == no 3918 ldmeqfd sp!, {r4, pc} @ return zero for NaN 3919 3920 mov r0, r4 @ recover arg 3921 bl __aeabi_f2iz @ convert float to int 3922 ldmfd sp!, {r4, pc} 3923#endif 3924 3925 3926/* ------------------------------ */ 3927 .balign 64 3928.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3929/* File: armv5te/OP_FLOAT_TO_LONG.S */ 3930@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3931/* File: armv5te/unopWider.S */ 3932 /* 3933 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3934 * that specifies an instruction that performs "result = op r0", where 3935 * "result" is a 64-bit quantity in r0/r1. 3936 * 3937 * For: int-to-long, int-to-double, float-to-long, float-to-double 3938 */ 3939 /* unop vA, vB */ 3940 mov r9, rINST, lsr #8 @ r9<- A+ 3941 mov r3, rINST, lsr #12 @ r3<- B 3942 and r9, r9, #15 3943 GET_VREG(r0, r3) @ r0<- vB 3944 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3945 @ optional op; may set condition codes 3946 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3947 bl f2l_doconv @ r0<- op, r0-r3 changed 3948 GET_INST_OPCODE(ip) @ extract opcode from rINST 3949 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3950 GOTO_OPCODE(ip) @ jump to next instruction 3951 /* 10-11 instructions */ 3952 3953 3954 3955/* ------------------------------ */ 3956 .balign 64 3957.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3958/* File: armv5te/OP_FLOAT_TO_DOUBLE.S */ 3959/* File: armv5te/unopWider.S */ 3960 /* 3961 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3962 * that specifies an instruction that performs "result = op r0", where 3963 * "result" is a 64-bit quantity in r0/r1. 3964 * 3965 * For: int-to-long, int-to-double, float-to-long, float-to-double 3966 */ 3967 /* unop vA, vB */ 3968 mov r9, rINST, lsr #8 @ r9<- A+ 3969 mov r3, rINST, lsr #12 @ r3<- B 3970 and r9, r9, #15 3971 GET_VREG(r0, r3) @ r0<- vB 3972 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3973 @ optional op; may set condition codes 3974 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3975 bl __aeabi_f2d @ r0<- op, r0-r3 changed 3976 GET_INST_OPCODE(ip) @ extract opcode from rINST 3977 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3978 GOTO_OPCODE(ip) @ jump to next instruction 3979 /* 10-11 instructions */ 3980 3981 3982/* ------------------------------ */ 3983 .balign 64 3984.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3985/* File: armv5te/OP_DOUBLE_TO_INT.S */ 3986/* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3987/* File: armv5te/unopNarrower.S */ 3988 /* 3989 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3990 * that specifies an instruction that performs "result = op r0/r1", where 3991 * "result" is a 32-bit quantity in r0. 3992 * 3993 * For: long-to-float, double-to-int, double-to-float 3994 * 3995 * (This would work for long-to-int, but that instruction is actually 3996 * an exact match for OP_MOVE.) 3997 */ 3998 /* unop vA, vB */ 3999 mov r3, rINST, lsr #12 @ r3<- B 4000 mov r9, rINST, lsr #8 @ r9<- A+ 4001 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4002 and r9, r9, #15 4003 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4004 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4005 @ optional op; may set condition codes 4006 bl __aeabi_d2iz @ r0<- op, r0-r3 changed 4007 GET_INST_OPCODE(ip) @ extract opcode from rINST 4008 SET_VREG(r0, r9) @ vA<- r0 4009 GOTO_OPCODE(ip) @ jump to next instruction 4010 /* 10-11 instructions */ 4011 4012 4013#if 0 4014@include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"} 4015@break 4016/* 4017 * Convert the double in r0/r1 to an int in r0. 4018 * 4019 * We have to clip values to int min/max per the specification. The 4020 * expected common case is a "reasonable" value that converts directly 4021 * to modest integer. The EABI convert function isn't doing this for us. 4022 */ 4023d2i_doconv: 4024 stmfd sp!, {r4, r5, lr} @ save regs 4025 mov r2, #0x80000000 @ maxint, as a double (low word) 4026 mov r2, r2, asr #9 @ 0xffc00000 4027 sub sp, sp, #4 @ align for EABI 4028 mvn r3, #0xbe000000 @ maxint, as a double (high word) 4029 sub r3, r3, #0x00200000 @ 0x41dfffff 4030 mov r4, r0 @ save a copy of r0 4031 mov r5, r1 @ and r1 4032 bl __aeabi_dcmpge @ is arg >= maxint? 4033 cmp r0, #0 @ nonzero == yes 4034 mvnne r0, #0x80000000 @ return maxint (0x7fffffff) 4035 bne 1f 4036 4037 mov r0, r4 @ recover arg 4038 mov r1, r5 4039 mov r3, #0xc1000000 @ minint, as a double (high word) 4040 add r3, r3, #0x00e00000 @ 0xc1e00000 4041 mov r2, #0 @ minint, as a double (low word) 4042 bl __aeabi_dcmple @ is arg <= minint? 4043 cmp r0, #0 @ nonzero == yes 4044 movne r0, #0x80000000 @ return minint (80000000) 4045 bne 1f 4046 4047 mov r0, r4 @ recover arg 4048 mov r1, r5 4049 mov r2, r4 @ compare against self 4050 mov r3, r5 4051 bl __aeabi_dcmpeq @ is arg == self? 4052 cmp r0, #0 @ zero == no 4053 beq 1f @ return zero for NaN 4054 4055 mov r0, r4 @ recover arg 4056 mov r1, r5 4057 bl __aeabi_d2iz @ convert double to int 4058 40591: 4060 add sp, sp, #4 4061 ldmfd sp!, {r4, r5, pc} 4062#endif 4063 4064 4065/* ------------------------------ */ 4066 .balign 64 4067.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 4068/* File: armv5te/OP_DOUBLE_TO_LONG.S */ 4069@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"} 4070/* File: armv5te/unopWide.S */ 4071 /* 4072 * Generic 64-bit unary operation. Provide an "instr" line that 4073 * specifies an instruction that performs "result = op r0/r1". 4074 * This could be an ARM instruction or a function call. 4075 * 4076 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 4077 */ 4078 /* unop vA, vB */ 4079 mov r9, rINST, lsr #8 @ r9<- A+ 4080 mov r3, rINST, lsr #12 @ r3<- B 4081 and r9, r9, #15 4082 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4083 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 4084 ldmia r3, {r0-r1} @ r0/r1<- vAA 4085 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4086 @ optional op; may set condition codes 4087 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 4088 GET_INST_OPCODE(ip) @ extract opcode from rINST 4089 stmia r9, {r0-r1} @ vAA<- r0/r1 4090 GOTO_OPCODE(ip) @ jump to next instruction 4091 /* 12-13 instructions */ 4092 4093 4094 4095 4096/* ------------------------------ */ 4097 .balign 64 4098.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 4099/* File: armv5te/OP_DOUBLE_TO_FLOAT.S */ 4100/* File: armv5te/unopNarrower.S */ 4101 /* 4102 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 4103 * that specifies an instruction that performs "result = op r0/r1", where 4104 * "result" is a 32-bit quantity in r0. 4105 * 4106 * For: long-to-float, double-to-int, double-to-float 4107 * 4108 * (This would work for long-to-int, but that instruction is actually 4109 * an exact match for OP_MOVE.) 4110 */ 4111 /* unop vA, vB */ 4112 mov r3, rINST, lsr #12 @ r3<- B 4113 mov r9, rINST, lsr #8 @ r9<- A+ 4114 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4115 and r9, r9, #15 4116 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4117 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4118 @ optional op; may set condition codes 4119 bl __aeabi_d2f @ r0<- op, r0-r3 changed 4120 GET_INST_OPCODE(ip) @ extract opcode from rINST 4121 SET_VREG(r0, r9) @ vA<- r0 4122 GOTO_OPCODE(ip) @ jump to next instruction 4123 /* 10-11 instructions */ 4124 4125 4126/* ------------------------------ */ 4127 .balign 64 4128.L_OP_INT_TO_BYTE: /* 0x8d */ 4129/* File: armv5te/OP_INT_TO_BYTE.S */ 4130/* File: armv5te/unop.S */ 4131 /* 4132 * Generic 32-bit unary operation. Provide an "instr" line that 4133 * specifies an instruction that performs "result = op r0". 4134 * This could be an ARM instruction or a function call. 4135 * 4136 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4137 * int-to-byte, int-to-char, int-to-short 4138 */ 4139 /* unop vA, vB */ 4140 mov r3, rINST, lsr #12 @ r3<- B 4141 mov r9, rINST, lsr #8 @ r9<- A+ 4142 GET_VREG(r0, r3) @ r0<- vB 4143 and r9, r9, #15 4144 mov r0, r0, asl #24 @ optional op; may set condition codes 4145 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4146 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 4147 GET_INST_OPCODE(ip) @ extract opcode from rINST 4148 SET_VREG(r0, r9) @ vAA<- r0 4149 GOTO_OPCODE(ip) @ jump to next instruction 4150 /* 9-10 instructions */ 4151 4152 4153/* ------------------------------ */ 4154 .balign 64 4155.L_OP_INT_TO_CHAR: /* 0x8e */ 4156/* File: armv5te/OP_INT_TO_CHAR.S */ 4157/* File: armv5te/unop.S */ 4158 /* 4159 * Generic 32-bit unary operation. Provide an "instr" line that 4160 * specifies an instruction that performs "result = op r0". 4161 * This could be an ARM instruction or a function call. 4162 * 4163 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4164 * int-to-byte, int-to-char, int-to-short 4165 */ 4166 /* unop vA, vB */ 4167 mov r3, rINST, lsr #12 @ r3<- B 4168 mov r9, rINST, lsr #8 @ r9<- A+ 4169 GET_VREG(r0, r3) @ r0<- vB 4170 and r9, r9, #15 4171 mov r0, r0, asl #16 @ optional op; may set condition codes 4172 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4173 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4174 GET_INST_OPCODE(ip) @ extract opcode from rINST 4175 SET_VREG(r0, r9) @ vAA<- r0 4176 GOTO_OPCODE(ip) @ jump to next instruction 4177 /* 9-10 instructions */ 4178 4179 4180/* ------------------------------ */ 4181 .balign 64 4182.L_OP_INT_TO_SHORT: /* 0x8f */ 4183/* File: armv5te/OP_INT_TO_SHORT.S */ 4184/* File: armv5te/unop.S */ 4185 /* 4186 * Generic 32-bit unary operation. Provide an "instr" line that 4187 * specifies an instruction that performs "result = op r0". 4188 * This could be an ARM instruction or a function call. 4189 * 4190 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4191 * int-to-byte, int-to-char, int-to-short 4192 */ 4193 /* unop vA, vB */ 4194 mov r3, rINST, lsr #12 @ r3<- B 4195 mov r9, rINST, lsr #8 @ r9<- A+ 4196 GET_VREG(r0, r3) @ r0<- vB 4197 and r9, r9, #15 4198 mov r0, r0, asl #16 @ optional op; may set condition codes 4199 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4200 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4201 GET_INST_OPCODE(ip) @ extract opcode from rINST 4202 SET_VREG(r0, r9) @ vAA<- r0 4203 GOTO_OPCODE(ip) @ jump to next instruction 4204 /* 9-10 instructions */ 4205 4206 4207/* ------------------------------ */ 4208 .balign 64 4209.L_OP_ADD_INT: /* 0x90 */ 4210/* File: armv5te/OP_ADD_INT.S */ 4211/* File: armv5te/binop.S */ 4212 /* 4213 * Generic 32-bit binary operation. Provide an "instr" line that 4214 * specifies an instruction that performs "result = r0 op r1". 4215 * This could be an ARM instruction or a function call. (If the result 4216 * comes back in a register other than r0, you can override "result".) 4217 * 4218 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4219 * vCC (r1). Useful for integer division and modulus. Note that we 4220 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4221 * handles it correctly. 4222 * 4223 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4224 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4225 * mul-float, div-float, rem-float 4226 */ 4227 /* binop vAA, vBB, vCC */ 4228 FETCH(r0, 1) @ r0<- CCBB 4229 mov r9, rINST, lsr #8 @ r9<- AA 4230 mov r3, r0, lsr #8 @ r3<- CC 4231 and r2, r0, #255 @ r2<- BB 4232 GET_VREG(r1, r3) @ r1<- vCC 4233 GET_VREG(r0, r2) @ r0<- vBB 4234 .if 0 4235 cmp r1, #0 @ is second operand zero? 4236 beq common_errDivideByZero 4237 .endif 4238 4239 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4240 @ optional op; may set condition codes 4241 add r0, r0, r1 @ r0<- op, r0-r3 changed 4242 GET_INST_OPCODE(ip) @ extract opcode from rINST 4243 SET_VREG(r0, r9) @ vAA<- r0 4244 GOTO_OPCODE(ip) @ jump to next instruction 4245 /* 11-14 instructions */ 4246 4247 4248 4249/* ------------------------------ */ 4250 .balign 64 4251.L_OP_SUB_INT: /* 0x91 */ 4252/* File: armv5te/OP_SUB_INT.S */ 4253/* File: armv5te/binop.S */ 4254 /* 4255 * Generic 32-bit binary operation. Provide an "instr" line that 4256 * specifies an instruction that performs "result = r0 op r1". 4257 * This could be an ARM instruction or a function call. (If the result 4258 * comes back in a register other than r0, you can override "result".) 4259 * 4260 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4261 * vCC (r1). Useful for integer division and modulus. Note that we 4262 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4263 * handles it correctly. 4264 * 4265 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4266 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4267 * mul-float, div-float, rem-float 4268 */ 4269 /* binop vAA, vBB, vCC */ 4270 FETCH(r0, 1) @ r0<- CCBB 4271 mov r9, rINST, lsr #8 @ r9<- AA 4272 mov r3, r0, lsr #8 @ r3<- CC 4273 and r2, r0, #255 @ r2<- BB 4274 GET_VREG(r1, r3) @ r1<- vCC 4275 GET_VREG(r0, r2) @ r0<- vBB 4276 .if 0 4277 cmp r1, #0 @ is second operand zero? 4278 beq common_errDivideByZero 4279 .endif 4280 4281 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4282 @ optional op; may set condition codes 4283 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4284 GET_INST_OPCODE(ip) @ extract opcode from rINST 4285 SET_VREG(r0, r9) @ vAA<- r0 4286 GOTO_OPCODE(ip) @ jump to next instruction 4287 /* 11-14 instructions */ 4288 4289 4290 4291/* ------------------------------ */ 4292 .balign 64 4293.L_OP_MUL_INT: /* 0x92 */ 4294/* File: armv5te/OP_MUL_INT.S */ 4295/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4296/* File: armv5te/binop.S */ 4297 /* 4298 * Generic 32-bit binary operation. Provide an "instr" line that 4299 * specifies an instruction that performs "result = r0 op r1". 4300 * This could be an ARM instruction or a function call. (If the result 4301 * comes back in a register other than r0, you can override "result".) 4302 * 4303 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4304 * vCC (r1). Useful for integer division and modulus. Note that we 4305 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4306 * handles it correctly. 4307 * 4308 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4309 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4310 * mul-float, div-float, rem-float 4311 */ 4312 /* binop vAA, vBB, vCC */ 4313 FETCH(r0, 1) @ r0<- CCBB 4314 mov r9, rINST, lsr #8 @ r9<- AA 4315 mov r3, r0, lsr #8 @ r3<- CC 4316 and r2, r0, #255 @ r2<- BB 4317 GET_VREG(r1, r3) @ r1<- vCC 4318 GET_VREG(r0, r2) @ r0<- vBB 4319 .if 0 4320 cmp r1, #0 @ is second operand zero? 4321 beq common_errDivideByZero 4322 .endif 4323 4324 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4325 @ optional op; may set condition codes 4326 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4327 GET_INST_OPCODE(ip) @ extract opcode from rINST 4328 SET_VREG(r0, r9) @ vAA<- r0 4329 GOTO_OPCODE(ip) @ jump to next instruction 4330 /* 11-14 instructions */ 4331 4332 4333 4334/* ------------------------------ */ 4335 .balign 64 4336.L_OP_DIV_INT: /* 0x93 */ 4337/* File: armv5te/OP_DIV_INT.S */ 4338/* File: armv5te/binop.S */ 4339 /* 4340 * Generic 32-bit binary operation. Provide an "instr" line that 4341 * specifies an instruction that performs "result = r0 op r1". 4342 * This could be an ARM instruction or a function call. (If the result 4343 * comes back in a register other than r0, you can override "result".) 4344 * 4345 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4346 * vCC (r1). Useful for integer division and modulus. Note that we 4347 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4348 * handles it correctly. 4349 * 4350 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4351 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4352 * mul-float, div-float, rem-float 4353 */ 4354 /* binop vAA, vBB, vCC */ 4355 FETCH(r0, 1) @ r0<- CCBB 4356 mov r9, rINST, lsr #8 @ r9<- AA 4357 mov r3, r0, lsr #8 @ r3<- CC 4358 and r2, r0, #255 @ r2<- BB 4359 GET_VREG(r1, r3) @ r1<- vCC 4360 GET_VREG(r0, r2) @ r0<- vBB 4361 .if 1 4362 cmp r1, #0 @ is second operand zero? 4363 beq common_errDivideByZero 4364 .endif 4365 4366 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4367 @ optional op; may set condition codes 4368 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4369 GET_INST_OPCODE(ip) @ extract opcode from rINST 4370 SET_VREG(r0, r9) @ vAA<- r0 4371 GOTO_OPCODE(ip) @ jump to next instruction 4372 /* 11-14 instructions */ 4373 4374 4375 4376/* ------------------------------ */ 4377 .balign 64 4378.L_OP_REM_INT: /* 0x94 */ 4379/* File: armv5te/OP_REM_INT.S */ 4380/* idivmod returns quotient in r0 and remainder in r1 */ 4381/* File: armv5te/binop.S */ 4382 /* 4383 * Generic 32-bit binary operation. Provide an "instr" line that 4384 * specifies an instruction that performs "result = r0 op r1". 4385 * This could be an ARM instruction or a function call. (If the result 4386 * comes back in a register other than r0, you can override "result".) 4387 * 4388 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4389 * vCC (r1). Useful for integer division and modulus. Note that we 4390 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4391 * handles it correctly. 4392 * 4393 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4394 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4395 * mul-float, div-float, rem-float 4396 */ 4397 /* binop vAA, vBB, vCC */ 4398 FETCH(r0, 1) @ r0<- CCBB 4399 mov r9, rINST, lsr #8 @ r9<- AA 4400 mov r3, r0, lsr #8 @ r3<- CC 4401 and r2, r0, #255 @ r2<- BB 4402 GET_VREG(r1, r3) @ r1<- vCC 4403 GET_VREG(r0, r2) @ r0<- vBB 4404 .if 1 4405 cmp r1, #0 @ is second operand zero? 4406 beq common_errDivideByZero 4407 .endif 4408 4409 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4410 @ optional op; may set condition codes 4411 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4412 GET_INST_OPCODE(ip) @ extract opcode from rINST 4413 SET_VREG(r1, r9) @ vAA<- r1 4414 GOTO_OPCODE(ip) @ jump to next instruction 4415 /* 11-14 instructions */ 4416 4417 4418 4419/* ------------------------------ */ 4420 .balign 64 4421.L_OP_AND_INT: /* 0x95 */ 4422/* File: armv5te/OP_AND_INT.S */ 4423/* File: armv5te/binop.S */ 4424 /* 4425 * Generic 32-bit binary operation. Provide an "instr" line that 4426 * specifies an instruction that performs "result = r0 op r1". 4427 * This could be an ARM instruction or a function call. (If the result 4428 * comes back in a register other than r0, you can override "result".) 4429 * 4430 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4431 * vCC (r1). Useful for integer division and modulus. Note that we 4432 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4433 * handles it correctly. 4434 * 4435 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4436 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4437 * mul-float, div-float, rem-float 4438 */ 4439 /* binop vAA, vBB, vCC */ 4440 FETCH(r0, 1) @ r0<- CCBB 4441 mov r9, rINST, lsr #8 @ r9<- AA 4442 mov r3, r0, lsr #8 @ r3<- CC 4443 and r2, r0, #255 @ r2<- BB 4444 GET_VREG(r1, r3) @ r1<- vCC 4445 GET_VREG(r0, r2) @ r0<- vBB 4446 .if 0 4447 cmp r1, #0 @ is second operand zero? 4448 beq common_errDivideByZero 4449 .endif 4450 4451 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4452 @ optional op; may set condition codes 4453 and r0, r0, r1 @ r0<- op, r0-r3 changed 4454 GET_INST_OPCODE(ip) @ extract opcode from rINST 4455 SET_VREG(r0, r9) @ vAA<- r0 4456 GOTO_OPCODE(ip) @ jump to next instruction 4457 /* 11-14 instructions */ 4458 4459 4460 4461/* ------------------------------ */ 4462 .balign 64 4463.L_OP_OR_INT: /* 0x96 */ 4464/* File: armv5te/OP_OR_INT.S */ 4465/* File: armv5te/binop.S */ 4466 /* 4467 * Generic 32-bit binary operation. Provide an "instr" line that 4468 * specifies an instruction that performs "result = r0 op r1". 4469 * This could be an ARM instruction or a function call. (If the result 4470 * comes back in a register other than r0, you can override "result".) 4471 * 4472 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4473 * vCC (r1). Useful for integer division and modulus. Note that we 4474 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4475 * handles it correctly. 4476 * 4477 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4478 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4479 * mul-float, div-float, rem-float 4480 */ 4481 /* binop vAA, vBB, vCC */ 4482 FETCH(r0, 1) @ r0<- CCBB 4483 mov r9, rINST, lsr #8 @ r9<- AA 4484 mov r3, r0, lsr #8 @ r3<- CC 4485 and r2, r0, #255 @ r2<- BB 4486 GET_VREG(r1, r3) @ r1<- vCC 4487 GET_VREG(r0, r2) @ r0<- vBB 4488 .if 0 4489 cmp r1, #0 @ is second operand zero? 4490 beq common_errDivideByZero 4491 .endif 4492 4493 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4494 @ optional op; may set condition codes 4495 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4496 GET_INST_OPCODE(ip) @ extract opcode from rINST 4497 SET_VREG(r0, r9) @ vAA<- r0 4498 GOTO_OPCODE(ip) @ jump to next instruction 4499 /* 11-14 instructions */ 4500 4501 4502 4503/* ------------------------------ */ 4504 .balign 64 4505.L_OP_XOR_INT: /* 0x97 */ 4506/* File: armv5te/OP_XOR_INT.S */ 4507/* File: armv5te/binop.S */ 4508 /* 4509 * Generic 32-bit binary operation. Provide an "instr" line that 4510 * specifies an instruction that performs "result = r0 op r1". 4511 * This could be an ARM instruction or a function call. (If the result 4512 * comes back in a register other than r0, you can override "result".) 4513 * 4514 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4515 * vCC (r1). Useful for integer division and modulus. Note that we 4516 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4517 * handles it correctly. 4518 * 4519 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4520 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4521 * mul-float, div-float, rem-float 4522 */ 4523 /* binop vAA, vBB, vCC */ 4524 FETCH(r0, 1) @ r0<- CCBB 4525 mov r9, rINST, lsr #8 @ r9<- AA 4526 mov r3, r0, lsr #8 @ r3<- CC 4527 and r2, r0, #255 @ r2<- BB 4528 GET_VREG(r1, r3) @ r1<- vCC 4529 GET_VREG(r0, r2) @ r0<- vBB 4530 .if 0 4531 cmp r1, #0 @ is second operand zero? 4532 beq common_errDivideByZero 4533 .endif 4534 4535 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4536 @ optional op; may set condition codes 4537 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4538 GET_INST_OPCODE(ip) @ extract opcode from rINST 4539 SET_VREG(r0, r9) @ vAA<- r0 4540 GOTO_OPCODE(ip) @ jump to next instruction 4541 /* 11-14 instructions */ 4542 4543 4544 4545/* ------------------------------ */ 4546 .balign 64 4547.L_OP_SHL_INT: /* 0x98 */ 4548/* File: armv5te/OP_SHL_INT.S */ 4549/* File: armv5te/binop.S */ 4550 /* 4551 * Generic 32-bit binary operation. Provide an "instr" line that 4552 * specifies an instruction that performs "result = r0 op r1". 4553 * This could be an ARM instruction or a function call. (If the result 4554 * comes back in a register other than r0, you can override "result".) 4555 * 4556 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4557 * vCC (r1). Useful for integer division and modulus. Note that we 4558 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4559 * handles it correctly. 4560 * 4561 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4562 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4563 * mul-float, div-float, rem-float 4564 */ 4565 /* binop vAA, vBB, vCC */ 4566 FETCH(r0, 1) @ r0<- CCBB 4567 mov r9, rINST, lsr #8 @ r9<- AA 4568 mov r3, r0, lsr #8 @ r3<- CC 4569 and r2, r0, #255 @ r2<- BB 4570 GET_VREG(r1, r3) @ r1<- vCC 4571 GET_VREG(r0, r2) @ r0<- vBB 4572 .if 0 4573 cmp r1, #0 @ is second operand zero? 4574 beq common_errDivideByZero 4575 .endif 4576 4577 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4578 and r1, r1, #31 @ optional op; may set condition codes 4579 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4580 GET_INST_OPCODE(ip) @ extract opcode from rINST 4581 SET_VREG(r0, r9) @ vAA<- r0 4582 GOTO_OPCODE(ip) @ jump to next instruction 4583 /* 11-14 instructions */ 4584 4585 4586 4587/* ------------------------------ */ 4588 .balign 64 4589.L_OP_SHR_INT: /* 0x99 */ 4590/* File: armv5te/OP_SHR_INT.S */ 4591/* File: armv5te/binop.S */ 4592 /* 4593 * Generic 32-bit binary operation. Provide an "instr" line that 4594 * specifies an instruction that performs "result = r0 op r1". 4595 * This could be an ARM instruction or a function call. (If the result 4596 * comes back in a register other than r0, you can override "result".) 4597 * 4598 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4599 * vCC (r1). Useful for integer division and modulus. Note that we 4600 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4601 * handles it correctly. 4602 * 4603 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4604 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4605 * mul-float, div-float, rem-float 4606 */ 4607 /* binop vAA, vBB, vCC */ 4608 FETCH(r0, 1) @ r0<- CCBB 4609 mov r9, rINST, lsr #8 @ r9<- AA 4610 mov r3, r0, lsr #8 @ r3<- CC 4611 and r2, r0, #255 @ r2<- BB 4612 GET_VREG(r1, r3) @ r1<- vCC 4613 GET_VREG(r0, r2) @ r0<- vBB 4614 .if 0 4615 cmp r1, #0 @ is second operand zero? 4616 beq common_errDivideByZero 4617 .endif 4618 4619 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4620 and r1, r1, #31 @ optional op; may set condition codes 4621 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4622 GET_INST_OPCODE(ip) @ extract opcode from rINST 4623 SET_VREG(r0, r9) @ vAA<- r0 4624 GOTO_OPCODE(ip) @ jump to next instruction 4625 /* 11-14 instructions */ 4626 4627 4628 4629/* ------------------------------ */ 4630 .balign 64 4631.L_OP_USHR_INT: /* 0x9a */ 4632/* File: armv5te/OP_USHR_INT.S */ 4633/* File: armv5te/binop.S */ 4634 /* 4635 * Generic 32-bit binary operation. Provide an "instr" line that 4636 * specifies an instruction that performs "result = r0 op r1". 4637 * This could be an ARM instruction or a function call. (If the result 4638 * comes back in a register other than r0, you can override "result".) 4639 * 4640 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4641 * vCC (r1). Useful for integer division and modulus. Note that we 4642 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4643 * handles it correctly. 4644 * 4645 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4646 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4647 * mul-float, div-float, rem-float 4648 */ 4649 /* binop vAA, vBB, vCC */ 4650 FETCH(r0, 1) @ r0<- CCBB 4651 mov r9, rINST, lsr #8 @ r9<- AA 4652 mov r3, r0, lsr #8 @ r3<- CC 4653 and r2, r0, #255 @ r2<- BB 4654 GET_VREG(r1, r3) @ r1<- vCC 4655 GET_VREG(r0, r2) @ r0<- vBB 4656 .if 0 4657 cmp r1, #0 @ is second operand zero? 4658 beq common_errDivideByZero 4659 .endif 4660 4661 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4662 and r1, r1, #31 @ optional op; may set condition codes 4663 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4664 GET_INST_OPCODE(ip) @ extract opcode from rINST 4665 SET_VREG(r0, r9) @ vAA<- r0 4666 GOTO_OPCODE(ip) @ jump to next instruction 4667 /* 11-14 instructions */ 4668 4669 4670 4671/* ------------------------------ */ 4672 .balign 64 4673.L_OP_ADD_LONG: /* 0x9b */ 4674/* File: armv5te/OP_ADD_LONG.S */ 4675/* File: armv5te/binopWide.S */ 4676 /* 4677 * Generic 64-bit binary operation. Provide an "instr" line that 4678 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4679 * This could be an ARM instruction or a function call. (If the result 4680 * comes back in a register other than r0, you can override "result".) 4681 * 4682 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4683 * vCC (r1). Useful for integer division and modulus. 4684 * 4685 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4686 * xor-long, add-double, sub-double, mul-double, div-double, 4687 * rem-double 4688 * 4689 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4690 */ 4691 /* binop vAA, vBB, vCC */ 4692 FETCH(r0, 1) @ r0<- CCBB 4693 mov r9, rINST, lsr #8 @ r9<- AA 4694 and r2, r0, #255 @ r2<- BB 4695 mov r3, r0, lsr #8 @ r3<- CC 4696 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4697 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4698 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4699 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4700 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4701 .if 0 4702 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4703 beq common_errDivideByZero 4704 .endif 4705 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4706 4707 adds r0, r0, r2 @ optional op; may set condition codes 4708 adc r1, r1, r3 @ result<- op, r0-r3 changed 4709 GET_INST_OPCODE(ip) @ extract opcode from rINST 4710 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4711 GOTO_OPCODE(ip) @ jump to next instruction 4712 /* 14-17 instructions */ 4713 4714 4715 4716/* ------------------------------ */ 4717 .balign 64 4718.L_OP_SUB_LONG: /* 0x9c */ 4719/* File: armv5te/OP_SUB_LONG.S */ 4720/* File: armv5te/binopWide.S */ 4721 /* 4722 * Generic 64-bit binary operation. Provide an "instr" line that 4723 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4724 * This could be an ARM instruction or a function call. (If the result 4725 * comes back in a register other than r0, you can override "result".) 4726 * 4727 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4728 * vCC (r1). Useful for integer division and modulus. 4729 * 4730 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4731 * xor-long, add-double, sub-double, mul-double, div-double, 4732 * rem-double 4733 * 4734 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4735 */ 4736 /* binop vAA, vBB, vCC */ 4737 FETCH(r0, 1) @ r0<- CCBB 4738 mov r9, rINST, lsr #8 @ r9<- AA 4739 and r2, r0, #255 @ r2<- BB 4740 mov r3, r0, lsr #8 @ r3<- CC 4741 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4742 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4743 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4744 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4745 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4746 .if 0 4747 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4748 beq common_errDivideByZero 4749 .endif 4750 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4751 4752 subs r0, r0, r2 @ optional op; may set condition codes 4753 sbc r1, r1, r3 @ result<- op, r0-r3 changed 4754 GET_INST_OPCODE(ip) @ extract opcode from rINST 4755 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4756 GOTO_OPCODE(ip) @ jump to next instruction 4757 /* 14-17 instructions */ 4758 4759 4760 4761/* ------------------------------ */ 4762 .balign 64 4763.L_OP_MUL_LONG: /* 0x9d */ 4764/* File: armv5te/OP_MUL_LONG.S */ 4765 /* 4766 * Signed 64-bit integer multiply. 4767 * 4768 * Consider WXxYZ (r1r0 x r3r2) with a long multiply: 4769 * WX 4770 * x YZ 4771 * -------- 4772 * ZW ZX 4773 * YW YX 4774 * 4775 * The low word of the result holds ZX, the high word holds 4776 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because 4777 * it doesn't fit in the low 64 bits. 4778 * 4779 * Unlike most ARM math operations, multiply instructions have 4780 * restrictions on using the same register more than once (Rd and Rm 4781 * cannot be the same). 4782 */ 4783 /* mul-long vAA, vBB, vCC */ 4784 FETCH(r0, 1) @ r0<- CCBB 4785 and r2, r0, #255 @ r2<- BB 4786 mov r3, r0, lsr #8 @ r3<- CC 4787 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4788 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4789 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4790 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4791 mul ip, r2, r1 @ ip<- ZxW 4792 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 4793 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 4794 mov r0, rINST, lsr #8 @ r0<- AA 4795 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 4796 add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] 4797 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4798 b .LOP_MUL_LONG_finish 4799 4800/* ------------------------------ */ 4801 .balign 64 4802.L_OP_DIV_LONG: /* 0x9e */ 4803/* File: armv5te/OP_DIV_LONG.S */ 4804/* File: armv5te/binopWide.S */ 4805 /* 4806 * Generic 64-bit binary operation. Provide an "instr" line that 4807 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4808 * This could be an ARM instruction or a function call. (If the result 4809 * comes back in a register other than r0, you can override "result".) 4810 * 4811 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4812 * vCC (r1). Useful for integer division and modulus. 4813 * 4814 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4815 * xor-long, add-double, sub-double, mul-double, div-double, 4816 * rem-double 4817 * 4818 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4819 */ 4820 /* binop vAA, vBB, vCC */ 4821 FETCH(r0, 1) @ r0<- CCBB 4822 mov r9, rINST, lsr #8 @ r9<- AA 4823 and r2, r0, #255 @ r2<- BB 4824 mov r3, r0, lsr #8 @ r3<- CC 4825 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4826 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4827 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4828 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4829 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4830 .if 1 4831 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4832 beq common_errDivideByZero 4833 .endif 4834 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4835 4836 @ optional op; may set condition codes 4837 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4838 GET_INST_OPCODE(ip) @ extract opcode from rINST 4839 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4840 GOTO_OPCODE(ip) @ jump to next instruction 4841 /* 14-17 instructions */ 4842 4843 4844 4845/* ------------------------------ */ 4846 .balign 64 4847.L_OP_REM_LONG: /* 0x9f */ 4848/* File: armv5te/OP_REM_LONG.S */ 4849/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 4850/* File: armv5te/binopWide.S */ 4851 /* 4852 * Generic 64-bit binary operation. Provide an "instr" line that 4853 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4854 * This could be an ARM instruction or a function call. (If the result 4855 * comes back in a register other than r0, you can override "result".) 4856 * 4857 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4858 * vCC (r1). Useful for integer division and modulus. 4859 * 4860 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4861 * xor-long, add-double, sub-double, mul-double, div-double, 4862 * rem-double 4863 * 4864 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4865 */ 4866 /* binop vAA, vBB, vCC */ 4867 FETCH(r0, 1) @ r0<- CCBB 4868 mov r9, rINST, lsr #8 @ r9<- AA 4869 and r2, r0, #255 @ r2<- BB 4870 mov r3, r0, lsr #8 @ r3<- CC 4871 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4872 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4873 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4874 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4875 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4876 .if 1 4877 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4878 beq common_errDivideByZero 4879 .endif 4880 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4881 4882 @ optional op; may set condition codes 4883 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4884 GET_INST_OPCODE(ip) @ extract opcode from rINST 4885 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 4886 GOTO_OPCODE(ip) @ jump to next instruction 4887 /* 14-17 instructions */ 4888 4889 4890 4891/* ------------------------------ */ 4892 .balign 64 4893.L_OP_AND_LONG: /* 0xa0 */ 4894/* File: armv5te/OP_AND_LONG.S */ 4895/* File: armv5te/binopWide.S */ 4896 /* 4897 * Generic 64-bit binary operation. Provide an "instr" line that 4898 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4899 * This could be an ARM instruction or a function call. (If the result 4900 * comes back in a register other than r0, you can override "result".) 4901 * 4902 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4903 * vCC (r1). Useful for integer division and modulus. 4904 * 4905 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4906 * xor-long, add-double, sub-double, mul-double, div-double, 4907 * rem-double 4908 * 4909 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4910 */ 4911 /* binop vAA, vBB, vCC */ 4912 FETCH(r0, 1) @ r0<- CCBB 4913 mov r9, rINST, lsr #8 @ r9<- AA 4914 and r2, r0, #255 @ r2<- BB 4915 mov r3, r0, lsr #8 @ r3<- CC 4916 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4917 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4918 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4919 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4920 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4921 .if 0 4922 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4923 beq common_errDivideByZero 4924 .endif 4925 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4926 4927 and r0, r0, r2 @ optional op; may set condition codes 4928 and r1, r1, r3 @ result<- op, r0-r3 changed 4929 GET_INST_OPCODE(ip) @ extract opcode from rINST 4930 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4931 GOTO_OPCODE(ip) @ jump to next instruction 4932 /* 14-17 instructions */ 4933 4934 4935 4936/* ------------------------------ */ 4937 .balign 64 4938.L_OP_OR_LONG: /* 0xa1 */ 4939/* File: armv5te/OP_OR_LONG.S */ 4940/* File: armv5te/binopWide.S */ 4941 /* 4942 * Generic 64-bit binary operation. Provide an "instr" line that 4943 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4944 * This could be an ARM instruction or a function call. (If the result 4945 * comes back in a register other than r0, you can override "result".) 4946 * 4947 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4948 * vCC (r1). Useful for integer division and modulus. 4949 * 4950 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4951 * xor-long, add-double, sub-double, mul-double, div-double, 4952 * rem-double 4953 * 4954 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4955 */ 4956 /* binop vAA, vBB, vCC */ 4957 FETCH(r0, 1) @ r0<- CCBB 4958 mov r9, rINST, lsr #8 @ r9<- AA 4959 and r2, r0, #255 @ r2<- BB 4960 mov r3, r0, lsr #8 @ r3<- CC 4961 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4962 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4963 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4964 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4965 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4966 .if 0 4967 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4968 beq common_errDivideByZero 4969 .endif 4970 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4971 4972 orr r0, r0, r2 @ optional op; may set condition codes 4973 orr r1, r1, r3 @ result<- op, r0-r3 changed 4974 GET_INST_OPCODE(ip) @ extract opcode from rINST 4975 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4976 GOTO_OPCODE(ip) @ jump to next instruction 4977 /* 14-17 instructions */ 4978 4979 4980 4981/* ------------------------------ */ 4982 .balign 64 4983.L_OP_XOR_LONG: /* 0xa2 */ 4984/* File: armv5te/OP_XOR_LONG.S */ 4985/* File: armv5te/binopWide.S */ 4986 /* 4987 * Generic 64-bit binary operation. Provide an "instr" line that 4988 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4989 * This could be an ARM instruction or a function call. (If the result 4990 * comes back in a register other than r0, you can override "result".) 4991 * 4992 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4993 * vCC (r1). Useful for integer division and modulus. 4994 * 4995 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4996 * xor-long, add-double, sub-double, mul-double, div-double, 4997 * rem-double 4998 * 4999 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5000 */ 5001 /* binop vAA, vBB, vCC */ 5002 FETCH(r0, 1) @ r0<- CCBB 5003 mov r9, rINST, lsr #8 @ r9<- AA 5004 and r2, r0, #255 @ r2<- BB 5005 mov r3, r0, lsr #8 @ r3<- CC 5006 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5007 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5008 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5009 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5010 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5011 .if 0 5012 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5013 beq common_errDivideByZero 5014 .endif 5015 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5016 5017 eor r0, r0, r2 @ optional op; may set condition codes 5018 eor r1, r1, r3 @ result<- op, r0-r3 changed 5019 GET_INST_OPCODE(ip) @ extract opcode from rINST 5020 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5021 GOTO_OPCODE(ip) @ jump to next instruction 5022 /* 14-17 instructions */ 5023 5024 5025 5026/* ------------------------------ */ 5027 .balign 64 5028.L_OP_SHL_LONG: /* 0xa3 */ 5029/* File: armv5te/OP_SHL_LONG.S */ 5030 /* 5031 * Long integer shift. This is different from the generic 32/64-bit 5032 * binary operations because vAA/vBB are 64-bit but vCC (the shift 5033 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 5034 * 6 bits of the shift distance. 5035 */ 5036 /* shl-long vAA, vBB, vCC */ 5037 FETCH(r0, 1) @ r0<- CCBB 5038 mov r9, rINST, lsr #8 @ r9<- AA 5039 and r3, r0, #255 @ r3<- BB 5040 mov r0, r0, lsr #8 @ r0<- CC 5041 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 5042 GET_VREG(r2, r0) @ r2<- vCC 5043 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 5044 and r2, r2, #63 @ r2<- r2 & 0x3f 5045 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5046 5047 mov r1, r1, asl r2 @ r1<- r1 << r2 5048 rsb r3, r2, #32 @ r3<- 32 - r2 5049 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 5050 subs ip, r2, #32 @ ip<- r2 - 32 5051 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 5052 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5053 b .LOP_SHL_LONG_finish 5054 5055/* ------------------------------ */ 5056 .balign 64 5057.L_OP_SHR_LONG: /* 0xa4 */ 5058/* File: armv5te/OP_SHR_LONG.S */ 5059 /* 5060 * Long integer shift. This is different from the generic 32/64-bit 5061 * binary operations because vAA/vBB are 64-bit but vCC (the shift 5062 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 5063 * 6 bits of the shift distance. 5064 */ 5065 /* shr-long vAA, vBB, vCC */ 5066 FETCH(r0, 1) @ r0<- CCBB 5067 mov r9, rINST, lsr #8 @ r9<- AA 5068 and r3, r0, #255 @ r3<- BB 5069 mov r0, r0, lsr #8 @ r0<- CC 5070 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 5071 GET_VREG(r2, r0) @ r2<- vCC 5072 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 5073 and r2, r2, #63 @ r0<- r0 & 0x3f 5074 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5075 5076 mov r0, r0, lsr r2 @ r0<- r2 >> r2 5077 rsb r3, r2, #32 @ r3<- 32 - r2 5078 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 5079 subs ip, r2, #32 @ ip<- r2 - 32 5080 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 5081 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5082 b .LOP_SHR_LONG_finish 5083 5084/* ------------------------------ */ 5085 .balign 64 5086.L_OP_USHR_LONG: /* 0xa5 */ 5087/* File: armv5te/OP_USHR_LONG.S */ 5088 /* 5089 * Long integer shift. This is different from the generic 32/64-bit 5090 * binary operations because vAA/vBB are 64-bit but vCC (the shift 5091 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 5092 * 6 bits of the shift distance. 5093 */ 5094 /* ushr-long vAA, vBB, vCC */ 5095 FETCH(r0, 1) @ r0<- CCBB 5096 mov r9, rINST, lsr #8 @ r9<- AA 5097 and r3, r0, #255 @ r3<- BB 5098 mov r0, r0, lsr #8 @ r0<- CC 5099 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 5100 GET_VREG(r2, r0) @ r2<- vCC 5101 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 5102 and r2, r2, #63 @ r0<- r0 & 0x3f 5103 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5104 5105 mov r0, r0, lsr r2 @ r0<- r2 >> r2 5106 rsb r3, r2, #32 @ r3<- 32 - r2 5107 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 5108 subs ip, r2, #32 @ ip<- r2 - 32 5109 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 5110 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5111 b .LOP_USHR_LONG_finish 5112 5113/* ------------------------------ */ 5114 .balign 64 5115.L_OP_ADD_FLOAT: /* 0xa6 */ 5116/* File: armv5te/OP_ADD_FLOAT.S */ 5117/* File: armv5te/binop.S */ 5118 /* 5119 * Generic 32-bit binary operation. Provide an "instr" line that 5120 * specifies an instruction that performs "result = r0 op r1". 5121 * This could be an ARM instruction or a function call. (If the result 5122 * comes back in a register other than r0, you can override "result".) 5123 * 5124 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5125 * vCC (r1). Useful for integer division and modulus. Note that we 5126 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5127 * handles it correctly. 5128 * 5129 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5130 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5131 * mul-float, div-float, rem-float 5132 */ 5133 /* binop vAA, vBB, vCC */ 5134 FETCH(r0, 1) @ r0<- CCBB 5135 mov r9, rINST, lsr #8 @ r9<- AA 5136 mov r3, r0, lsr #8 @ r3<- CC 5137 and r2, r0, #255 @ r2<- BB 5138 GET_VREG(r1, r3) @ r1<- vCC 5139 GET_VREG(r0, r2) @ r0<- vBB 5140 .if 0 5141 cmp r1, #0 @ is second operand zero? 5142 beq common_errDivideByZero 5143 .endif 5144 5145 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5146 @ optional op; may set condition codes 5147 bl __aeabi_fadd @ r0<- op, r0-r3 changed 5148 GET_INST_OPCODE(ip) @ extract opcode from rINST 5149 SET_VREG(r0, r9) @ vAA<- r0 5150 GOTO_OPCODE(ip) @ jump to next instruction 5151 /* 11-14 instructions */ 5152 5153 5154 5155/* ------------------------------ */ 5156 .balign 64 5157.L_OP_SUB_FLOAT: /* 0xa7 */ 5158/* File: armv5te/OP_SUB_FLOAT.S */ 5159/* File: armv5te/binop.S */ 5160 /* 5161 * Generic 32-bit binary operation. Provide an "instr" line that 5162 * specifies an instruction that performs "result = r0 op r1". 5163 * This could be an ARM instruction or a function call. (If the result 5164 * comes back in a register other than r0, you can override "result".) 5165 * 5166 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5167 * vCC (r1). Useful for integer division and modulus. Note that we 5168 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5169 * handles it correctly. 5170 * 5171 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5172 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5173 * mul-float, div-float, rem-float 5174 */ 5175 /* binop vAA, vBB, vCC */ 5176 FETCH(r0, 1) @ r0<- CCBB 5177 mov r9, rINST, lsr #8 @ r9<- AA 5178 mov r3, r0, lsr #8 @ r3<- CC 5179 and r2, r0, #255 @ r2<- BB 5180 GET_VREG(r1, r3) @ r1<- vCC 5181 GET_VREG(r0, r2) @ r0<- vBB 5182 .if 0 5183 cmp r1, #0 @ is second operand zero? 5184 beq common_errDivideByZero 5185 .endif 5186 5187 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5188 @ optional op; may set condition codes 5189 bl __aeabi_fsub @ r0<- op, r0-r3 changed 5190 GET_INST_OPCODE(ip) @ extract opcode from rINST 5191 SET_VREG(r0, r9) @ vAA<- r0 5192 GOTO_OPCODE(ip) @ jump to next instruction 5193 /* 11-14 instructions */ 5194 5195 5196 5197/* ------------------------------ */ 5198 .balign 64 5199.L_OP_MUL_FLOAT: /* 0xa8 */ 5200/* File: armv5te/OP_MUL_FLOAT.S */ 5201/* File: armv5te/binop.S */ 5202 /* 5203 * Generic 32-bit binary operation. Provide an "instr" line that 5204 * specifies an instruction that performs "result = r0 op r1". 5205 * This could be an ARM instruction or a function call. (If the result 5206 * comes back in a register other than r0, you can override "result".) 5207 * 5208 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5209 * vCC (r1). Useful for integer division and modulus. Note that we 5210 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5211 * handles it correctly. 5212 * 5213 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5214 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5215 * mul-float, div-float, rem-float 5216 */ 5217 /* binop vAA, vBB, vCC */ 5218 FETCH(r0, 1) @ r0<- CCBB 5219 mov r9, rINST, lsr #8 @ r9<- AA 5220 mov r3, r0, lsr #8 @ r3<- CC 5221 and r2, r0, #255 @ r2<- BB 5222 GET_VREG(r1, r3) @ r1<- vCC 5223 GET_VREG(r0, r2) @ r0<- vBB 5224 .if 0 5225 cmp r1, #0 @ is second operand zero? 5226 beq common_errDivideByZero 5227 .endif 5228 5229 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5230 @ optional op; may set condition codes 5231 bl __aeabi_fmul @ r0<- op, r0-r3 changed 5232 GET_INST_OPCODE(ip) @ extract opcode from rINST 5233 SET_VREG(r0, r9) @ vAA<- r0 5234 GOTO_OPCODE(ip) @ jump to next instruction 5235 /* 11-14 instructions */ 5236 5237 5238 5239/* ------------------------------ */ 5240 .balign 64 5241.L_OP_DIV_FLOAT: /* 0xa9 */ 5242/* File: armv5te/OP_DIV_FLOAT.S */ 5243/* File: armv5te/binop.S */ 5244 /* 5245 * Generic 32-bit binary operation. Provide an "instr" line that 5246 * specifies an instruction that performs "result = r0 op r1". 5247 * This could be an ARM instruction or a function call. (If the result 5248 * comes back in a register other than r0, you can override "result".) 5249 * 5250 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5251 * vCC (r1). Useful for integer division and modulus. Note that we 5252 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5253 * handles it correctly. 5254 * 5255 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5256 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5257 * mul-float, div-float, rem-float 5258 */ 5259 /* binop vAA, vBB, vCC */ 5260 FETCH(r0, 1) @ r0<- CCBB 5261 mov r9, rINST, lsr #8 @ r9<- AA 5262 mov r3, r0, lsr #8 @ r3<- CC 5263 and r2, r0, #255 @ r2<- BB 5264 GET_VREG(r1, r3) @ r1<- vCC 5265 GET_VREG(r0, r2) @ r0<- vBB 5266 .if 0 5267 cmp r1, #0 @ is second operand zero? 5268 beq common_errDivideByZero 5269 .endif 5270 5271 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5272 @ optional op; may set condition codes 5273 bl __aeabi_fdiv @ r0<- op, r0-r3 changed 5274 GET_INST_OPCODE(ip) @ extract opcode from rINST 5275 SET_VREG(r0, r9) @ vAA<- r0 5276 GOTO_OPCODE(ip) @ jump to next instruction 5277 /* 11-14 instructions */ 5278 5279 5280 5281/* ------------------------------ */ 5282 .balign 64 5283.L_OP_REM_FLOAT: /* 0xaa */ 5284/* File: armv5te/OP_REM_FLOAT.S */ 5285/* EABI doesn't define a float remainder function, but libm does */ 5286/* File: armv5te/binop.S */ 5287 /* 5288 * Generic 32-bit binary operation. Provide an "instr" line that 5289 * specifies an instruction that performs "result = r0 op r1". 5290 * This could be an ARM instruction or a function call. (If the result 5291 * comes back in a register other than r0, you can override "result".) 5292 * 5293 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5294 * vCC (r1). Useful for integer division and modulus. Note that we 5295 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5296 * handles it correctly. 5297 * 5298 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5299 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5300 * mul-float, div-float, rem-float 5301 */ 5302 /* binop vAA, vBB, vCC */ 5303 FETCH(r0, 1) @ r0<- CCBB 5304 mov r9, rINST, lsr #8 @ r9<- AA 5305 mov r3, r0, lsr #8 @ r3<- CC 5306 and r2, r0, #255 @ r2<- BB 5307 GET_VREG(r1, r3) @ r1<- vCC 5308 GET_VREG(r0, r2) @ r0<- vBB 5309 .if 0 5310 cmp r1, #0 @ is second operand zero? 5311 beq common_errDivideByZero 5312 .endif 5313 5314 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5315 @ optional op; may set condition codes 5316 bl fmodf @ r0<- op, r0-r3 changed 5317 GET_INST_OPCODE(ip) @ extract opcode from rINST 5318 SET_VREG(r0, r9) @ vAA<- r0 5319 GOTO_OPCODE(ip) @ jump to next instruction 5320 /* 11-14 instructions */ 5321 5322 5323 5324/* ------------------------------ */ 5325 .balign 64 5326.L_OP_ADD_DOUBLE: /* 0xab */ 5327/* File: armv5te/OP_ADD_DOUBLE.S */ 5328/* File: armv5te/binopWide.S */ 5329 /* 5330 * Generic 64-bit binary operation. Provide an "instr" line that 5331 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5332 * This could be an ARM instruction or a function call. (If the result 5333 * comes back in a register other than r0, you can override "result".) 5334 * 5335 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5336 * vCC (r1). Useful for integer division and modulus. 5337 * 5338 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5339 * xor-long, add-double, sub-double, mul-double, div-double, 5340 * rem-double 5341 * 5342 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5343 */ 5344 /* binop vAA, vBB, vCC */ 5345 FETCH(r0, 1) @ r0<- CCBB 5346 mov r9, rINST, lsr #8 @ r9<- AA 5347 and r2, r0, #255 @ r2<- BB 5348 mov r3, r0, lsr #8 @ r3<- CC 5349 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5350 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5351 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5352 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5353 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5354 .if 0 5355 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5356 beq common_errDivideByZero 5357 .endif 5358 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5359 5360 @ optional op; may set condition codes 5361 bl __aeabi_dadd @ result<- op, r0-r3 changed 5362 GET_INST_OPCODE(ip) @ extract opcode from rINST 5363 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5364 GOTO_OPCODE(ip) @ jump to next instruction 5365 /* 14-17 instructions */ 5366 5367 5368 5369/* ------------------------------ */ 5370 .balign 64 5371.L_OP_SUB_DOUBLE: /* 0xac */ 5372/* File: armv5te/OP_SUB_DOUBLE.S */ 5373/* File: armv5te/binopWide.S */ 5374 /* 5375 * Generic 64-bit binary operation. Provide an "instr" line that 5376 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5377 * This could be an ARM instruction or a function call. (If the result 5378 * comes back in a register other than r0, you can override "result".) 5379 * 5380 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5381 * vCC (r1). Useful for integer division and modulus. 5382 * 5383 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5384 * xor-long, add-double, sub-double, mul-double, div-double, 5385 * rem-double 5386 * 5387 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5388 */ 5389 /* binop vAA, vBB, vCC */ 5390 FETCH(r0, 1) @ r0<- CCBB 5391 mov r9, rINST, lsr #8 @ r9<- AA 5392 and r2, r0, #255 @ r2<- BB 5393 mov r3, r0, lsr #8 @ r3<- CC 5394 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5395 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5396 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5397 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5398 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5399 .if 0 5400 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5401 beq common_errDivideByZero 5402 .endif 5403 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5404 5405 @ optional op; may set condition codes 5406 bl __aeabi_dsub @ result<- op, r0-r3 changed 5407 GET_INST_OPCODE(ip) @ extract opcode from rINST 5408 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5409 GOTO_OPCODE(ip) @ jump to next instruction 5410 /* 14-17 instructions */ 5411 5412 5413 5414/* ------------------------------ */ 5415 .balign 64 5416.L_OP_MUL_DOUBLE: /* 0xad */ 5417/* File: armv5te/OP_MUL_DOUBLE.S */ 5418/* File: armv5te/binopWide.S */ 5419 /* 5420 * Generic 64-bit binary operation. Provide an "instr" line that 5421 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5422 * This could be an ARM instruction or a function call. (If the result 5423 * comes back in a register other than r0, you can override "result".) 5424 * 5425 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5426 * vCC (r1). Useful for integer division and modulus. 5427 * 5428 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5429 * xor-long, add-double, sub-double, mul-double, div-double, 5430 * rem-double 5431 * 5432 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5433 */ 5434 /* binop vAA, vBB, vCC */ 5435 FETCH(r0, 1) @ r0<- CCBB 5436 mov r9, rINST, lsr #8 @ r9<- AA 5437 and r2, r0, #255 @ r2<- BB 5438 mov r3, r0, lsr #8 @ r3<- CC 5439 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5440 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5441 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5442 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5443 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5444 .if 0 5445 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5446 beq common_errDivideByZero 5447 .endif 5448 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5449 5450 @ optional op; may set condition codes 5451 bl __aeabi_dmul @ result<- op, r0-r3 changed 5452 GET_INST_OPCODE(ip) @ extract opcode from rINST 5453 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5454 GOTO_OPCODE(ip) @ jump to next instruction 5455 /* 14-17 instructions */ 5456 5457 5458 5459/* ------------------------------ */ 5460 .balign 64 5461.L_OP_DIV_DOUBLE: /* 0xae */ 5462/* File: armv5te/OP_DIV_DOUBLE.S */ 5463/* File: armv5te/binopWide.S */ 5464 /* 5465 * Generic 64-bit binary operation. Provide an "instr" line that 5466 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5467 * This could be an ARM instruction or a function call. (If the result 5468 * comes back in a register other than r0, you can override "result".) 5469 * 5470 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5471 * vCC (r1). Useful for integer division and modulus. 5472 * 5473 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5474 * xor-long, add-double, sub-double, mul-double, div-double, 5475 * rem-double 5476 * 5477 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5478 */ 5479 /* binop vAA, vBB, vCC */ 5480 FETCH(r0, 1) @ r0<- CCBB 5481 mov r9, rINST, lsr #8 @ r9<- AA 5482 and r2, r0, #255 @ r2<- BB 5483 mov r3, r0, lsr #8 @ r3<- CC 5484 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5485 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5486 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5487 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5488 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5489 .if 0 5490 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5491 beq common_errDivideByZero 5492 .endif 5493 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5494 5495 @ optional op; may set condition codes 5496 bl __aeabi_ddiv @ result<- op, r0-r3 changed 5497 GET_INST_OPCODE(ip) @ extract opcode from rINST 5498 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5499 GOTO_OPCODE(ip) @ jump to next instruction 5500 /* 14-17 instructions */ 5501 5502 5503 5504/* ------------------------------ */ 5505 .balign 64 5506.L_OP_REM_DOUBLE: /* 0xaf */ 5507/* File: armv5te/OP_REM_DOUBLE.S */ 5508/* EABI doesn't define a double remainder function, but libm does */ 5509/* File: armv5te/binopWide.S */ 5510 /* 5511 * Generic 64-bit binary operation. Provide an "instr" line that 5512 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5513 * This could be an ARM instruction or a function call. (If the result 5514 * comes back in a register other than r0, you can override "result".) 5515 * 5516 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5517 * vCC (r1). Useful for integer division and modulus. 5518 * 5519 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5520 * xor-long, add-double, sub-double, mul-double, div-double, 5521 * rem-double 5522 * 5523 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5524 */ 5525 /* binop vAA, vBB, vCC */ 5526 FETCH(r0, 1) @ r0<- CCBB 5527 mov r9, rINST, lsr #8 @ r9<- AA 5528 and r2, r0, #255 @ r2<- BB 5529 mov r3, r0, lsr #8 @ r3<- CC 5530 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5531 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5532 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5533 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5534 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5535 .if 0 5536 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5537 beq common_errDivideByZero 5538 .endif 5539 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5540 5541 @ optional op; may set condition codes 5542 bl fmod @ result<- op, r0-r3 changed 5543 GET_INST_OPCODE(ip) @ extract opcode from rINST 5544 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5545 GOTO_OPCODE(ip) @ jump to next instruction 5546 /* 14-17 instructions */ 5547 5548 5549 5550/* ------------------------------ */ 5551 .balign 64 5552.L_OP_ADD_INT_2ADDR: /* 0xb0 */ 5553/* File: armv5te/OP_ADD_INT_2ADDR.S */ 5554/* File: armv5te/binop2addr.S */ 5555 /* 5556 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5557 * that specifies an instruction that performs "result = r0 op r1". 5558 * This could be an ARM instruction or a function call. (If the result 5559 * comes back in a register other than r0, you can override "result".) 5560 * 5561 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5562 * vCC (r1). Useful for integer division and modulus. 5563 * 5564 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5565 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5566 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5567 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5568 */ 5569 /* binop/2addr vA, vB */ 5570 mov r9, rINST, lsr #8 @ r9<- A+ 5571 mov r3, rINST, lsr #12 @ r3<- B 5572 and r9, r9, #15 5573 GET_VREG(r1, r3) @ r1<- vB 5574 GET_VREG(r0, r9) @ r0<- vA 5575 .if 0 5576 cmp r1, #0 @ is second operand zero? 5577 beq common_errDivideByZero 5578 .endif 5579 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5580 5581 @ optional op; may set condition codes 5582 add r0, r0, r1 @ r0<- op, r0-r3 changed 5583 GET_INST_OPCODE(ip) @ extract opcode from rINST 5584 SET_VREG(r0, r9) @ vAA<- r0 5585 GOTO_OPCODE(ip) @ jump to next instruction 5586 /* 10-13 instructions */ 5587 5588 5589 5590/* ------------------------------ */ 5591 .balign 64 5592.L_OP_SUB_INT_2ADDR: /* 0xb1 */ 5593/* File: armv5te/OP_SUB_INT_2ADDR.S */ 5594/* File: armv5te/binop2addr.S */ 5595 /* 5596 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5597 * that specifies an instruction that performs "result = r0 op r1". 5598 * This could be an ARM instruction or a function call. (If the result 5599 * comes back in a register other than r0, you can override "result".) 5600 * 5601 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5602 * vCC (r1). Useful for integer division and modulus. 5603 * 5604 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5605 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5606 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5607 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5608 */ 5609 /* binop/2addr vA, vB */ 5610 mov r9, rINST, lsr #8 @ r9<- A+ 5611 mov r3, rINST, lsr #12 @ r3<- B 5612 and r9, r9, #15 5613 GET_VREG(r1, r3) @ r1<- vB 5614 GET_VREG(r0, r9) @ r0<- vA 5615 .if 0 5616 cmp r1, #0 @ is second operand zero? 5617 beq common_errDivideByZero 5618 .endif 5619 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5620 5621 @ optional op; may set condition codes 5622 sub r0, r0, r1 @ r0<- op, r0-r3 changed 5623 GET_INST_OPCODE(ip) @ extract opcode from rINST 5624 SET_VREG(r0, r9) @ vAA<- r0 5625 GOTO_OPCODE(ip) @ jump to next instruction 5626 /* 10-13 instructions */ 5627 5628 5629 5630/* ------------------------------ */ 5631 .balign 64 5632.L_OP_MUL_INT_2ADDR: /* 0xb2 */ 5633/* File: armv5te/OP_MUL_INT_2ADDR.S */ 5634/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 5635/* File: armv5te/binop2addr.S */ 5636 /* 5637 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5638 * that specifies an instruction that performs "result = r0 op r1". 5639 * This could be an ARM instruction or a function call. (If the result 5640 * comes back in a register other than r0, you can override "result".) 5641 * 5642 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5643 * vCC (r1). Useful for integer division and modulus. 5644 * 5645 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5646 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5647 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5648 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5649 */ 5650 /* binop/2addr vA, vB */ 5651 mov r9, rINST, lsr #8 @ r9<- A+ 5652 mov r3, rINST, lsr #12 @ r3<- B 5653 and r9, r9, #15 5654 GET_VREG(r1, r3) @ r1<- vB 5655 GET_VREG(r0, r9) @ r0<- vA 5656 .if 0 5657 cmp r1, #0 @ is second operand zero? 5658 beq common_errDivideByZero 5659 .endif 5660 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5661 5662 @ optional op; may set condition codes 5663 mul r0, r1, r0 @ r0<- op, r0-r3 changed 5664 GET_INST_OPCODE(ip) @ extract opcode from rINST 5665 SET_VREG(r0, r9) @ vAA<- r0 5666 GOTO_OPCODE(ip) @ jump to next instruction 5667 /* 10-13 instructions */ 5668 5669 5670 5671/* ------------------------------ */ 5672 .balign 64 5673.L_OP_DIV_INT_2ADDR: /* 0xb3 */ 5674/* File: armv5te/OP_DIV_INT_2ADDR.S */ 5675/* File: armv5te/binop2addr.S */ 5676 /* 5677 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5678 * that specifies an instruction that performs "result = r0 op r1". 5679 * This could be an ARM instruction or a function call. (If the result 5680 * comes back in a register other than r0, you can override "result".) 5681 * 5682 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5683 * vCC (r1). Useful for integer division and modulus. 5684 * 5685 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5686 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5687 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5688 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5689 */ 5690 /* binop/2addr vA, vB */ 5691 mov r9, rINST, lsr #8 @ r9<- A+ 5692 mov r3, rINST, lsr #12 @ r3<- B 5693 and r9, r9, #15 5694 GET_VREG(r1, r3) @ r1<- vB 5695 GET_VREG(r0, r9) @ r0<- vA 5696 .if 1 5697 cmp r1, #0 @ is second operand zero? 5698 beq common_errDivideByZero 5699 .endif 5700 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5701 5702 @ optional op; may set condition codes 5703 bl __aeabi_idiv @ r0<- op, r0-r3 changed 5704 GET_INST_OPCODE(ip) @ extract opcode from rINST 5705 SET_VREG(r0, r9) @ vAA<- r0 5706 GOTO_OPCODE(ip) @ jump to next instruction 5707 /* 10-13 instructions */ 5708 5709 5710 5711/* ------------------------------ */ 5712 .balign 64 5713.L_OP_REM_INT_2ADDR: /* 0xb4 */ 5714/* File: armv5te/OP_REM_INT_2ADDR.S */ 5715/* idivmod returns quotient in r0 and remainder in r1 */ 5716/* File: armv5te/binop2addr.S */ 5717 /* 5718 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5719 * that specifies an instruction that performs "result = r0 op r1". 5720 * This could be an ARM instruction or a function call. (If the result 5721 * comes back in a register other than r0, you can override "result".) 5722 * 5723 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5724 * vCC (r1). Useful for integer division and modulus. 5725 * 5726 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5727 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5728 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5729 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5730 */ 5731 /* binop/2addr vA, vB */ 5732 mov r9, rINST, lsr #8 @ r9<- A+ 5733 mov r3, rINST, lsr #12 @ r3<- B 5734 and r9, r9, #15 5735 GET_VREG(r1, r3) @ r1<- vB 5736 GET_VREG(r0, r9) @ r0<- vA 5737 .if 1 5738 cmp r1, #0 @ is second operand zero? 5739 beq common_errDivideByZero 5740 .endif 5741 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5742 5743 @ optional op; may set condition codes 5744 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5745 GET_INST_OPCODE(ip) @ extract opcode from rINST 5746 SET_VREG(r1, r9) @ vAA<- r1 5747 GOTO_OPCODE(ip) @ jump to next instruction 5748 /* 10-13 instructions */ 5749 5750 5751 5752/* ------------------------------ */ 5753 .balign 64 5754.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5755/* File: armv5te/OP_AND_INT_2ADDR.S */ 5756/* File: armv5te/binop2addr.S */ 5757 /* 5758 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5759 * that specifies an instruction that performs "result = r0 op r1". 5760 * This could be an ARM instruction or a function call. (If the result 5761 * comes back in a register other than r0, you can override "result".) 5762 * 5763 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5764 * vCC (r1). Useful for integer division and modulus. 5765 * 5766 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5767 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5768 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5769 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5770 */ 5771 /* binop/2addr vA, vB */ 5772 mov r9, rINST, lsr #8 @ r9<- A+ 5773 mov r3, rINST, lsr #12 @ r3<- B 5774 and r9, r9, #15 5775 GET_VREG(r1, r3) @ r1<- vB 5776 GET_VREG(r0, r9) @ r0<- vA 5777 .if 0 5778 cmp r1, #0 @ is second operand zero? 5779 beq common_errDivideByZero 5780 .endif 5781 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5782 5783 @ optional op; may set condition codes 5784 and r0, r0, r1 @ r0<- op, r0-r3 changed 5785 GET_INST_OPCODE(ip) @ extract opcode from rINST 5786 SET_VREG(r0, r9) @ vAA<- r0 5787 GOTO_OPCODE(ip) @ jump to next instruction 5788 /* 10-13 instructions */ 5789 5790 5791 5792/* ------------------------------ */ 5793 .balign 64 5794.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5795/* File: armv5te/OP_OR_INT_2ADDR.S */ 5796/* File: armv5te/binop2addr.S */ 5797 /* 5798 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5799 * that specifies an instruction that performs "result = r0 op r1". 5800 * This could be an ARM instruction or a function call. (If the result 5801 * comes back in a register other than r0, you can override "result".) 5802 * 5803 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5804 * vCC (r1). Useful for integer division and modulus. 5805 * 5806 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5807 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5808 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5809 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5810 */ 5811 /* binop/2addr vA, vB */ 5812 mov r9, rINST, lsr #8 @ r9<- A+ 5813 mov r3, rINST, lsr #12 @ r3<- B 5814 and r9, r9, #15 5815 GET_VREG(r1, r3) @ r1<- vB 5816 GET_VREG(r0, r9) @ r0<- vA 5817 .if 0 5818 cmp r1, #0 @ is second operand zero? 5819 beq common_errDivideByZero 5820 .endif 5821 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5822 5823 @ optional op; may set condition codes 5824 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5825 GET_INST_OPCODE(ip) @ extract opcode from rINST 5826 SET_VREG(r0, r9) @ vAA<- r0 5827 GOTO_OPCODE(ip) @ jump to next instruction 5828 /* 10-13 instructions */ 5829 5830 5831 5832/* ------------------------------ */ 5833 .balign 64 5834.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5835/* File: armv5te/OP_XOR_INT_2ADDR.S */ 5836/* File: armv5te/binop2addr.S */ 5837 /* 5838 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5839 * that specifies an instruction that performs "result = r0 op r1". 5840 * This could be an ARM instruction or a function call. (If the result 5841 * comes back in a register other than r0, you can override "result".) 5842 * 5843 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5844 * vCC (r1). Useful for integer division and modulus. 5845 * 5846 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5847 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5848 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5849 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5850 */ 5851 /* binop/2addr vA, vB */ 5852 mov r9, rINST, lsr #8 @ r9<- A+ 5853 mov r3, rINST, lsr #12 @ r3<- B 5854 and r9, r9, #15 5855 GET_VREG(r1, r3) @ r1<- vB 5856 GET_VREG(r0, r9) @ r0<- vA 5857 .if 0 5858 cmp r1, #0 @ is second operand zero? 5859 beq common_errDivideByZero 5860 .endif 5861 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5862 5863 @ optional op; may set condition codes 5864 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5865 GET_INST_OPCODE(ip) @ extract opcode from rINST 5866 SET_VREG(r0, r9) @ vAA<- r0 5867 GOTO_OPCODE(ip) @ jump to next instruction 5868 /* 10-13 instructions */ 5869 5870 5871 5872/* ------------------------------ */ 5873 .balign 64 5874.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5875/* File: armv5te/OP_SHL_INT_2ADDR.S */ 5876/* File: armv5te/binop2addr.S */ 5877 /* 5878 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5879 * that specifies an instruction that performs "result = r0 op r1". 5880 * This could be an ARM instruction or a function call. (If the result 5881 * comes back in a register other than r0, you can override "result".) 5882 * 5883 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5884 * vCC (r1). Useful for integer division and modulus. 5885 * 5886 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5887 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5888 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5889 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5890 */ 5891 /* binop/2addr vA, vB */ 5892 mov r9, rINST, lsr #8 @ r9<- A+ 5893 mov r3, rINST, lsr #12 @ r3<- B 5894 and r9, r9, #15 5895 GET_VREG(r1, r3) @ r1<- vB 5896 GET_VREG(r0, r9) @ r0<- vA 5897 .if 0 5898 cmp r1, #0 @ is second operand zero? 5899 beq common_errDivideByZero 5900 .endif 5901 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5902 5903 and r1, r1, #31 @ optional op; may set condition codes 5904 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5905 GET_INST_OPCODE(ip) @ extract opcode from rINST 5906 SET_VREG(r0, r9) @ vAA<- r0 5907 GOTO_OPCODE(ip) @ jump to next instruction 5908 /* 10-13 instructions */ 5909 5910 5911 5912/* ------------------------------ */ 5913 .balign 64 5914.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5915/* File: armv5te/OP_SHR_INT_2ADDR.S */ 5916/* File: armv5te/binop2addr.S */ 5917 /* 5918 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5919 * that specifies an instruction that performs "result = r0 op r1". 5920 * This could be an ARM instruction or a function call. (If the result 5921 * comes back in a register other than r0, you can override "result".) 5922 * 5923 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5924 * vCC (r1). Useful for integer division and modulus. 5925 * 5926 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5927 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5928 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5929 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5930 */ 5931 /* binop/2addr vA, vB */ 5932 mov r9, rINST, lsr #8 @ r9<- A+ 5933 mov r3, rINST, lsr #12 @ r3<- B 5934 and r9, r9, #15 5935 GET_VREG(r1, r3) @ r1<- vB 5936 GET_VREG(r0, r9) @ r0<- vA 5937 .if 0 5938 cmp r1, #0 @ is second operand zero? 5939 beq common_errDivideByZero 5940 .endif 5941 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5942 5943 and r1, r1, #31 @ optional op; may set condition codes 5944 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5945 GET_INST_OPCODE(ip) @ extract opcode from rINST 5946 SET_VREG(r0, r9) @ vAA<- r0 5947 GOTO_OPCODE(ip) @ jump to next instruction 5948 /* 10-13 instructions */ 5949 5950 5951 5952/* ------------------------------ */ 5953 .balign 64 5954.L_OP_USHR_INT_2ADDR: /* 0xba */ 5955/* File: armv5te/OP_USHR_INT_2ADDR.S */ 5956/* File: armv5te/binop2addr.S */ 5957 /* 5958 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5959 * that specifies an instruction that performs "result = r0 op r1". 5960 * This could be an ARM instruction or a function call. (If the result 5961 * comes back in a register other than r0, you can override "result".) 5962 * 5963 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5964 * vCC (r1). Useful for integer division and modulus. 5965 * 5966 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5967 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5968 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5969 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5970 */ 5971 /* binop/2addr vA, vB */ 5972 mov r9, rINST, lsr #8 @ r9<- A+ 5973 mov r3, rINST, lsr #12 @ r3<- B 5974 and r9, r9, #15 5975 GET_VREG(r1, r3) @ r1<- vB 5976 GET_VREG(r0, r9) @ r0<- vA 5977 .if 0 5978 cmp r1, #0 @ is second operand zero? 5979 beq common_errDivideByZero 5980 .endif 5981 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5982 5983 and r1, r1, #31 @ optional op; may set condition codes 5984 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5985 GET_INST_OPCODE(ip) @ extract opcode from rINST 5986 SET_VREG(r0, r9) @ vAA<- r0 5987 GOTO_OPCODE(ip) @ jump to next instruction 5988 /* 10-13 instructions */ 5989 5990 5991 5992/* ------------------------------ */ 5993 .balign 64 5994.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5995/* File: armv5te/OP_ADD_LONG_2ADDR.S */ 5996/* File: armv5te/binopWide2addr.S */ 5997 /* 5998 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5999 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6000 * This could be an ARM instruction or a function call. (If the result 6001 * comes back in a register other than r0, you can override "result".) 6002 * 6003 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6004 * vCC (r1). Useful for integer division and modulus. 6005 * 6006 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6007 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6008 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6009 * rem-double/2addr 6010 */ 6011 /* binop/2addr vA, vB */ 6012 mov r9, rINST, lsr #8 @ r9<- A+ 6013 mov r1, rINST, lsr #12 @ r1<- B 6014 and r9, r9, #15 6015 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6016 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6017 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6018 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6019 .if 0 6020 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6021 beq common_errDivideByZero 6022 .endif 6023 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6024 6025 adds r0, r0, r2 @ optional op; may set condition codes 6026 adc r1, r1, r3 @ result<- op, r0-r3 changed 6027 GET_INST_OPCODE(ip) @ extract opcode from rINST 6028 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6029 GOTO_OPCODE(ip) @ jump to next instruction 6030 /* 12-15 instructions */ 6031 6032 6033 6034/* ------------------------------ */ 6035 .balign 64 6036.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 6037/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 6038/* File: armv5te/binopWide2addr.S */ 6039 /* 6040 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6041 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6042 * This could be an ARM instruction or a function call. (If the result 6043 * comes back in a register other than r0, you can override "result".) 6044 * 6045 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6046 * vCC (r1). Useful for integer division and modulus. 6047 * 6048 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6049 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6050 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6051 * rem-double/2addr 6052 */ 6053 /* binop/2addr vA, vB */ 6054 mov r9, rINST, lsr #8 @ r9<- A+ 6055 mov r1, rINST, lsr #12 @ r1<- B 6056 and r9, r9, #15 6057 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6058 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6059 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6060 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6061 .if 0 6062 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6063 beq common_errDivideByZero 6064 .endif 6065 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6066 6067 subs r0, r0, r2 @ optional op; may set condition codes 6068 sbc r1, r1, r3 @ result<- op, r0-r3 changed 6069 GET_INST_OPCODE(ip) @ extract opcode from rINST 6070 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6071 GOTO_OPCODE(ip) @ jump to next instruction 6072 /* 12-15 instructions */ 6073 6074 6075 6076/* ------------------------------ */ 6077 .balign 64 6078.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 6079/* File: armv5te/OP_MUL_LONG_2ADDR.S */ 6080 /* 6081 * Signed 64-bit integer multiply, "/2addr" version. 6082 * 6083 * See OP_MUL_LONG for an explanation. 6084 * 6085 * We get a little tight on registers, so to avoid looking up &fp[A] 6086 * again we stuff it into rINST. 6087 */ 6088 /* mul-long/2addr vA, vB */ 6089 mov r9, rINST, lsr #8 @ r9<- A+ 6090 mov r1, rINST, lsr #12 @ r1<- B 6091 and r9, r9, #15 6092 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6093 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 6094 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6095 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 6096 mul ip, r2, r1 @ ip<- ZxW 6097 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 6098 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 6099 mov r0, rINST @ r0<- &fp[A] (free up rINST) 6100 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6101 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 6102 GET_INST_OPCODE(ip) @ extract opcode from rINST 6103 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 6104 GOTO_OPCODE(ip) @ jump to next instruction 6105 6106 6107/* ------------------------------ */ 6108 .balign 64 6109.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 6110/* File: armv5te/OP_DIV_LONG_2ADDR.S */ 6111/* File: armv5te/binopWide2addr.S */ 6112 /* 6113 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6114 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6115 * This could be an ARM instruction or a function call. (If the result 6116 * comes back in a register other than r0, you can override "result".) 6117 * 6118 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6119 * vCC (r1). Useful for integer division and modulus. 6120 * 6121 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6122 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6123 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6124 * rem-double/2addr 6125 */ 6126 /* binop/2addr vA, vB */ 6127 mov r9, rINST, lsr #8 @ r9<- A+ 6128 mov r1, rINST, lsr #12 @ r1<- B 6129 and r9, r9, #15 6130 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6131 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6132 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6133 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6134 .if 1 6135 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6136 beq common_errDivideByZero 6137 .endif 6138 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6139 6140 @ optional op; may set condition codes 6141 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6142 GET_INST_OPCODE(ip) @ extract opcode from rINST 6143 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6144 GOTO_OPCODE(ip) @ jump to next instruction 6145 /* 12-15 instructions */ 6146 6147 6148 6149/* ------------------------------ */ 6150 .balign 64 6151.L_OP_REM_LONG_2ADDR: /* 0xbf */ 6152/* File: armv5te/OP_REM_LONG_2ADDR.S */ 6153/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 6154/* File: armv5te/binopWide2addr.S */ 6155 /* 6156 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6157 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6158 * This could be an ARM instruction or a function call. (If the result 6159 * comes back in a register other than r0, you can override "result".) 6160 * 6161 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6162 * vCC (r1). Useful for integer division and modulus. 6163 * 6164 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6165 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6166 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6167 * rem-double/2addr 6168 */ 6169 /* binop/2addr vA, vB */ 6170 mov r9, rINST, lsr #8 @ r9<- A+ 6171 mov r1, rINST, lsr #12 @ r1<- B 6172 and r9, r9, #15 6173 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6174 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6175 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6176 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6177 .if 1 6178 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6179 beq common_errDivideByZero 6180 .endif 6181 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6182 6183 @ optional op; may set condition codes 6184 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6185 GET_INST_OPCODE(ip) @ extract opcode from rINST 6186 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 6187 GOTO_OPCODE(ip) @ jump to next instruction 6188 /* 12-15 instructions */ 6189 6190 6191 6192/* ------------------------------ */ 6193 .balign 64 6194.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 6195/* File: armv5te/OP_AND_LONG_2ADDR.S */ 6196/* File: armv5te/binopWide2addr.S */ 6197 /* 6198 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6199 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6200 * This could be an ARM instruction or a function call. (If the result 6201 * comes back in a register other than r0, you can override "result".) 6202 * 6203 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6204 * vCC (r1). Useful for integer division and modulus. 6205 * 6206 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6207 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6208 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6209 * rem-double/2addr 6210 */ 6211 /* binop/2addr vA, vB */ 6212 mov r9, rINST, lsr #8 @ r9<- A+ 6213 mov r1, rINST, lsr #12 @ r1<- B 6214 and r9, r9, #15 6215 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6216 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6217 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6218 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6219 .if 0 6220 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6221 beq common_errDivideByZero 6222 .endif 6223 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6224 6225 and r0, r0, r2 @ optional op; may set condition codes 6226 and r1, r1, r3 @ result<- op, r0-r3 changed 6227 GET_INST_OPCODE(ip) @ extract opcode from rINST 6228 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6229 GOTO_OPCODE(ip) @ jump to next instruction 6230 /* 12-15 instructions */ 6231 6232 6233 6234/* ------------------------------ */ 6235 .balign 64 6236.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 6237/* File: armv5te/OP_OR_LONG_2ADDR.S */ 6238/* File: armv5te/binopWide2addr.S */ 6239 /* 6240 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6241 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6242 * This could be an ARM instruction or a function call. (If the result 6243 * comes back in a register other than r0, you can override "result".) 6244 * 6245 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6246 * vCC (r1). Useful for integer division and modulus. 6247 * 6248 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6249 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6250 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6251 * rem-double/2addr 6252 */ 6253 /* binop/2addr vA, vB */ 6254 mov r9, rINST, lsr #8 @ r9<- A+ 6255 mov r1, rINST, lsr #12 @ r1<- B 6256 and r9, r9, #15 6257 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6258 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6259 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6260 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6261 .if 0 6262 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6263 beq common_errDivideByZero 6264 .endif 6265 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6266 6267 orr r0, r0, r2 @ optional op; may set condition codes 6268 orr r1, r1, r3 @ result<- op, r0-r3 changed 6269 GET_INST_OPCODE(ip) @ extract opcode from rINST 6270 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6271 GOTO_OPCODE(ip) @ jump to next instruction 6272 /* 12-15 instructions */ 6273 6274 6275 6276/* ------------------------------ */ 6277 .balign 64 6278.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 6279/* File: armv5te/OP_XOR_LONG_2ADDR.S */ 6280/* File: armv5te/binopWide2addr.S */ 6281 /* 6282 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6283 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6284 * This could be an ARM instruction or a function call. (If the result 6285 * comes back in a register other than r0, you can override "result".) 6286 * 6287 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6288 * vCC (r1). Useful for integer division and modulus. 6289 * 6290 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6291 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6292 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6293 * rem-double/2addr 6294 */ 6295 /* binop/2addr vA, vB */ 6296 mov r9, rINST, lsr #8 @ r9<- A+ 6297 mov r1, rINST, lsr #12 @ r1<- B 6298 and r9, r9, #15 6299 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6300 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6301 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6302 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6303 .if 0 6304 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6305 beq common_errDivideByZero 6306 .endif 6307 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6308 6309 eor r0, r0, r2 @ optional op; may set condition codes 6310 eor r1, r1, r3 @ result<- op, r0-r3 changed 6311 GET_INST_OPCODE(ip) @ extract opcode from rINST 6312 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6313 GOTO_OPCODE(ip) @ jump to next instruction 6314 /* 12-15 instructions */ 6315 6316 6317 6318/* ------------------------------ */ 6319 .balign 64 6320.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 6321/* File: armv5te/OP_SHL_LONG_2ADDR.S */ 6322 /* 6323 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6324 * 32-bit shift distance. 6325 */ 6326 /* shl-long/2addr vA, vB */ 6327 mov r9, rINST, lsr #8 @ r9<- A+ 6328 mov r3, rINST, lsr #12 @ r3<- B 6329 and r9, r9, #15 6330 GET_VREG(r2, r3) @ r2<- vB 6331 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6332 and r2, r2, #63 @ r2<- r2 & 0x3f 6333 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6334 6335 mov r1, r1, asl r2 @ r1<- r1 << r2 6336 rsb r3, r2, #32 @ r3<- 32 - r2 6337 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 6338 subs ip, r2, #32 @ ip<- r2 - 32 6339 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6340 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 6341 mov r0, r0, asl r2 @ r0<- r0 << r2 6342 b .LOP_SHL_LONG_2ADDR_finish 6343 6344/* ------------------------------ */ 6345 .balign 64 6346.L_OP_SHR_LONG_2ADDR: /* 0xc4 */ 6347/* File: armv5te/OP_SHR_LONG_2ADDR.S */ 6348 /* 6349 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6350 * 32-bit shift distance. 6351 */ 6352 /* shr-long/2addr vA, vB */ 6353 mov r9, rINST, lsr #8 @ r9<- A+ 6354 mov r3, rINST, lsr #12 @ r3<- B 6355 and r9, r9, #15 6356 GET_VREG(r2, r3) @ r2<- vB 6357 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6358 and r2, r2, #63 @ r2<- r2 & 0x3f 6359 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6360 6361 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6362 rsb r3, r2, #32 @ r3<- 32 - r2 6363 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6364 subs ip, r2, #32 @ ip<- r2 - 32 6365 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6366 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 6367 mov r1, r1, asr r2 @ r1<- r1 >> r2 6368 b .LOP_SHR_LONG_2ADDR_finish 6369 6370/* ------------------------------ */ 6371 .balign 64 6372.L_OP_USHR_LONG_2ADDR: /* 0xc5 */ 6373/* File: armv5te/OP_USHR_LONG_2ADDR.S */ 6374 /* 6375 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6376 * 32-bit shift distance. 6377 */ 6378 /* ushr-long/2addr vA, vB */ 6379 mov r9, rINST, lsr #8 @ r9<- A+ 6380 mov r3, rINST, lsr #12 @ r3<- B 6381 and r9, r9, #15 6382 GET_VREG(r2, r3) @ r2<- vB 6383 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6384 and r2, r2, #63 @ r2<- r2 & 0x3f 6385 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6386 6387 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6388 rsb r3, r2, #32 @ r3<- 32 - r2 6389 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6390 subs ip, r2, #32 @ ip<- r2 - 32 6391 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6392 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 6393 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 6394 b .LOP_USHR_LONG_2ADDR_finish 6395 6396/* ------------------------------ */ 6397 .balign 64 6398.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */ 6399/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */ 6400/* File: armv5te/binop2addr.S */ 6401 /* 6402 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6403 * that specifies an instruction that performs "result = r0 op r1". 6404 * This could be an ARM instruction or a function call. (If the result 6405 * comes back in a register other than r0, you can override "result".) 6406 * 6407 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6408 * vCC (r1). Useful for integer division and modulus. 6409 * 6410 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6411 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6412 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6413 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6414 */ 6415 /* binop/2addr vA, vB */ 6416 mov r9, rINST, lsr #8 @ r9<- A+ 6417 mov r3, rINST, lsr #12 @ r3<- B 6418 and r9, r9, #15 6419 GET_VREG(r1, r3) @ r1<- vB 6420 GET_VREG(r0, r9) @ r0<- vA 6421 .if 0 6422 cmp r1, #0 @ is second operand zero? 6423 beq common_errDivideByZero 6424 .endif 6425 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6426 6427 @ optional op; may set condition codes 6428 bl __aeabi_fadd @ r0<- op, r0-r3 changed 6429 GET_INST_OPCODE(ip) @ extract opcode from rINST 6430 SET_VREG(r0, r9) @ vAA<- r0 6431 GOTO_OPCODE(ip) @ jump to next instruction 6432 /* 10-13 instructions */ 6433 6434 6435 6436/* ------------------------------ */ 6437 .balign 64 6438.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */ 6439/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */ 6440/* File: armv5te/binop2addr.S */ 6441 /* 6442 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6443 * that specifies an instruction that performs "result = r0 op r1". 6444 * This could be an ARM instruction or a function call. (If the result 6445 * comes back in a register other than r0, you can override "result".) 6446 * 6447 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6448 * vCC (r1). Useful for integer division and modulus. 6449 * 6450 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6451 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6452 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6453 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6454 */ 6455 /* binop/2addr vA, vB */ 6456 mov r9, rINST, lsr #8 @ r9<- A+ 6457 mov r3, rINST, lsr #12 @ r3<- B 6458 and r9, r9, #15 6459 GET_VREG(r1, r3) @ r1<- vB 6460 GET_VREG(r0, r9) @ r0<- vA 6461 .if 0 6462 cmp r1, #0 @ is second operand zero? 6463 beq common_errDivideByZero 6464 .endif 6465 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6466 6467 @ optional op; may set condition codes 6468 bl __aeabi_fsub @ r0<- op, r0-r3 changed 6469 GET_INST_OPCODE(ip) @ extract opcode from rINST 6470 SET_VREG(r0, r9) @ vAA<- r0 6471 GOTO_OPCODE(ip) @ jump to next instruction 6472 /* 10-13 instructions */ 6473 6474 6475 6476/* ------------------------------ */ 6477 .balign 64 6478.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6479/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */ 6480/* File: armv5te/binop2addr.S */ 6481 /* 6482 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6483 * that specifies an instruction that performs "result = r0 op r1". 6484 * This could be an ARM instruction or a function call. (If the result 6485 * comes back in a register other than r0, you can override "result".) 6486 * 6487 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6488 * vCC (r1). Useful for integer division and modulus. 6489 * 6490 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6491 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6492 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6493 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6494 */ 6495 /* binop/2addr vA, vB */ 6496 mov r9, rINST, lsr #8 @ r9<- A+ 6497 mov r3, rINST, lsr #12 @ r3<- B 6498 and r9, r9, #15 6499 GET_VREG(r1, r3) @ r1<- vB 6500 GET_VREG(r0, r9) @ r0<- vA 6501 .if 0 6502 cmp r1, #0 @ is second operand zero? 6503 beq common_errDivideByZero 6504 .endif 6505 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6506 6507 @ optional op; may set condition codes 6508 bl __aeabi_fmul @ r0<- op, r0-r3 changed 6509 GET_INST_OPCODE(ip) @ extract opcode from rINST 6510 SET_VREG(r0, r9) @ vAA<- r0 6511 GOTO_OPCODE(ip) @ jump to next instruction 6512 /* 10-13 instructions */ 6513 6514 6515 6516/* ------------------------------ */ 6517 .balign 64 6518.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6519/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */ 6520/* File: armv5te/binop2addr.S */ 6521 /* 6522 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6523 * that specifies an instruction that performs "result = r0 op r1". 6524 * This could be an ARM instruction or a function call. (If the result 6525 * comes back in a register other than r0, you can override "result".) 6526 * 6527 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6528 * vCC (r1). Useful for integer division and modulus. 6529 * 6530 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6531 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6532 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6533 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6534 */ 6535 /* binop/2addr vA, vB */ 6536 mov r9, rINST, lsr #8 @ r9<- A+ 6537 mov r3, rINST, lsr #12 @ r3<- B 6538 and r9, r9, #15 6539 GET_VREG(r1, r3) @ r1<- vB 6540 GET_VREG(r0, r9) @ r0<- vA 6541 .if 0 6542 cmp r1, #0 @ is second operand zero? 6543 beq common_errDivideByZero 6544 .endif 6545 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6546 6547 @ optional op; may set condition codes 6548 bl __aeabi_fdiv @ r0<- op, r0-r3 changed 6549 GET_INST_OPCODE(ip) @ extract opcode from rINST 6550 SET_VREG(r0, r9) @ vAA<- r0 6551 GOTO_OPCODE(ip) @ jump to next instruction 6552 /* 10-13 instructions */ 6553 6554 6555 6556/* ------------------------------ */ 6557 .balign 64 6558.L_OP_REM_FLOAT_2ADDR: /* 0xca */ 6559/* File: armv5te/OP_REM_FLOAT_2ADDR.S */ 6560/* EABI doesn't define a float remainder function, but libm does */ 6561/* File: armv5te/binop2addr.S */ 6562 /* 6563 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6564 * that specifies an instruction that performs "result = r0 op r1". 6565 * This could be an ARM instruction or a function call. (If the result 6566 * comes back in a register other than r0, you can override "result".) 6567 * 6568 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6569 * vCC (r1). Useful for integer division and modulus. 6570 * 6571 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6572 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6573 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6574 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6575 */ 6576 /* binop/2addr vA, vB */ 6577 mov r9, rINST, lsr #8 @ r9<- A+ 6578 mov r3, rINST, lsr #12 @ r3<- B 6579 and r9, r9, #15 6580 GET_VREG(r1, r3) @ r1<- vB 6581 GET_VREG(r0, r9) @ r0<- vA 6582 .if 0 6583 cmp r1, #0 @ is second operand zero? 6584 beq common_errDivideByZero 6585 .endif 6586 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6587 6588 @ optional op; may set condition codes 6589 bl fmodf @ r0<- op, r0-r3 changed 6590 GET_INST_OPCODE(ip) @ extract opcode from rINST 6591 SET_VREG(r0, r9) @ vAA<- r0 6592 GOTO_OPCODE(ip) @ jump to next instruction 6593 /* 10-13 instructions */ 6594 6595 6596 6597/* ------------------------------ */ 6598 .balign 64 6599.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */ 6600/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */ 6601/* File: armv5te/binopWide2addr.S */ 6602 /* 6603 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6604 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6605 * This could be an ARM instruction or a function call. (If the result 6606 * comes back in a register other than r0, you can override "result".) 6607 * 6608 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6609 * vCC (r1). Useful for integer division and modulus. 6610 * 6611 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6612 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6613 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6614 * rem-double/2addr 6615 */ 6616 /* binop/2addr vA, vB */ 6617 mov r9, rINST, lsr #8 @ r9<- A+ 6618 mov r1, rINST, lsr #12 @ r1<- B 6619 and r9, r9, #15 6620 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6621 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6622 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6623 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6624 .if 0 6625 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6626 beq common_errDivideByZero 6627 .endif 6628 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6629 6630 @ optional op; may set condition codes 6631 bl __aeabi_dadd @ result<- op, r0-r3 changed 6632 GET_INST_OPCODE(ip) @ extract opcode from rINST 6633 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6634 GOTO_OPCODE(ip) @ jump to next instruction 6635 /* 12-15 instructions */ 6636 6637 6638 6639/* ------------------------------ */ 6640 .balign 64 6641.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */ 6642/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */ 6643/* File: armv5te/binopWide2addr.S */ 6644 /* 6645 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6646 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6647 * This could be an ARM instruction or a function call. (If the result 6648 * comes back in a register other than r0, you can override "result".) 6649 * 6650 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6651 * vCC (r1). Useful for integer division and modulus. 6652 * 6653 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6654 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6655 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6656 * rem-double/2addr 6657 */ 6658 /* binop/2addr vA, vB */ 6659 mov r9, rINST, lsr #8 @ r9<- A+ 6660 mov r1, rINST, lsr #12 @ r1<- B 6661 and r9, r9, #15 6662 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6663 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6664 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6665 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6666 .if 0 6667 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6668 beq common_errDivideByZero 6669 .endif 6670 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6671 6672 @ optional op; may set condition codes 6673 bl __aeabi_dsub @ result<- op, r0-r3 changed 6674 GET_INST_OPCODE(ip) @ extract opcode from rINST 6675 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6676 GOTO_OPCODE(ip) @ jump to next instruction 6677 /* 12-15 instructions */ 6678 6679 6680 6681/* ------------------------------ */ 6682 .balign 64 6683.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */ 6684/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */ 6685/* File: armv5te/binopWide2addr.S */ 6686 /* 6687 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6688 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6689 * This could be an ARM instruction or a function call. (If the result 6690 * comes back in a register other than r0, you can override "result".) 6691 * 6692 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6693 * vCC (r1). Useful for integer division and modulus. 6694 * 6695 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6696 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6697 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6698 * rem-double/2addr 6699 */ 6700 /* binop/2addr vA, vB */ 6701 mov r9, rINST, lsr #8 @ r9<- A+ 6702 mov r1, rINST, lsr #12 @ r1<- B 6703 and r9, r9, #15 6704 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6705 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6706 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6707 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6708 .if 0 6709 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6710 beq common_errDivideByZero 6711 .endif 6712 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6713 6714 @ optional op; may set condition codes 6715 bl __aeabi_dmul @ result<- op, r0-r3 changed 6716 GET_INST_OPCODE(ip) @ extract opcode from rINST 6717 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6718 GOTO_OPCODE(ip) @ jump to next instruction 6719 /* 12-15 instructions */ 6720 6721 6722 6723/* ------------------------------ */ 6724 .balign 64 6725.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */ 6726/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */ 6727/* File: armv5te/binopWide2addr.S */ 6728 /* 6729 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6730 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6731 * This could be an ARM instruction or a function call. (If the result 6732 * comes back in a register other than r0, you can override "result".) 6733 * 6734 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6735 * vCC (r1). Useful for integer division and modulus. 6736 * 6737 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6738 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6739 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6740 * rem-double/2addr 6741 */ 6742 /* binop/2addr vA, vB */ 6743 mov r9, rINST, lsr #8 @ r9<- A+ 6744 mov r1, rINST, lsr #12 @ r1<- B 6745 and r9, r9, #15 6746 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6747 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6748 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6749 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6750 .if 0 6751 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6752 beq common_errDivideByZero 6753 .endif 6754 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6755 6756 @ optional op; may set condition codes 6757 bl __aeabi_ddiv @ result<- op, r0-r3 changed 6758 GET_INST_OPCODE(ip) @ extract opcode from rINST 6759 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6760 GOTO_OPCODE(ip) @ jump to next instruction 6761 /* 12-15 instructions */ 6762 6763 6764 6765/* ------------------------------ */ 6766 .balign 64 6767.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */ 6768/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */ 6769/* EABI doesn't define a double remainder function, but libm does */ 6770/* File: armv5te/binopWide2addr.S */ 6771 /* 6772 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6773 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6774 * This could be an ARM instruction or a function call. (If the result 6775 * comes back in a register other than r0, you can override "result".) 6776 * 6777 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6778 * vCC (r1). Useful for integer division and modulus. 6779 * 6780 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6781 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6782 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6783 * rem-double/2addr 6784 */ 6785 /* binop/2addr vA, vB */ 6786 mov r9, rINST, lsr #8 @ r9<- A+ 6787 mov r1, rINST, lsr #12 @ r1<- B 6788 and r9, r9, #15 6789 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6790 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6791 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6792 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6793 .if 0 6794 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6795 beq common_errDivideByZero 6796 .endif 6797 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6798 6799 @ optional op; may set condition codes 6800 bl fmod @ result<- op, r0-r3 changed 6801 GET_INST_OPCODE(ip) @ extract opcode from rINST 6802 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6803 GOTO_OPCODE(ip) @ jump to next instruction 6804 /* 12-15 instructions */ 6805 6806 6807 6808/* ------------------------------ */ 6809 .balign 64 6810.L_OP_ADD_INT_LIT16: /* 0xd0 */ 6811/* File: armv5te/OP_ADD_INT_LIT16.S */ 6812/* File: armv5te/binopLit16.S */ 6813 /* 6814 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6815 * that specifies an instruction that performs "result = r0 op r1". 6816 * This could be an ARM instruction or a function call. (If the result 6817 * comes back in a register other than r0, you can override "result".) 6818 * 6819 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6820 * vCC (r1). Useful for integer division and modulus. 6821 * 6822 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6823 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6824 */ 6825 /* binop/lit16 vA, vB, #+CCCC */ 6826 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6827 mov r2, rINST, lsr #12 @ r2<- B 6828 mov r9, rINST, lsr #8 @ r9<- A+ 6829 GET_VREG(r0, r2) @ r0<- vB 6830 and r9, r9, #15 6831 .if 0 6832 cmp r1, #0 @ is second operand zero? 6833 beq common_errDivideByZero 6834 .endif 6835 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6836 6837 add r0, r0, r1 @ r0<- op, r0-r3 changed 6838 GET_INST_OPCODE(ip) @ extract opcode from rINST 6839 SET_VREG(r0, r9) @ vAA<- r0 6840 GOTO_OPCODE(ip) @ jump to next instruction 6841 /* 10-13 instructions */ 6842 6843 6844 6845/* ------------------------------ */ 6846 .balign 64 6847.L_OP_RSUB_INT: /* 0xd1 */ 6848/* File: armv5te/OP_RSUB_INT.S */ 6849/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */ 6850/* File: armv5te/binopLit16.S */ 6851 /* 6852 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6853 * that specifies an instruction that performs "result = r0 op r1". 6854 * This could be an ARM instruction or a function call. (If the result 6855 * comes back in a register other than r0, you can override "result".) 6856 * 6857 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6858 * vCC (r1). Useful for integer division and modulus. 6859 * 6860 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6861 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6862 */ 6863 /* binop/lit16 vA, vB, #+CCCC */ 6864 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6865 mov r2, rINST, lsr #12 @ r2<- B 6866 mov r9, rINST, lsr #8 @ r9<- A+ 6867 GET_VREG(r0, r2) @ r0<- vB 6868 and r9, r9, #15 6869 .if 0 6870 cmp r1, #0 @ is second operand zero? 6871 beq common_errDivideByZero 6872 .endif 6873 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6874 6875 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6876 GET_INST_OPCODE(ip) @ extract opcode from rINST 6877 SET_VREG(r0, r9) @ vAA<- r0 6878 GOTO_OPCODE(ip) @ jump to next instruction 6879 /* 10-13 instructions */ 6880 6881 6882 6883/* ------------------------------ */ 6884 .balign 64 6885.L_OP_MUL_INT_LIT16: /* 0xd2 */ 6886/* File: armv5te/OP_MUL_INT_LIT16.S */ 6887/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6888/* File: armv5te/binopLit16.S */ 6889 /* 6890 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6891 * that specifies an instruction that performs "result = r0 op r1". 6892 * This could be an ARM instruction or a function call. (If the result 6893 * comes back in a register other than r0, you can override "result".) 6894 * 6895 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6896 * vCC (r1). Useful for integer division and modulus. 6897 * 6898 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6899 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6900 */ 6901 /* binop/lit16 vA, vB, #+CCCC */ 6902 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6903 mov r2, rINST, lsr #12 @ r2<- B 6904 mov r9, rINST, lsr #8 @ r9<- A+ 6905 GET_VREG(r0, r2) @ r0<- vB 6906 and r9, r9, #15 6907 .if 0 6908 cmp r1, #0 @ is second operand zero? 6909 beq common_errDivideByZero 6910 .endif 6911 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6912 6913 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6914 GET_INST_OPCODE(ip) @ extract opcode from rINST 6915 SET_VREG(r0, r9) @ vAA<- r0 6916 GOTO_OPCODE(ip) @ jump to next instruction 6917 /* 10-13 instructions */ 6918 6919 6920 6921/* ------------------------------ */ 6922 .balign 64 6923.L_OP_DIV_INT_LIT16: /* 0xd3 */ 6924/* File: armv5te/OP_DIV_INT_LIT16.S */ 6925/* File: armv5te/binopLit16.S */ 6926 /* 6927 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6928 * that specifies an instruction that performs "result = r0 op r1". 6929 * This could be an ARM instruction or a function call. (If the result 6930 * comes back in a register other than r0, you can override "result".) 6931 * 6932 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6933 * vCC (r1). Useful for integer division and modulus. 6934 * 6935 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6936 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6937 */ 6938 /* binop/lit16 vA, vB, #+CCCC */ 6939 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6940 mov r2, rINST, lsr #12 @ r2<- B 6941 mov r9, rINST, lsr #8 @ r9<- A+ 6942 GET_VREG(r0, r2) @ r0<- vB 6943 and r9, r9, #15 6944 .if 1 6945 cmp r1, #0 @ is second operand zero? 6946 beq common_errDivideByZero 6947 .endif 6948 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6949 6950 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6951 GET_INST_OPCODE(ip) @ extract opcode from rINST 6952 SET_VREG(r0, r9) @ vAA<- r0 6953 GOTO_OPCODE(ip) @ jump to next instruction 6954 /* 10-13 instructions */ 6955 6956 6957 6958/* ------------------------------ */ 6959 .balign 64 6960.L_OP_REM_INT_LIT16: /* 0xd4 */ 6961/* File: armv5te/OP_REM_INT_LIT16.S */ 6962/* idivmod returns quotient in r0 and remainder in r1 */ 6963/* File: armv5te/binopLit16.S */ 6964 /* 6965 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6966 * that specifies an instruction that performs "result = r0 op r1". 6967 * This could be an ARM instruction or a function call. (If the result 6968 * comes back in a register other than r0, you can override "result".) 6969 * 6970 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6971 * vCC (r1). Useful for integer division and modulus. 6972 * 6973 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6974 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6975 */ 6976 /* binop/lit16 vA, vB, #+CCCC */ 6977 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6978 mov r2, rINST, lsr #12 @ r2<- B 6979 mov r9, rINST, lsr #8 @ r9<- A+ 6980 GET_VREG(r0, r2) @ r0<- vB 6981 and r9, r9, #15 6982 .if 1 6983 cmp r1, #0 @ is second operand zero? 6984 beq common_errDivideByZero 6985 .endif 6986 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6987 6988 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6989 GET_INST_OPCODE(ip) @ extract opcode from rINST 6990 SET_VREG(r1, r9) @ vAA<- r1 6991 GOTO_OPCODE(ip) @ jump to next instruction 6992 /* 10-13 instructions */ 6993 6994 6995 6996/* ------------------------------ */ 6997 .balign 64 6998.L_OP_AND_INT_LIT16: /* 0xd5 */ 6999/* File: armv5te/OP_AND_INT_LIT16.S */ 7000/* File: armv5te/binopLit16.S */ 7001 /* 7002 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7003 * that specifies an instruction that performs "result = r0 op r1". 7004 * This could be an ARM instruction or a function call. (If the result 7005 * comes back in a register other than r0, you can override "result".) 7006 * 7007 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7008 * vCC (r1). Useful for integer division and modulus. 7009 * 7010 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7011 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7012 */ 7013 /* binop/lit16 vA, vB, #+CCCC */ 7014 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7015 mov r2, rINST, lsr #12 @ r2<- B 7016 mov r9, rINST, lsr #8 @ r9<- A+ 7017 GET_VREG(r0, r2) @ r0<- vB 7018 and r9, r9, #15 7019 .if 0 7020 cmp r1, #0 @ is second operand zero? 7021 beq common_errDivideByZero 7022 .endif 7023 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7024 7025 and r0, r0, r1 @ r0<- op, r0-r3 changed 7026 GET_INST_OPCODE(ip) @ extract opcode from rINST 7027 SET_VREG(r0, r9) @ vAA<- r0 7028 GOTO_OPCODE(ip) @ jump to next instruction 7029 /* 10-13 instructions */ 7030 7031 7032 7033/* ------------------------------ */ 7034 .balign 64 7035.L_OP_OR_INT_LIT16: /* 0xd6 */ 7036/* File: armv5te/OP_OR_INT_LIT16.S */ 7037/* File: armv5te/binopLit16.S */ 7038 /* 7039 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7040 * that specifies an instruction that performs "result = r0 op r1". 7041 * This could be an ARM instruction or a function call. (If the result 7042 * comes back in a register other than r0, you can override "result".) 7043 * 7044 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7045 * vCC (r1). Useful for integer division and modulus. 7046 * 7047 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7048 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7049 */ 7050 /* binop/lit16 vA, vB, #+CCCC */ 7051 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7052 mov r2, rINST, lsr #12 @ r2<- B 7053 mov r9, rINST, lsr #8 @ r9<- A+ 7054 GET_VREG(r0, r2) @ r0<- vB 7055 and r9, r9, #15 7056 .if 0 7057 cmp r1, #0 @ is second operand zero? 7058 beq common_errDivideByZero 7059 .endif 7060 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7061 7062 orr r0, r0, r1 @ r0<- op, r0-r3 changed 7063 GET_INST_OPCODE(ip) @ extract opcode from rINST 7064 SET_VREG(r0, r9) @ vAA<- r0 7065 GOTO_OPCODE(ip) @ jump to next instruction 7066 /* 10-13 instructions */ 7067 7068 7069 7070/* ------------------------------ */ 7071 .balign 64 7072.L_OP_XOR_INT_LIT16: /* 0xd7 */ 7073/* File: armv5te/OP_XOR_INT_LIT16.S */ 7074/* File: armv5te/binopLit16.S */ 7075 /* 7076 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7077 * that specifies an instruction that performs "result = r0 op r1". 7078 * This could be an ARM instruction or a function call. (If the result 7079 * comes back in a register other than r0, you can override "result".) 7080 * 7081 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7082 * vCC (r1). Useful for integer division and modulus. 7083 * 7084 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7085 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7086 */ 7087 /* binop/lit16 vA, vB, #+CCCC */ 7088 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7089 mov r2, rINST, lsr #12 @ r2<- B 7090 mov r9, rINST, lsr #8 @ r9<- A+ 7091 GET_VREG(r0, r2) @ r0<- vB 7092 and r9, r9, #15 7093 .if 0 7094 cmp r1, #0 @ is second operand zero? 7095 beq common_errDivideByZero 7096 .endif 7097 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7098 7099 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7100 GET_INST_OPCODE(ip) @ extract opcode from rINST 7101 SET_VREG(r0, r9) @ vAA<- r0 7102 GOTO_OPCODE(ip) @ jump to next instruction 7103 /* 10-13 instructions */ 7104 7105 7106 7107/* ------------------------------ */ 7108 .balign 64 7109.L_OP_ADD_INT_LIT8: /* 0xd8 */ 7110/* File: armv5te/OP_ADD_INT_LIT8.S */ 7111/* File: armv5te/binopLit8.S */ 7112 /* 7113 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7114 * that specifies an instruction that performs "result = r0 op r1". 7115 * This could be an ARM instruction or a function call. (If the result 7116 * comes back in a register other than r0, you can override "result".) 7117 * 7118 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7119 * vCC (r1). Useful for integer division and modulus. 7120 * 7121 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7122 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7123 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7124 */ 7125 /* binop/lit8 vAA, vBB, #+CC */ 7126 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7127 mov r9, rINST, lsr #8 @ r9<- AA 7128 and r2, r3, #255 @ r2<- BB 7129 GET_VREG(r0, r2) @ r0<- vBB 7130 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7131 .if 0 7132 @cmp r1, #0 @ is second operand zero? 7133 beq common_errDivideByZero 7134 .endif 7135 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7136 7137 @ optional op; may set condition codes 7138 add r0, r0, r1 @ r0<- op, r0-r3 changed 7139 GET_INST_OPCODE(ip) @ extract opcode from rINST 7140 SET_VREG(r0, r9) @ vAA<- r0 7141 GOTO_OPCODE(ip) @ jump to next instruction 7142 /* 10-12 instructions */ 7143 7144 7145 7146/* ------------------------------ */ 7147 .balign 64 7148.L_OP_RSUB_INT_LIT8: /* 0xd9 */ 7149/* File: armv5te/OP_RSUB_INT_LIT8.S */ 7150/* File: armv5te/binopLit8.S */ 7151 /* 7152 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7153 * that specifies an instruction that performs "result = r0 op r1". 7154 * This could be an ARM instruction or a function call. (If the result 7155 * comes back in a register other than r0, you can override "result".) 7156 * 7157 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7158 * vCC (r1). Useful for integer division and modulus. 7159 * 7160 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7161 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7162 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7163 */ 7164 /* binop/lit8 vAA, vBB, #+CC */ 7165 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7166 mov r9, rINST, lsr #8 @ r9<- AA 7167 and r2, r3, #255 @ r2<- BB 7168 GET_VREG(r0, r2) @ r0<- vBB 7169 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7170 .if 0 7171 @cmp r1, #0 @ is second operand zero? 7172 beq common_errDivideByZero 7173 .endif 7174 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7175 7176 @ optional op; may set condition codes 7177 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 7178 GET_INST_OPCODE(ip) @ extract opcode from rINST 7179 SET_VREG(r0, r9) @ vAA<- r0 7180 GOTO_OPCODE(ip) @ jump to next instruction 7181 /* 10-12 instructions */ 7182 7183 7184 7185/* ------------------------------ */ 7186 .balign 64 7187.L_OP_MUL_INT_LIT8: /* 0xda */ 7188/* File: armv5te/OP_MUL_INT_LIT8.S */ 7189/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 7190/* File: armv5te/binopLit8.S */ 7191 /* 7192 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7193 * that specifies an instruction that performs "result = r0 op r1". 7194 * This could be an ARM instruction or a function call. (If the result 7195 * comes back in a register other than r0, you can override "result".) 7196 * 7197 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7198 * vCC (r1). Useful for integer division and modulus. 7199 * 7200 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7201 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7202 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7203 */ 7204 /* binop/lit8 vAA, vBB, #+CC */ 7205 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7206 mov r9, rINST, lsr #8 @ r9<- AA 7207 and r2, r3, #255 @ r2<- BB 7208 GET_VREG(r0, r2) @ r0<- vBB 7209 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7210 .if 0 7211 @cmp r1, #0 @ is second operand zero? 7212 beq common_errDivideByZero 7213 .endif 7214 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7215 7216 @ optional op; may set condition codes 7217 mul r0, r1, r0 @ r0<- op, r0-r3 changed 7218 GET_INST_OPCODE(ip) @ extract opcode from rINST 7219 SET_VREG(r0, r9) @ vAA<- r0 7220 GOTO_OPCODE(ip) @ jump to next instruction 7221 /* 10-12 instructions */ 7222 7223 7224 7225/* ------------------------------ */ 7226 .balign 64 7227.L_OP_DIV_INT_LIT8: /* 0xdb */ 7228/* File: armv5te/OP_DIV_INT_LIT8.S */ 7229/* File: armv5te/binopLit8.S */ 7230 /* 7231 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7232 * that specifies an instruction that performs "result = r0 op r1". 7233 * This could be an ARM instruction or a function call. (If the result 7234 * comes back in a register other than r0, you can override "result".) 7235 * 7236 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7237 * vCC (r1). Useful for integer division and modulus. 7238 * 7239 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7240 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7241 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7242 */ 7243 /* binop/lit8 vAA, vBB, #+CC */ 7244 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7245 mov r9, rINST, lsr #8 @ r9<- AA 7246 and r2, r3, #255 @ r2<- BB 7247 GET_VREG(r0, r2) @ r0<- vBB 7248 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7249 .if 1 7250 @cmp r1, #0 @ is second operand zero? 7251 beq common_errDivideByZero 7252 .endif 7253 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7254 7255 @ optional op; may set condition codes 7256 bl __aeabi_idiv @ r0<- op, r0-r3 changed 7257 GET_INST_OPCODE(ip) @ extract opcode from rINST 7258 SET_VREG(r0, r9) @ vAA<- r0 7259 GOTO_OPCODE(ip) @ jump to next instruction 7260 /* 10-12 instructions */ 7261 7262 7263 7264/* ------------------------------ */ 7265 .balign 64 7266.L_OP_REM_INT_LIT8: /* 0xdc */ 7267/* File: armv5te/OP_REM_INT_LIT8.S */ 7268/* idivmod returns quotient in r0 and remainder in r1 */ 7269/* File: armv5te/binopLit8.S */ 7270 /* 7271 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7272 * that specifies an instruction that performs "result = r0 op r1". 7273 * This could be an ARM instruction or a function call. (If the result 7274 * comes back in a register other than r0, you can override "result".) 7275 * 7276 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7277 * vCC (r1). Useful for integer division and modulus. 7278 * 7279 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7280 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7281 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7282 */ 7283 /* binop/lit8 vAA, vBB, #+CC */ 7284 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7285 mov r9, rINST, lsr #8 @ r9<- AA 7286 and r2, r3, #255 @ r2<- BB 7287 GET_VREG(r0, r2) @ r0<- vBB 7288 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7289 .if 1 7290 @cmp r1, #0 @ is second operand zero? 7291 beq common_errDivideByZero 7292 .endif 7293 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7294 7295 @ optional op; may set condition codes 7296 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 7297 GET_INST_OPCODE(ip) @ extract opcode from rINST 7298 SET_VREG(r1, r9) @ vAA<- r1 7299 GOTO_OPCODE(ip) @ jump to next instruction 7300 /* 10-12 instructions */ 7301 7302 7303 7304/* ------------------------------ */ 7305 .balign 64 7306.L_OP_AND_INT_LIT8: /* 0xdd */ 7307/* File: armv5te/OP_AND_INT_LIT8.S */ 7308/* File: armv5te/binopLit8.S */ 7309 /* 7310 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7311 * that specifies an instruction that performs "result = r0 op r1". 7312 * This could be an ARM instruction or a function call. (If the result 7313 * comes back in a register other than r0, you can override "result".) 7314 * 7315 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7316 * vCC (r1). Useful for integer division and modulus. 7317 * 7318 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7319 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7320 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7321 */ 7322 /* binop/lit8 vAA, vBB, #+CC */ 7323 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7324 mov r9, rINST, lsr #8 @ r9<- AA 7325 and r2, r3, #255 @ r2<- BB 7326 GET_VREG(r0, r2) @ r0<- vBB 7327 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7328 .if 0 7329 @cmp r1, #0 @ is second operand zero? 7330 beq common_errDivideByZero 7331 .endif 7332 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7333 7334 @ optional op; may set condition codes 7335 and r0, r0, r1 @ r0<- op, r0-r3 changed 7336 GET_INST_OPCODE(ip) @ extract opcode from rINST 7337 SET_VREG(r0, r9) @ vAA<- r0 7338 GOTO_OPCODE(ip) @ jump to next instruction 7339 /* 10-12 instructions */ 7340 7341 7342 7343/* ------------------------------ */ 7344 .balign 64 7345.L_OP_OR_INT_LIT8: /* 0xde */ 7346/* File: armv5te/OP_OR_INT_LIT8.S */ 7347/* File: armv5te/binopLit8.S */ 7348 /* 7349 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7350 * that specifies an instruction that performs "result = r0 op r1". 7351 * This could be an ARM instruction or a function call. (If the result 7352 * comes back in a register other than r0, you can override "result".) 7353 * 7354 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7355 * vCC (r1). Useful for integer division and modulus. 7356 * 7357 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7358 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7359 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7360 */ 7361 /* binop/lit8 vAA, vBB, #+CC */ 7362 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7363 mov r9, rINST, lsr #8 @ r9<- AA 7364 and r2, r3, #255 @ r2<- BB 7365 GET_VREG(r0, r2) @ r0<- vBB 7366 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7367 .if 0 7368 @cmp r1, #0 @ is second operand zero? 7369 beq common_errDivideByZero 7370 .endif 7371 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7372 7373 @ optional op; may set condition codes 7374 orr r0, r0, r1 @ r0<- op, r0-r3 changed 7375 GET_INST_OPCODE(ip) @ extract opcode from rINST 7376 SET_VREG(r0, r9) @ vAA<- r0 7377 GOTO_OPCODE(ip) @ jump to next instruction 7378 /* 10-12 instructions */ 7379 7380 7381 7382/* ------------------------------ */ 7383 .balign 64 7384.L_OP_XOR_INT_LIT8: /* 0xdf */ 7385/* File: armv5te/OP_XOR_INT_LIT8.S */ 7386/* File: armv5te/binopLit8.S */ 7387 /* 7388 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7389 * that specifies an instruction that performs "result = r0 op r1". 7390 * This could be an ARM instruction or a function call. (If the result 7391 * comes back in a register other than r0, you can override "result".) 7392 * 7393 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7394 * vCC (r1). Useful for integer division and modulus. 7395 * 7396 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7397 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7398 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7399 */ 7400 /* binop/lit8 vAA, vBB, #+CC */ 7401 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7402 mov r9, rINST, lsr #8 @ r9<- AA 7403 and r2, r3, #255 @ r2<- BB 7404 GET_VREG(r0, r2) @ r0<- vBB 7405 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7406 .if 0 7407 @cmp r1, #0 @ is second operand zero? 7408 beq common_errDivideByZero 7409 .endif 7410 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7411 7412 @ optional op; may set condition codes 7413 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7414 GET_INST_OPCODE(ip) @ extract opcode from rINST 7415 SET_VREG(r0, r9) @ vAA<- r0 7416 GOTO_OPCODE(ip) @ jump to next instruction 7417 /* 10-12 instructions */ 7418 7419 7420 7421/* ------------------------------ */ 7422 .balign 64 7423.L_OP_SHL_INT_LIT8: /* 0xe0 */ 7424/* File: armv5te/OP_SHL_INT_LIT8.S */ 7425/* File: armv5te/binopLit8.S */ 7426 /* 7427 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7428 * that specifies an instruction that performs "result = r0 op r1". 7429 * This could be an ARM instruction or a function call. (If the result 7430 * comes back in a register other than r0, you can override "result".) 7431 * 7432 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7433 * vCC (r1). Useful for integer division and modulus. 7434 * 7435 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7436 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7437 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7438 */ 7439 /* binop/lit8 vAA, vBB, #+CC */ 7440 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7441 mov r9, rINST, lsr #8 @ r9<- AA 7442 and r2, r3, #255 @ r2<- BB 7443 GET_VREG(r0, r2) @ r0<- vBB 7444 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7445 .if 0 7446 @cmp r1, #0 @ is second operand zero? 7447 beq common_errDivideByZero 7448 .endif 7449 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7450 7451 and r1, r1, #31 @ optional op; may set condition codes 7452 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 7453 GET_INST_OPCODE(ip) @ extract opcode from rINST 7454 SET_VREG(r0, r9) @ vAA<- r0 7455 GOTO_OPCODE(ip) @ jump to next instruction 7456 /* 10-12 instructions */ 7457 7458 7459 7460/* ------------------------------ */ 7461 .balign 64 7462.L_OP_SHR_INT_LIT8: /* 0xe1 */ 7463/* File: armv5te/OP_SHR_INT_LIT8.S */ 7464/* File: armv5te/binopLit8.S */ 7465 /* 7466 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7467 * that specifies an instruction that performs "result = r0 op r1". 7468 * This could be an ARM instruction or a function call. (If the result 7469 * comes back in a register other than r0, you can override "result".) 7470 * 7471 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7472 * vCC (r1). Useful for integer division and modulus. 7473 * 7474 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7475 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7476 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7477 */ 7478 /* binop/lit8 vAA, vBB, #+CC */ 7479 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7480 mov r9, rINST, lsr #8 @ r9<- AA 7481 and r2, r3, #255 @ r2<- BB 7482 GET_VREG(r0, r2) @ r0<- vBB 7483 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7484 .if 0 7485 @cmp r1, #0 @ is second operand zero? 7486 beq common_errDivideByZero 7487 .endif 7488 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7489 7490 and r1, r1, #31 @ optional op; may set condition codes 7491 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 7492 GET_INST_OPCODE(ip) @ extract opcode from rINST 7493 SET_VREG(r0, r9) @ vAA<- r0 7494 GOTO_OPCODE(ip) @ jump to next instruction 7495 /* 10-12 instructions */ 7496 7497 7498 7499/* ------------------------------ */ 7500 .balign 64 7501.L_OP_USHR_INT_LIT8: /* 0xe2 */ 7502/* File: armv5te/OP_USHR_INT_LIT8.S */ 7503/* File: armv5te/binopLit8.S */ 7504 /* 7505 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7506 * that specifies an instruction that performs "result = r0 op r1". 7507 * This could be an ARM instruction or a function call. (If the result 7508 * comes back in a register other than r0, you can override "result".) 7509 * 7510 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7511 * vCC (r1). Useful for integer division and modulus. 7512 * 7513 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7514 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7515 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7516 */ 7517 /* binop/lit8 vAA, vBB, #+CC */ 7518 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7519 mov r9, rINST, lsr #8 @ r9<- AA 7520 and r2, r3, #255 @ r2<- BB 7521 GET_VREG(r0, r2) @ r0<- vBB 7522 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7523 .if 0 7524 @cmp r1, #0 @ is second operand zero? 7525 beq common_errDivideByZero 7526 .endif 7527 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7528 7529 and r1, r1, #31 @ optional op; may set condition codes 7530 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 7531 GET_INST_OPCODE(ip) @ extract opcode from rINST 7532 SET_VREG(r0, r9) @ vAA<- r0 7533 GOTO_OPCODE(ip) @ jump to next instruction 7534 /* 10-12 instructions */ 7535 7536 7537 7538/* ------------------------------ */ 7539 .balign 64 7540.L_OP_UNUSED_E3: /* 0xe3 */ 7541/* File: armv5te/OP_UNUSED_E3.S */ 7542/* File: armv5te/unused.S */ 7543 bl common_abort 7544 7545 7546 7547/* ------------------------------ */ 7548 .balign 64 7549.L_OP_UNUSED_E4: /* 0xe4 */ 7550/* File: armv5te/OP_UNUSED_E4.S */ 7551/* File: armv5te/unused.S */ 7552 bl common_abort 7553 7554 7555 7556/* ------------------------------ */ 7557 .balign 64 7558.L_OP_UNUSED_E5: /* 0xe5 */ 7559/* File: armv5te/OP_UNUSED_E5.S */ 7560/* File: armv5te/unused.S */ 7561 bl common_abort 7562 7563 7564 7565/* ------------------------------ */ 7566 .balign 64 7567.L_OP_UNUSED_E6: /* 0xe6 */ 7568/* File: armv5te/OP_UNUSED_E6.S */ 7569/* File: armv5te/unused.S */ 7570 bl common_abort 7571 7572 7573 7574/* ------------------------------ */ 7575 .balign 64 7576.L_OP_UNUSED_E7: /* 0xe7 */ 7577/* File: armv5te/OP_UNUSED_E7.S */ 7578/* File: armv5te/unused.S */ 7579 bl common_abort 7580 7581 7582 7583/* ------------------------------ */ 7584 .balign 64 7585.L_OP_UNUSED_E8: /* 0xe8 */ 7586/* File: armv5te/OP_UNUSED_E8.S */ 7587/* File: armv5te/unused.S */ 7588 bl common_abort 7589 7590 7591 7592/* ------------------------------ */ 7593 .balign 64 7594.L_OP_UNUSED_E9: /* 0xe9 */ 7595/* File: armv5te/OP_UNUSED_E9.S */ 7596/* File: armv5te/unused.S */ 7597 bl common_abort 7598 7599 7600 7601/* ------------------------------ */ 7602 .balign 64 7603.L_OP_UNUSED_EA: /* 0xea */ 7604/* File: armv5te/OP_UNUSED_EA.S */ 7605/* File: armv5te/unused.S */ 7606 bl common_abort 7607 7608 7609 7610/* ------------------------------ */ 7611 .balign 64 7612.L_OP_UNUSED_EB: /* 0xeb */ 7613/* File: armv5te/OP_UNUSED_EB.S */ 7614/* File: armv5te/unused.S */ 7615 bl common_abort 7616 7617 7618 7619/* ------------------------------ */ 7620 .balign 64 7621.L_OP_BREAKPOINT: /* 0xec */ 7622/* File: armv5te/OP_BREAKPOINT.S */ 7623/* File: armv5te/unused.S */ 7624 bl common_abort 7625 7626 7627 7628/* ------------------------------ */ 7629 .balign 64 7630.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */ 7631/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */ 7632 /* 7633 * Handle a throw-verification-error instruction. This throws an 7634 * exception for an error discovered during verification. The 7635 * exception is indicated by AA, with some detail provided by BBBB. 7636 */ 7637 /* op AA, ref@BBBB */ 7638 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7639 FETCH(r2, 1) @ r2<- BBBB 7640 EXPORT_PC() @ export the PC 7641 mov r1, rINST, lsr #8 @ r1<- AA 7642 bl dvmThrowVerificationError @ always throws 7643 b common_exceptionThrown @ handle exception 7644 7645 7646/* ------------------------------ */ 7647 .balign 64 7648.L_OP_EXECUTE_INLINE: /* 0xee */ 7649/* File: armv5te/OP_EXECUTE_INLINE.S */ 7650 /* 7651 * Execute a "native inline" instruction. 7652 * 7653 * We need to call an InlineOp4Func: 7654 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7655 * 7656 * The first four args are in r0-r3, pointer to return value storage 7657 * is on the stack. The function's return value is a flag that tells 7658 * us if an exception was thrown. 7659 */ 7660 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7661 FETCH(r10, 1) @ r10<- BBBB 7662 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7663 EXPORT_PC() @ can throw 7664 sub sp, sp, #8 @ make room for arg, +64 bit align 7665 mov r0, rINST, lsr #12 @ r0<- B 7666 str r1, [sp] @ push &glue->retval 7667 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7668 add sp, sp, #8 @ pop stack 7669 cmp r0, #0 @ test boolean result of inline 7670 beq common_exceptionThrown @ returned false, handle exception 7671 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7672 GET_INST_OPCODE(ip) @ extract opcode from rINST 7673 GOTO_OPCODE(ip) @ jump to next instruction 7674 7675/* ------------------------------ */ 7676 .balign 64 7677.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */ 7678/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */ 7679 /* 7680 * Execute a "native inline" instruction, using "/range" semantics. 7681 * Same idea as execute-inline, but we get the args differently. 7682 * 7683 * We need to call an InlineOp4Func: 7684 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7685 * 7686 * The first four args are in r0-r3, pointer to return value storage 7687 * is on the stack. The function's return value is a flag that tells 7688 * us if an exception was thrown. 7689 */ 7690 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */ 7691 FETCH(r10, 1) @ r10<- BBBB 7692 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7693 EXPORT_PC() @ can throw 7694 sub sp, sp, #8 @ make room for arg, +64 bit align 7695 mov r0, rINST, lsr #8 @ r0<- AA 7696 str r1, [sp] @ push &glue->retval 7697 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after 7698 add sp, sp, #8 @ pop stack 7699 cmp r0, #0 @ test boolean result of inline 7700 beq common_exceptionThrown @ returned false, handle exception 7701 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7702 GET_INST_OPCODE(ip) @ extract opcode from rINST 7703 GOTO_OPCODE(ip) @ jump to next instruction 7704 7705/* ------------------------------ */ 7706 .balign 64 7707.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7708/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7709 /* 7710 * invoke-direct-empty is a no-op in a "standard" interpreter. 7711 */ 7712 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7713 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7714 GOTO_OPCODE(ip) @ execute it 7715 7716/* ------------------------------ */ 7717 .balign 64 7718.L_OP_UNUSED_F1: /* 0xf1 */ 7719/* File: armv5te/OP_UNUSED_F1.S */ 7720/* File: armv5te/unused.S */ 7721 bl common_abort 7722 7723 7724 7725/* ------------------------------ */ 7726 .balign 64 7727.L_OP_IGET_QUICK: /* 0xf2 */ 7728/* File: armv5te/OP_IGET_QUICK.S */ 7729 /* For: iget-quick, iget-object-quick */ 7730 /* op vA, vB, offset@CCCC */ 7731 mov r2, rINST, lsr #12 @ r2<- B 7732 GET_VREG(r3, r2) @ r3<- object we're operating on 7733 FETCH(r1, 1) @ r1<- field byte offset 7734 cmp r3, #0 @ check object for null 7735 mov r2, rINST, lsr #8 @ r2<- A(+) 7736 beq common_errNullObject @ object was null 7737 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7738 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7739 and r2, r2, #15 7740 GET_INST_OPCODE(ip) @ extract opcode from rINST 7741 SET_VREG(r0, r2) @ fp[A]<- r0 7742 GOTO_OPCODE(ip) @ jump to next instruction 7743 7744 7745/* ------------------------------ */ 7746 .balign 64 7747.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7748/* File: armv5te/OP_IGET_WIDE_QUICK.S */ 7749 /* iget-wide-quick vA, vB, offset@CCCC */ 7750 mov r2, rINST, lsr #12 @ r2<- B 7751 GET_VREG(r3, r2) @ r3<- object we're operating on 7752 FETCH(r1, 1) @ r1<- field byte offset 7753 cmp r3, #0 @ check object for null 7754 mov r2, rINST, lsr #8 @ r2<- A(+) 7755 beq common_errNullObject @ object was null 7756 ldrd r0, [r3, r1] @ r0<- obj.field (64 bits, aligned) 7757 and r2, r2, #15 7758 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7759 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7760 GET_INST_OPCODE(ip) @ extract opcode from rINST 7761 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7762 GOTO_OPCODE(ip) @ jump to next instruction 7763 7764 7765/* ------------------------------ */ 7766 .balign 64 7767.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7768/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7769/* File: armv5te/OP_IGET_QUICK.S */ 7770 /* For: iget-quick, iget-object-quick */ 7771 /* op vA, vB, offset@CCCC */ 7772 mov r2, rINST, lsr #12 @ r2<- B 7773 GET_VREG(r3, r2) @ r3<- object we're operating on 7774 FETCH(r1, 1) @ r1<- field byte offset 7775 cmp r3, #0 @ check object for null 7776 mov r2, rINST, lsr #8 @ r2<- A(+) 7777 beq common_errNullObject @ object was null 7778 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7779 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7780 and r2, r2, #15 7781 GET_INST_OPCODE(ip) @ extract opcode from rINST 7782 SET_VREG(r0, r2) @ fp[A]<- r0 7783 GOTO_OPCODE(ip) @ jump to next instruction 7784 7785 7786 7787/* ------------------------------ */ 7788 .balign 64 7789.L_OP_IPUT_QUICK: /* 0xf5 */ 7790/* File: armv5te/OP_IPUT_QUICK.S */ 7791 /* For: iput-quick, iput-object-quick */ 7792 /* op vA, vB, offset@CCCC */ 7793 mov r2, rINST, lsr #12 @ r2<- B 7794 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7795 FETCH(r1, 1) @ r1<- field byte offset 7796 cmp r3, #0 @ check object for null 7797 mov r2, rINST, lsr #8 @ r2<- A(+) 7798 beq common_errNullObject @ object was null 7799 and r2, r2, #15 7800 GET_VREG(r0, r2) @ r0<- fp[A] 7801 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7802 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7803 GET_INST_OPCODE(ip) @ extract opcode from rINST 7804 GOTO_OPCODE(ip) @ jump to next instruction 7805 7806 7807/* ------------------------------ */ 7808 .balign 64 7809.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7810/* File: armv5te/OP_IPUT_WIDE_QUICK.S */ 7811 /* iput-wide-quick vA, vB, offset@CCCC */ 7812 mov r0, rINST, lsr #8 @ r0<- A(+) 7813 mov r1, rINST, lsr #12 @ r1<- B 7814 and r0, r0, #15 7815 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7816 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7817 cmp r2, #0 @ check object for null 7818 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7819 beq common_errNullObject @ object was null 7820 FETCH(r3, 1) @ r3<- field byte offset 7821 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7822 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1 7823 GET_INST_OPCODE(ip) @ extract opcode from rINST 7824 GOTO_OPCODE(ip) @ jump to next instruction 7825 7826 7827/* ------------------------------ */ 7828 .balign 64 7829.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7830/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7831/* File: armv5te/OP_IPUT_QUICK.S */ 7832 /* For: iput-quick, iput-object-quick */ 7833 /* op vA, vB, offset@CCCC */ 7834 mov r2, rINST, lsr #12 @ r2<- B 7835 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7836 FETCH(r1, 1) @ r1<- field byte offset 7837 cmp r3, #0 @ check object for null 7838 mov r2, rINST, lsr #8 @ r2<- A(+) 7839 beq common_errNullObject @ object was null 7840 and r2, r2, #15 7841 GET_VREG(r0, r2) @ r0<- fp[A] 7842 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7843 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7844 GET_INST_OPCODE(ip) @ extract opcode from rINST 7845 GOTO_OPCODE(ip) @ jump to next instruction 7846 7847 7848 7849/* ------------------------------ */ 7850 .balign 64 7851.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7852/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7853 /* 7854 * Handle an optimized virtual method call. 7855 * 7856 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7857 */ 7858 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7859 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7860 FETCH(r3, 2) @ r3<- FEDC or CCCC 7861 FETCH(r1, 1) @ r1<- BBBB 7862 .if (!0) 7863 and r3, r3, #15 @ r3<- C (or stays CCCC) 7864 .endif 7865 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7866 cmp r2, #0 @ is "this" null? 7867 beq common_errNullObject @ null "this", throw exception 7868 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7869 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7870 EXPORT_PC() @ invoke must export 7871 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7872 bl common_invokeMethodNoRange @ continue on 7873 7874/* ------------------------------ */ 7875 .balign 64 7876.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7877/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7878/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7879 /* 7880 * Handle an optimized virtual method call. 7881 * 7882 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7883 */ 7884 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7885 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7886 FETCH(r3, 2) @ r3<- FEDC or CCCC 7887 FETCH(r1, 1) @ r1<- BBBB 7888 .if (!1) 7889 and r3, r3, #15 @ r3<- C (or stays CCCC) 7890 .endif 7891 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7892 cmp r2, #0 @ is "this" null? 7893 beq common_errNullObject @ null "this", throw exception 7894 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7895 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7896 EXPORT_PC() @ invoke must export 7897 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7898 bl common_invokeMethodRange @ continue on 7899 7900 7901/* ------------------------------ */ 7902 .balign 64 7903.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7904/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7905 /* 7906 * Handle an optimized "super" method call. 7907 * 7908 * for: [opt] invoke-super-quick, invoke-super-quick/range 7909 */ 7910 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7911 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7912 FETCH(r10, 2) @ r10<- GFED or CCCC 7913 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7914 .if (!0) 7915 and r10, r10, #15 @ r10<- D (or stays CCCC) 7916 .endif 7917 FETCH(r1, 1) @ r1<- BBBB 7918 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7919 EXPORT_PC() @ must export for invoke 7920 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7921 GET_VREG(r3, r10) @ r3<- "this" 7922 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7923 cmp r3, #0 @ null "this" ref? 7924 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7925 beq common_errNullObject @ "this" is null, throw exception 7926 bl common_invokeMethodNoRange @ continue on 7927 7928 7929/* ------------------------------ */ 7930 .balign 64 7931.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7932/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7933/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7934 /* 7935 * Handle an optimized "super" method call. 7936 * 7937 * for: [opt] invoke-super-quick, invoke-super-quick/range 7938 */ 7939 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7940 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7941 FETCH(r10, 2) @ r10<- GFED or CCCC 7942 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7943 .if (!1) 7944 and r10, r10, #15 @ r10<- D (or stays CCCC) 7945 .endif 7946 FETCH(r1, 1) @ r1<- BBBB 7947 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7948 EXPORT_PC() @ must export for invoke 7949 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7950 GET_VREG(r3, r10) @ r3<- "this" 7951 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7952 cmp r3, #0 @ null "this" ref? 7953 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7954 beq common_errNullObject @ "this" is null, throw exception 7955 bl common_invokeMethodRange @ continue on 7956 7957 7958 7959/* ------------------------------ */ 7960 .balign 64 7961.L_OP_UNUSED_FC: /* 0xfc */ 7962/* File: armv5te/OP_UNUSED_FC.S */ 7963/* File: armv5te/unused.S */ 7964 bl common_abort 7965 7966 7967 7968/* ------------------------------ */ 7969 .balign 64 7970.L_OP_UNUSED_FD: /* 0xfd */ 7971/* File: armv5te/OP_UNUSED_FD.S */ 7972/* File: armv5te/unused.S */ 7973 bl common_abort 7974 7975 7976 7977/* ------------------------------ */ 7978 .balign 64 7979.L_OP_UNUSED_FE: /* 0xfe */ 7980/* File: armv5te/OP_UNUSED_FE.S */ 7981/* File: armv5te/unused.S */ 7982 bl common_abort 7983 7984 7985 7986/* ------------------------------ */ 7987 .balign 64 7988.L_OP_UNUSED_FF: /* 0xff */ 7989/* File: armv5te/OP_UNUSED_FF.S */ 7990/* File: armv5te/unused.S */ 7991 bl common_abort 7992 7993 7994 7995 7996 .balign 64 7997 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 7998 .global dvmAsmInstructionEnd 7999dvmAsmInstructionEnd: 8000 8001/* 8002 * =========================================================================== 8003 * Sister implementations 8004 * =========================================================================== 8005 */ 8006 .global dvmAsmSisterStart 8007 .type dvmAsmSisterStart, %function 8008 .text 8009 .balign 4 8010dvmAsmSisterStart: 8011 8012/* continuation for OP_CONST_STRING */ 8013 8014 /* 8015 * Continuation if the String has not yet been resolved. 8016 * r1: BBBB (String ref) 8017 * r9: target register 8018 */ 8019.LOP_CONST_STRING_resolve: 8020 EXPORT_PC() 8021 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8022 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8023 bl dvmResolveString @ r0<- String reference 8024 cmp r0, #0 @ failed? 8025 beq common_exceptionThrown @ yup, handle the exception 8026 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8027 GET_INST_OPCODE(ip) @ extract opcode from rINST 8028 SET_VREG(r0, r9) @ vAA<- r0 8029 GOTO_OPCODE(ip) @ jump to next instruction 8030 8031 8032/* continuation for OP_CONST_STRING_JUMBO */ 8033 8034 /* 8035 * Continuation if the String has not yet been resolved. 8036 * r1: BBBBBBBB (String ref) 8037 * r9: target register 8038 */ 8039.LOP_CONST_STRING_JUMBO_resolve: 8040 EXPORT_PC() 8041 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8042 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8043 bl dvmResolveString @ r0<- String reference 8044 cmp r0, #0 @ failed? 8045 beq common_exceptionThrown @ yup, handle the exception 8046 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 8047 GET_INST_OPCODE(ip) @ extract opcode from rINST 8048 SET_VREG(r0, r9) @ vAA<- r0 8049 GOTO_OPCODE(ip) @ jump to next instruction 8050 8051 8052/* continuation for OP_CONST_CLASS */ 8053 8054 /* 8055 * Continuation if the Class has not yet been resolved. 8056 * r1: BBBB (Class ref) 8057 * r9: target register 8058 */ 8059.LOP_CONST_CLASS_resolve: 8060 EXPORT_PC() 8061 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8062 mov r2, #1 @ r2<- true 8063 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8064 bl dvmResolveClass @ r0<- Class reference 8065 cmp r0, #0 @ failed? 8066 beq common_exceptionThrown @ yup, handle the exception 8067 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8068 GET_INST_OPCODE(ip) @ extract opcode from rINST 8069 SET_VREG(r0, r9) @ vAA<- r0 8070 GOTO_OPCODE(ip) @ jump to next instruction 8071 8072 8073/* continuation for OP_CHECK_CAST */ 8074 8075 /* 8076 * Trivial test failed, need to perform full check. This is common. 8077 * r0 holds obj->clazz 8078 * r1 holds class resolved from BBBB 8079 * r9 holds object 8080 */ 8081.LOP_CHECK_CAST_fullcheck: 8082 bl dvmInstanceofNonTrivial @ r0<- boolean result 8083 cmp r0, #0 @ failed? 8084 bne .LOP_CHECK_CAST_okay @ no, success 8085 8086 @ A cast has failed. We need to throw a ClassCastException with the 8087 @ class of the object that failed to be cast. 8088 EXPORT_PC() @ about to throw 8089 ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz 8090 ldr r0, .LstrClassCastExceptionPtr 8091 ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor 8092 bl dvmThrowExceptionWithClassMessage 8093 b common_exceptionThrown 8094 8095 /* 8096 * Resolution required. This is the least-likely path. 8097 * 8098 * r2 holds BBBB 8099 * r9 holds object 8100 */ 8101.LOP_CHECK_CAST_resolve: 8102 EXPORT_PC() @ resolve() could throw 8103 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8104 mov r1, r2 @ r1<- BBBB 8105 mov r2, #0 @ r2<- false 8106 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8107 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8108 cmp r0, #0 @ got null? 8109 beq common_exceptionThrown @ yes, handle exception 8110 mov r1, r0 @ r1<- class resolved from BBB 8111 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8112 b .LOP_CHECK_CAST_resolved @ pick up where we left off 8113 8114.LstrClassCastExceptionPtr: 8115 .word .LstrClassCastException 8116 8117 8118/* continuation for OP_INSTANCE_OF */ 8119 8120 /* 8121 * Trivial test failed, need to perform full check. This is common. 8122 * r0 holds obj->clazz 8123 * r1 holds class resolved from BBBB 8124 * r9 holds A 8125 */ 8126.LOP_INSTANCE_OF_fullcheck: 8127 bl dvmInstanceofNonTrivial @ r0<- boolean result 8128 @ fall through to OP_INSTANCE_OF_store 8129 8130 /* 8131 * r0 holds boolean result 8132 * r9 holds A 8133 */ 8134.LOP_INSTANCE_OF_store: 8135 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8136 SET_VREG(r0, r9) @ vA<- r0 8137 GET_INST_OPCODE(ip) @ extract opcode from rINST 8138 GOTO_OPCODE(ip) @ jump to next instruction 8139 8140 /* 8141 * Trivial test succeeded, save and bail. 8142 * r9 holds A 8143 */ 8144.LOP_INSTANCE_OF_trivial: 8145 mov r0, #1 @ indicate success 8146 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 8147 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8148 SET_VREG(r0, r9) @ vA<- r0 8149 GET_INST_OPCODE(ip) @ extract opcode from rINST 8150 GOTO_OPCODE(ip) @ jump to next instruction 8151 8152 /* 8153 * Resolution required. This is the least-likely path. 8154 * 8155 * r3 holds BBBB 8156 * r9 holds A 8157 */ 8158.LOP_INSTANCE_OF_resolve: 8159 EXPORT_PC() @ resolve() could throw 8160 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8161 mov r1, r3 @ r1<- BBBB 8162 mov r2, #1 @ r2<- true 8163 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8164 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8165 cmp r0, #0 @ got null? 8166 beq common_exceptionThrown @ yes, handle exception 8167 mov r1, r0 @ r1<- class resolved from BBB 8168 mov r3, rINST, lsr #12 @ r3<- B 8169 GET_VREG(r0, r3) @ r0<- vB (object) 8170 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 8171 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 8172 8173 8174/* continuation for OP_NEW_INSTANCE */ 8175 8176 .balign 32 @ minimize cache lines 8177.LOP_NEW_INSTANCE_finish: @ r0=new object 8178 mov r3, rINST, lsr #8 @ r3<- AA 8179 cmp r0, #0 @ failed? 8180 beq common_exceptionThrown @ yes, handle the exception 8181 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8182 GET_INST_OPCODE(ip) @ extract opcode from rINST 8183 SET_VREG(r0, r3) @ vAA<- r0 8184 GOTO_OPCODE(ip) @ jump to next instruction 8185 8186 /* 8187 * Class initialization required. 8188 * 8189 * r0 holds class object 8190 */ 8191.LOP_NEW_INSTANCE_needinit: 8192 mov r9, r0 @ save r0 8193 bl dvmInitClass @ initialize class 8194 cmp r0, #0 @ check boolean result 8195 mov r0, r9 @ restore r0 8196 bne .LOP_NEW_INSTANCE_initialized @ success, continue 8197 b common_exceptionThrown @ failed, deal with init exception 8198 8199 /* 8200 * Resolution required. This is the least-likely path. 8201 * 8202 * r1 holds BBBB 8203 */ 8204.LOP_NEW_INSTANCE_resolve: 8205 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8206 mov r2, #0 @ r2<- false 8207 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8208 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8209 cmp r0, #0 @ got null? 8210 bne .LOP_NEW_INSTANCE_resolved @ no, continue 8211 b common_exceptionThrown @ yes, handle exception 8212 8213.LstrInstantiationErrorPtr: 8214 .word .LstrInstantiationError 8215 8216 8217/* continuation for OP_NEW_ARRAY */ 8218 8219 8220 /* 8221 * Resolve class. (This is an uncommon case.) 8222 * 8223 * r1 holds array length 8224 * r2 holds class ref CCCC 8225 */ 8226.LOP_NEW_ARRAY_resolve: 8227 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8228 mov r9, r1 @ r9<- length (save) 8229 mov r1, r2 @ r1<- CCCC 8230 mov r2, #0 @ r2<- false 8231 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8232 bl dvmResolveClass @ r0<- call(clazz, ref) 8233 cmp r0, #0 @ got null? 8234 mov r1, r9 @ r1<- length (restore) 8235 beq common_exceptionThrown @ yes, handle exception 8236 @ fall through to OP_NEW_ARRAY_finish 8237 8238 /* 8239 * Finish allocation. 8240 * 8241 * r0 holds class 8242 * r1 holds array length 8243 */ 8244.LOP_NEW_ARRAY_finish: 8245 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 8246 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 8247 cmp r0, #0 @ failed? 8248 mov r2, rINST, lsr #8 @ r2<- A+ 8249 beq common_exceptionThrown @ yes, handle the exception 8250 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8251 and r2, r2, #15 @ r2<- A 8252 GET_INST_OPCODE(ip) @ extract opcode from rINST 8253 SET_VREG(r0, r2) @ vA<- r0 8254 GOTO_OPCODE(ip) @ jump to next instruction 8255 8256 8257/* continuation for OP_FILLED_NEW_ARRAY */ 8258 8259 /* 8260 * On entry: 8261 * r0 holds array class 8262 * r10 holds AA or BA 8263 */ 8264.LOP_FILLED_NEW_ARRAY_continue: 8265 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8266 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8267 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8268 .if 0 8269 mov r1, r10 @ r1<- AA (length) 8270 .else 8271 mov r1, r10, lsr #4 @ r1<- B (length) 8272 .endif 8273 cmp r3, #'I' @ array of ints? 8274 cmpne r3, #'L' @ array of objects? 8275 cmpne r3, #'[' @ array of arrays? 8276 mov r9, r1 @ save length in r9 8277 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 8278 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8279 cmp r0, #0 @ null return? 8280 beq common_exceptionThrown @ alloc failed, handle exception 8281 8282 FETCH(r1, 2) @ r1<- FEDC or CCCC 8283 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8284 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8285 subs r9, r9, #1 @ length--, check for neg 8286 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8287 bmi 2f @ was zero, bail 8288 8289 @ copy values from registers into the array 8290 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8291 .if 0 8292 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 82931: ldr r3, [r2], #4 @ r3<- *r2++ 8294 subs r9, r9, #1 @ count-- 8295 str r3, [r0], #4 @ *contents++ = vX 8296 bpl 1b 8297 @ continue at 2 8298 .else 8299 cmp r9, #4 @ length was initially 5? 8300 and r2, r10, #15 @ r2<- A 8301 bne 1f @ <= 4 args, branch 8302 GET_VREG(r3, r2) @ r3<- vA 8303 sub r9, r9, #1 @ count-- 8304 str r3, [r0, #16] @ contents[4] = vA 83051: and r2, r1, #15 @ r2<- F/E/D/C 8306 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8307 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8308 subs r9, r9, #1 @ count-- 8309 str r3, [r0], #4 @ *contents++ = vX 8310 bpl 1b 8311 @ continue at 2 8312 .endif 8313 83142: 8315 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8316 GOTO_OPCODE(ip) @ execute it 8317 8318 /* 8319 * Throw an exception indicating that we have not implemented this 8320 * mode of filled-new-array. 8321 */ 8322.LOP_FILLED_NEW_ARRAY_notimpl: 8323 ldr r0, .L_strInternalError 8324 ldr r1, .L_strFilledNewArrayNotImpl 8325 bl dvmThrowException 8326 b common_exceptionThrown 8327 8328 .if (!0) @ define in one or the other, not both 8329.L_strFilledNewArrayNotImpl: 8330 .word .LstrFilledNewArrayNotImpl 8331.L_strInternalError: 8332 .word .LstrInternalError 8333 .endif 8334 8335 8336/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8337 8338 /* 8339 * On entry: 8340 * r0 holds array class 8341 * r10 holds AA or BA 8342 */ 8343.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8344 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8345 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8346 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8347 .if 1 8348 mov r1, r10 @ r1<- AA (length) 8349 .else 8350 mov r1, r10, lsr #4 @ r1<- B (length) 8351 .endif 8352 cmp r3, #'I' @ array of ints? 8353 cmpne r3, #'L' @ array of objects? 8354 cmpne r3, #'[' @ array of arrays? 8355 mov r9, r1 @ save length in r9 8356 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8357 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8358 cmp r0, #0 @ null return? 8359 beq common_exceptionThrown @ alloc failed, handle exception 8360 8361 FETCH(r1, 2) @ r1<- FEDC or CCCC 8362 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8363 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8364 subs r9, r9, #1 @ length--, check for neg 8365 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8366 bmi 2f @ was zero, bail 8367 8368 @ copy values from registers into the array 8369 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8370 .if 1 8371 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 83721: ldr r3, [r2], #4 @ r3<- *r2++ 8373 subs r9, r9, #1 @ count-- 8374 str r3, [r0], #4 @ *contents++ = vX 8375 bpl 1b 8376 @ continue at 2 8377 .else 8378 cmp r9, #4 @ length was initially 5? 8379 and r2, r10, #15 @ r2<- A 8380 bne 1f @ <= 4 args, branch 8381 GET_VREG(r3, r2) @ r3<- vA 8382 sub r9, r9, #1 @ count-- 8383 str r3, [r0, #16] @ contents[4] = vA 83841: and r2, r1, #15 @ r2<- F/E/D/C 8385 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8386 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8387 subs r9, r9, #1 @ count-- 8388 str r3, [r0], #4 @ *contents++ = vX 8389 bpl 1b 8390 @ continue at 2 8391 .endif 8392 83932: 8394 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8395 GOTO_OPCODE(ip) @ execute it 8396 8397 /* 8398 * Throw an exception indicating that we have not implemented this 8399 * mode of filled-new-array. 8400 */ 8401.LOP_FILLED_NEW_ARRAY_RANGE_notimpl: 8402 ldr r0, .L_strInternalError 8403 ldr r1, .L_strFilledNewArrayNotImpl 8404 bl dvmThrowException 8405 b common_exceptionThrown 8406 8407 .if (!1) @ define in one or the other, not both 8408.L_strFilledNewArrayNotImpl: 8409 .word .LstrFilledNewArrayNotImpl 8410.L_strInternalError: 8411 .word .LstrInternalError 8412 .endif 8413 8414 8415/* continuation for OP_CMPL_FLOAT */ 8416 8417 @ Test for NaN with a second comparison. EABI forbids testing bit 8418 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8419 @ make the library call. 8420.LOP_CMPL_FLOAT_gt_or_nan: 8421 mov r1, r9 @ reverse order 8422 mov r0, r10 8423 bl __aeabi_cfcmple @ r0<- Z set if eq, C clear if < 8424 @bleq common_abort 8425 movcc r1, #1 @ (greater than) r1<- 1 8426 bcc .LOP_CMPL_FLOAT_finish 8427 mvn r1, #0 @ r1<- 1 or -1 for NaN 8428 b .LOP_CMPL_FLOAT_finish 8429 8430 8431#if 0 /* "clasic" form */ 8432 FETCH(r0, 1) @ r0<- CCBB 8433 and r2, r0, #255 @ r2<- BB 8434 mov r3, r0, lsr #8 @ r3<- CC 8435 GET_VREG(r9, r2) @ r9<- vBB 8436 GET_VREG(r10, r3) @ r10<- vCC 8437 mov r0, r9 @ r0<- vBB 8438 mov r1, r10 @ r1<- vCC 8439 bl __aeabi_fcmpeq @ r0<- (vBB == vCC) 8440 cmp r0, #0 @ equal? 8441 movne r1, #0 @ yes, result is 0 8442 bne OP_CMPL_FLOAT_finish 8443 mov r0, r9 @ r0<- vBB 8444 mov r1, r10 @ r1<- vCC 8445 bl __aeabi_fcmplt @ r0<- (vBB < vCC) 8446 cmp r0, #0 @ less than? 8447 b OP_CMPL_FLOAT_continue 8448@%break 8449 8450OP_CMPL_FLOAT_continue: 8451 mvnne r1, #0 @ yes, result is -1 8452 bne OP_CMPL_FLOAT_finish 8453 mov r0, r9 @ r0<- vBB 8454 mov r1, r10 @ r1<- vCC 8455 bl __aeabi_fcmpgt @ r0<- (vBB > vCC) 8456 cmp r0, #0 @ greater than? 8457 beq OP_CMPL_FLOAT_nan @ no, must be NaN 8458 mov r1, #1 @ yes, result is 1 8459 @ fall through to _finish 8460 8461OP_CMPL_FLOAT_finish: 8462 mov r3, rINST, lsr #8 @ r3<- AA 8463 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8464 SET_VREG(r1, r3) @ vAA<- r1 8465 GET_INST_OPCODE(ip) @ extract opcode from rINST 8466 GOTO_OPCODE(ip) @ jump to next instruction 8467 8468 /* 8469 * This is expected to be uncommon, so we double-branch (once to here, 8470 * again back to _finish). 8471 */ 8472OP_CMPL_FLOAT_nan: 8473 mvn r1, #0 @ r1<- 1 or -1 for NaN 8474 b OP_CMPL_FLOAT_finish 8475 8476#endif 8477 8478 8479/* continuation for OP_CMPG_FLOAT */ 8480 8481 @ Test for NaN with a second comparison. EABI forbids testing bit 8482 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8483 @ make the library call. 8484.LOP_CMPG_FLOAT_gt_or_nan: 8485 mov r1, r9 @ reverse order 8486 mov r0, r10 8487 bl __aeabi_cfcmple @ r0<- Z set if eq, C clear if < 8488 @bleq common_abort 8489 movcc r1, #1 @ (greater than) r1<- 1 8490 bcc .LOP_CMPG_FLOAT_finish 8491 mov r1, #1 @ r1<- 1 or -1 for NaN 8492 b .LOP_CMPG_FLOAT_finish 8493 8494 8495#if 0 /* "clasic" form */ 8496 FETCH(r0, 1) @ r0<- CCBB 8497 and r2, r0, #255 @ r2<- BB 8498 mov r3, r0, lsr #8 @ r3<- CC 8499 GET_VREG(r9, r2) @ r9<- vBB 8500 GET_VREG(r10, r3) @ r10<- vCC 8501 mov r0, r9 @ r0<- vBB 8502 mov r1, r10 @ r1<- vCC 8503 bl __aeabi_fcmpeq @ r0<- (vBB == vCC) 8504 cmp r0, #0 @ equal? 8505 movne r1, #0 @ yes, result is 0 8506 bne OP_CMPG_FLOAT_finish 8507 mov r0, r9 @ r0<- vBB 8508 mov r1, r10 @ r1<- vCC 8509 bl __aeabi_fcmplt @ r0<- (vBB < vCC) 8510 cmp r0, #0 @ less than? 8511 b OP_CMPG_FLOAT_continue 8512@%break 8513 8514OP_CMPG_FLOAT_continue: 8515 mvnne r1, #0 @ yes, result is -1 8516 bne OP_CMPG_FLOAT_finish 8517 mov r0, r9 @ r0<- vBB 8518 mov r1, r10 @ r1<- vCC 8519 bl __aeabi_fcmpgt @ r0<- (vBB > vCC) 8520 cmp r0, #0 @ greater than? 8521 beq OP_CMPG_FLOAT_nan @ no, must be NaN 8522 mov r1, #1 @ yes, result is 1 8523 @ fall through to _finish 8524 8525OP_CMPG_FLOAT_finish: 8526 mov r3, rINST, lsr #8 @ r3<- AA 8527 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8528 SET_VREG(r1, r3) @ vAA<- r1 8529 GET_INST_OPCODE(ip) @ extract opcode from rINST 8530 GOTO_OPCODE(ip) @ jump to next instruction 8531 8532 /* 8533 * This is expected to be uncommon, so we double-branch (once to here, 8534 * again back to _finish). 8535 */ 8536OP_CMPG_FLOAT_nan: 8537 mov r1, #1 @ r1<- 1 or -1 for NaN 8538 b OP_CMPG_FLOAT_finish 8539 8540#endif 8541 8542 8543/* continuation for OP_CMPL_DOUBLE */ 8544 8545 @ Test for NaN with a second comparison. EABI forbids testing bit 8546 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8547 @ make the library call. 8548.LOP_CMPL_DOUBLE_gt_or_nan: 8549 ldmia r10, {r0-r1} @ reverse order 8550 ldmia r9, {r2-r3} 8551 bl __aeabi_cdcmple @ r0<- Z set if eq, C clear if < 8552 @bleq common_abort 8553 movcc r1, #1 @ (greater than) r1<- 1 8554 bcc .LOP_CMPL_DOUBLE_finish 8555 mvn r1, #0 @ r1<- 1 or -1 for NaN 8556 b .LOP_CMPL_DOUBLE_finish 8557 8558 8559/* continuation for OP_CMPG_DOUBLE */ 8560 8561 @ Test for NaN with a second comparison. EABI forbids testing bit 8562 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8563 @ make the library call. 8564.LOP_CMPG_DOUBLE_gt_or_nan: 8565 ldmia r10, {r0-r1} @ reverse order 8566 ldmia r9, {r2-r3} 8567 bl __aeabi_cdcmple @ r0<- Z set if eq, C clear if < 8568 @bleq common_abort 8569 movcc r1, #1 @ (greater than) r1<- 1 8570 bcc .LOP_CMPG_DOUBLE_finish 8571 mov r1, #1 @ r1<- 1 or -1 for NaN 8572 b .LOP_CMPG_DOUBLE_finish 8573 8574 8575/* continuation for OP_CMP_LONG */ 8576 8577.LOP_CMP_LONG_less: 8578 mvn r1, #0 @ r1<- -1 8579 @ Want to cond code the next mov so we can avoid branch, but don't see it; 8580 @ instead, we just replicate the tail end. 8581 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8582 SET_VREG(r1, r9) @ vAA<- r1 8583 GET_INST_OPCODE(ip) @ extract opcode from rINST 8584 GOTO_OPCODE(ip) @ jump to next instruction 8585 8586.LOP_CMP_LONG_greater: 8587 mov r1, #1 @ r1<- 1 8588 @ fall through to _finish 8589 8590.LOP_CMP_LONG_finish: 8591 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8592 SET_VREG(r1, r9) @ vAA<- r1 8593 GET_INST_OPCODE(ip) @ extract opcode from rINST 8594 GOTO_OPCODE(ip) @ jump to next instruction 8595 8596 8597/* continuation for OP_AGET_WIDE */ 8598 8599.LOP_AGET_WIDE_finish: 8600 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8601 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8602 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8603 GET_INST_OPCODE(ip) @ extract opcode from rINST 8604 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8605 GOTO_OPCODE(ip) @ jump to next instruction 8606 8607 8608/* continuation for OP_APUT_WIDE */ 8609 8610.LOP_APUT_WIDE_finish: 8611 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8612 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8613 GET_INST_OPCODE(ip) @ extract opcode from rINST 8614 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8615 GOTO_OPCODE(ip) @ jump to next instruction 8616 8617 8618/* continuation for OP_APUT_OBJECT */ 8619 /* 8620 * On entry: 8621 * r1 = vBB (arrayObj) 8622 * r9 = vAA (obj) 8623 * r10 = offset into array (vBB + vCC * width) 8624 */ 8625.LOP_APUT_OBJECT_finish: 8626 cmp r9, #0 @ storing null reference? 8627 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8628 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8629 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz 8630 bl dvmCanPutArrayElement @ test object type vs. array type 8631 cmp r0, #0 @ okay? 8632 beq common_errArrayStore @ no 8633.LOP_APUT_OBJECT_skip_check: 8634 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8635 GET_INST_OPCODE(ip) @ extract opcode from rINST 8636 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8637 GOTO_OPCODE(ip) @ jump to next instruction 8638 8639 8640/* continuation for OP_IGET */ 8641 8642 /* 8643 * Currently: 8644 * r0 holds resolved field 8645 * r9 holds object 8646 */ 8647.LOP_IGET_finish: 8648 @bl common_squeak0 8649 cmp r9, #0 @ check object for null 8650 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8651 beq common_errNullObject @ object was null 8652 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8653 mov r2, rINST, lsr #8 @ r2<- A+ 8654 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8655 and r2, r2, #15 @ r2<- A 8656 GET_INST_OPCODE(ip) @ extract opcode from rINST 8657 SET_VREG(r0, r2) @ fp[A]<- r0 8658 GOTO_OPCODE(ip) @ jump to next instruction 8659 8660 8661/* continuation for OP_IGET_WIDE */ 8662 8663 /* 8664 * Currently: 8665 * r0 holds resolved field 8666 * r9 holds object 8667 */ 8668.LOP_IGET_WIDE_finish: 8669 cmp r9, #0 @ check object for null 8670 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8671 beq common_errNullObject @ object was null 8672 mov r2, rINST, lsr #8 @ r2<- A+ 8673 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8674 and r2, r2, #15 @ r2<- A 8675 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8676 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8677 GET_INST_OPCODE(ip) @ extract opcode from rINST 8678 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8679 GOTO_OPCODE(ip) @ jump to next instruction 8680 8681 8682/* continuation for OP_IGET_OBJECT */ 8683 8684 /* 8685 * Currently: 8686 * r0 holds resolved field 8687 * r9 holds object 8688 */ 8689.LOP_IGET_OBJECT_finish: 8690 @bl common_squeak0 8691 cmp r9, #0 @ check object for null 8692 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8693 beq common_errNullObject @ object was null 8694 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8695 mov r2, rINST, lsr #8 @ r2<- A+ 8696 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8697 and r2, r2, #15 @ r2<- A 8698 GET_INST_OPCODE(ip) @ extract opcode from rINST 8699 SET_VREG(r0, r2) @ fp[A]<- r0 8700 GOTO_OPCODE(ip) @ jump to next instruction 8701 8702 8703/* continuation for OP_IGET_BOOLEAN */ 8704 8705 /* 8706 * Currently: 8707 * r0 holds resolved field 8708 * r9 holds object 8709 */ 8710.LOP_IGET_BOOLEAN_finish: 8711 @bl common_squeak1 8712 cmp r9, #0 @ check object for null 8713 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8714 beq common_errNullObject @ object was null 8715 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8716 mov r2, rINST, lsr #8 @ r2<- A+ 8717 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8718 and r2, r2, #15 @ r2<- A 8719 GET_INST_OPCODE(ip) @ extract opcode from rINST 8720 SET_VREG(r0, r2) @ fp[A]<- r0 8721 GOTO_OPCODE(ip) @ jump to next instruction 8722 8723 8724/* continuation for OP_IGET_BYTE */ 8725 8726 /* 8727 * Currently: 8728 * r0 holds resolved field 8729 * r9 holds object 8730 */ 8731.LOP_IGET_BYTE_finish: 8732 @bl common_squeak2 8733 cmp r9, #0 @ check object for null 8734 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8735 beq common_errNullObject @ object was null 8736 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8737 mov r2, rINST, lsr #8 @ r2<- A+ 8738 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8739 and r2, r2, #15 @ r2<- A 8740 GET_INST_OPCODE(ip) @ extract opcode from rINST 8741 SET_VREG(r0, r2) @ fp[A]<- r0 8742 GOTO_OPCODE(ip) @ jump to next instruction 8743 8744 8745/* continuation for OP_IGET_CHAR */ 8746 8747 /* 8748 * Currently: 8749 * r0 holds resolved field 8750 * r9 holds object 8751 */ 8752.LOP_IGET_CHAR_finish: 8753 @bl common_squeak3 8754 cmp r9, #0 @ check object for null 8755 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8756 beq common_errNullObject @ object was null 8757 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8758 mov r2, rINST, lsr #8 @ r2<- A+ 8759 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8760 and r2, r2, #15 @ r2<- A 8761 GET_INST_OPCODE(ip) @ extract opcode from rINST 8762 SET_VREG(r0, r2) @ fp[A]<- r0 8763 GOTO_OPCODE(ip) @ jump to next instruction 8764 8765 8766/* continuation for OP_IGET_SHORT */ 8767 8768 /* 8769 * Currently: 8770 * r0 holds resolved field 8771 * r9 holds object 8772 */ 8773.LOP_IGET_SHORT_finish: 8774 @bl common_squeak4 8775 cmp r9, #0 @ check object for null 8776 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8777 beq common_errNullObject @ object was null 8778 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8779 mov r2, rINST, lsr #8 @ r2<- A+ 8780 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8781 and r2, r2, #15 @ r2<- A 8782 GET_INST_OPCODE(ip) @ extract opcode from rINST 8783 SET_VREG(r0, r2) @ fp[A]<- r0 8784 GOTO_OPCODE(ip) @ jump to next instruction 8785 8786 8787/* continuation for OP_IPUT */ 8788 8789 /* 8790 * Currently: 8791 * r0 holds resolved field 8792 * r9 holds object 8793 */ 8794.LOP_IPUT_finish: 8795 @bl common_squeak0 8796 mov r1, rINST, lsr #8 @ r1<- A+ 8797 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8798 and r1, r1, #15 @ r1<- A 8799 cmp r9, #0 @ check object for null 8800 GET_VREG(r0, r1) @ r0<- fp[A] 8801 beq common_errNullObject @ object was null 8802 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8803 GET_INST_OPCODE(ip) @ extract opcode from rINST 8804 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8805 GOTO_OPCODE(ip) @ jump to next instruction 8806 8807 8808/* continuation for OP_IPUT_WIDE */ 8809 8810 /* 8811 * Currently: 8812 * r0 holds resolved field 8813 * r9 holds object 8814 */ 8815.LOP_IPUT_WIDE_finish: 8816 mov r2, rINST, lsr #8 @ r2<- A+ 8817 cmp r9, #0 @ check object for null 8818 and r2, r2, #15 @ r2<- A 8819 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8820 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8821 beq common_errNullObject @ object was null 8822 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8823 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8824 GET_INST_OPCODE(ip) @ extract opcode from rINST 8825 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0 8826 GOTO_OPCODE(ip) @ jump to next instruction 8827 8828 8829/* continuation for OP_IPUT_OBJECT */ 8830 8831 /* 8832 * Currently: 8833 * r0 holds resolved field 8834 * r9 holds object 8835 */ 8836.LOP_IPUT_OBJECT_finish: 8837 @bl common_squeak0 8838 mov r1, rINST, lsr #8 @ r1<- A+ 8839 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8840 and r1, r1, #15 @ r1<- A 8841 cmp r9, #0 @ check object for null 8842 GET_VREG(r0, r1) @ r0<- fp[A] 8843 beq common_errNullObject @ object was null 8844 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8845 GET_INST_OPCODE(ip) @ extract opcode from rINST 8846 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8847 GOTO_OPCODE(ip) @ jump to next instruction 8848 8849 8850/* continuation for OP_IPUT_BOOLEAN */ 8851 8852 /* 8853 * Currently: 8854 * r0 holds resolved field 8855 * r9 holds object 8856 */ 8857.LOP_IPUT_BOOLEAN_finish: 8858 @bl common_squeak1 8859 mov r1, rINST, lsr #8 @ r1<- A+ 8860 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8861 and r1, r1, #15 @ r1<- A 8862 cmp r9, #0 @ check object for null 8863 GET_VREG(r0, r1) @ r0<- fp[A] 8864 beq common_errNullObject @ object was null 8865 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8866 GET_INST_OPCODE(ip) @ extract opcode from rINST 8867 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8868 GOTO_OPCODE(ip) @ jump to next instruction 8869 8870 8871/* continuation for OP_IPUT_BYTE */ 8872 8873 /* 8874 * Currently: 8875 * r0 holds resolved field 8876 * r9 holds object 8877 */ 8878.LOP_IPUT_BYTE_finish: 8879 @bl common_squeak2 8880 mov r1, rINST, lsr #8 @ r1<- A+ 8881 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8882 and r1, r1, #15 @ r1<- A 8883 cmp r9, #0 @ check object for null 8884 GET_VREG(r0, r1) @ r0<- fp[A] 8885 beq common_errNullObject @ object was null 8886 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8887 GET_INST_OPCODE(ip) @ extract opcode from rINST 8888 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8889 GOTO_OPCODE(ip) @ jump to next instruction 8890 8891 8892/* continuation for OP_IPUT_CHAR */ 8893 8894 /* 8895 * Currently: 8896 * r0 holds resolved field 8897 * r9 holds object 8898 */ 8899.LOP_IPUT_CHAR_finish: 8900 @bl common_squeak3 8901 mov r1, rINST, lsr #8 @ r1<- A+ 8902 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8903 and r1, r1, #15 @ r1<- A 8904 cmp r9, #0 @ check object for null 8905 GET_VREG(r0, r1) @ r0<- fp[A] 8906 beq common_errNullObject @ object was null 8907 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8908 GET_INST_OPCODE(ip) @ extract opcode from rINST 8909 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8910 GOTO_OPCODE(ip) @ jump to next instruction 8911 8912 8913/* continuation for OP_IPUT_SHORT */ 8914 8915 /* 8916 * Currently: 8917 * r0 holds resolved field 8918 * r9 holds object 8919 */ 8920.LOP_IPUT_SHORT_finish: 8921 @bl common_squeak4 8922 mov r1, rINST, lsr #8 @ r1<- A+ 8923 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8924 and r1, r1, #15 @ r1<- A 8925 cmp r9, #0 @ check object for null 8926 GET_VREG(r0, r1) @ r0<- fp[A] 8927 beq common_errNullObject @ object was null 8928 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8929 GET_INST_OPCODE(ip) @ extract opcode from rINST 8930 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8931 GOTO_OPCODE(ip) @ jump to next instruction 8932 8933 8934/* continuation for OP_SGET */ 8935 8936 /* 8937 * Continuation if the field has not yet been resolved. 8938 * r1: BBBB field ref 8939 */ 8940.LOP_SGET_resolve: 8941 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8942 EXPORT_PC() @ resolve() could throw, so export now 8943 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8944 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8945 cmp r0, #0 @ success? 8946 bne .LOP_SGET_finish @ yes, finish 8947 b common_exceptionThrown @ no, handle exception 8948 8949 8950/* continuation for OP_SGET_WIDE */ 8951 8952 /* 8953 * Continuation if the field has not yet been resolved. 8954 * r1: BBBB field ref 8955 */ 8956.LOP_SGET_WIDE_resolve: 8957 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8958 EXPORT_PC() @ resolve() could throw, so export now 8959 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8960 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8961 cmp r0, #0 @ success? 8962 bne .LOP_SGET_WIDE_finish @ yes, finish 8963 b common_exceptionThrown @ no, handle exception 8964 8965 8966/* continuation for OP_SGET_OBJECT */ 8967 8968 /* 8969 * Continuation if the field has not yet been resolved. 8970 * r1: BBBB field ref 8971 */ 8972.LOP_SGET_OBJECT_resolve: 8973 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8974 EXPORT_PC() @ resolve() could throw, so export now 8975 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8976 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8977 cmp r0, #0 @ success? 8978 bne .LOP_SGET_OBJECT_finish @ yes, finish 8979 b common_exceptionThrown @ no, handle exception 8980 8981 8982/* continuation for OP_SGET_BOOLEAN */ 8983 8984 /* 8985 * Continuation if the field has not yet been resolved. 8986 * r1: BBBB field ref 8987 */ 8988.LOP_SGET_BOOLEAN_resolve: 8989 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8990 EXPORT_PC() @ resolve() could throw, so export now 8991 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8992 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8993 cmp r0, #0 @ success? 8994 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8995 b common_exceptionThrown @ no, handle exception 8996 8997 8998/* continuation for OP_SGET_BYTE */ 8999 9000 /* 9001 * Continuation if the field has not yet been resolved. 9002 * r1: BBBB field ref 9003 */ 9004.LOP_SGET_BYTE_resolve: 9005 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9006 EXPORT_PC() @ resolve() could throw, so export now 9007 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9008 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9009 cmp r0, #0 @ success? 9010 bne .LOP_SGET_BYTE_finish @ yes, finish 9011 b common_exceptionThrown @ no, handle exception 9012 9013 9014/* continuation for OP_SGET_CHAR */ 9015 9016 /* 9017 * Continuation if the field has not yet been resolved. 9018 * r1: BBBB field ref 9019 */ 9020.LOP_SGET_CHAR_resolve: 9021 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9022 EXPORT_PC() @ resolve() could throw, so export now 9023 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9024 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9025 cmp r0, #0 @ success? 9026 bne .LOP_SGET_CHAR_finish @ yes, finish 9027 b common_exceptionThrown @ no, handle exception 9028 9029 9030/* continuation for OP_SGET_SHORT */ 9031 9032 /* 9033 * Continuation if the field has not yet been resolved. 9034 * r1: BBBB field ref 9035 */ 9036.LOP_SGET_SHORT_resolve: 9037 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9038 EXPORT_PC() @ resolve() could throw, so export now 9039 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9040 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9041 cmp r0, #0 @ success? 9042 bne .LOP_SGET_SHORT_finish @ yes, finish 9043 b common_exceptionThrown @ no, handle exception 9044 9045 9046/* continuation for OP_SPUT */ 9047 9048 /* 9049 * Continuation if the field has not yet been resolved. 9050 * r1: BBBB field ref 9051 */ 9052.LOP_SPUT_resolve: 9053 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9054 EXPORT_PC() @ resolve() could throw, so export now 9055 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9056 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9057 cmp r0, #0 @ success? 9058 bne .LOP_SPUT_finish @ yes, finish 9059 b common_exceptionThrown @ no, handle exception 9060 9061 9062/* continuation for OP_SPUT_WIDE */ 9063 9064 /* 9065 * Continuation if the field has not yet been resolved. 9066 * r1: BBBB field ref 9067 * r9: &fp[AA] 9068 */ 9069.LOP_SPUT_WIDE_resolve: 9070 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9071 EXPORT_PC() @ resolve() could throw, so export now 9072 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9073 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9074 cmp r0, #0 @ success? 9075 bne .LOP_SPUT_WIDE_finish @ yes, finish 9076 b common_exceptionThrown @ no, handle exception 9077 9078 9079/* continuation for OP_SPUT_OBJECT */ 9080 9081 /* 9082 * Continuation if the field has not yet been resolved. 9083 * r1: BBBB field ref 9084 */ 9085.LOP_SPUT_OBJECT_resolve: 9086 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9087 EXPORT_PC() @ resolve() could throw, so export now 9088 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9089 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9090 cmp r0, #0 @ success? 9091 bne .LOP_SPUT_OBJECT_finish @ yes, finish 9092 b common_exceptionThrown @ no, handle exception 9093 9094 9095/* continuation for OP_SPUT_BOOLEAN */ 9096 9097 /* 9098 * Continuation if the field has not yet been resolved. 9099 * r1: BBBB field ref 9100 */ 9101.LOP_SPUT_BOOLEAN_resolve: 9102 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9103 EXPORT_PC() @ resolve() could throw, so export now 9104 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9105 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9106 cmp r0, #0 @ success? 9107 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 9108 b common_exceptionThrown @ no, handle exception 9109 9110 9111/* continuation for OP_SPUT_BYTE */ 9112 9113 /* 9114 * Continuation if the field has not yet been resolved. 9115 * r1: BBBB field ref 9116 */ 9117.LOP_SPUT_BYTE_resolve: 9118 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9119 EXPORT_PC() @ resolve() could throw, so export now 9120 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9121 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9122 cmp r0, #0 @ success? 9123 bne .LOP_SPUT_BYTE_finish @ yes, finish 9124 b common_exceptionThrown @ no, handle exception 9125 9126 9127/* continuation for OP_SPUT_CHAR */ 9128 9129 /* 9130 * Continuation if the field has not yet been resolved. 9131 * r1: BBBB field ref 9132 */ 9133.LOP_SPUT_CHAR_resolve: 9134 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9135 EXPORT_PC() @ resolve() could throw, so export now 9136 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9137 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9138 cmp r0, #0 @ success? 9139 bne .LOP_SPUT_CHAR_finish @ yes, finish 9140 b common_exceptionThrown @ no, handle exception 9141 9142 9143/* continuation for OP_SPUT_SHORT */ 9144 9145 /* 9146 * Continuation if the field has not yet been resolved. 9147 * r1: BBBB field ref 9148 */ 9149.LOP_SPUT_SHORT_resolve: 9150 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9151 EXPORT_PC() @ resolve() could throw, so export now 9152 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9153 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9154 cmp r0, #0 @ success? 9155 bne .LOP_SPUT_SHORT_finish @ yes, finish 9156 b common_exceptionThrown @ no, handle exception 9157 9158 9159/* continuation for OP_INVOKE_VIRTUAL */ 9160 9161 /* 9162 * At this point: 9163 * r0 = resolved base method 9164 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9165 */ 9166.LOP_INVOKE_VIRTUAL_continue: 9167 GET_VREG(r1, r10) @ r1<- "this" ptr 9168 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9169 cmp r1, #0 @ is "this" null? 9170 beq common_errNullObject @ null "this", throw exception 9171 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9172 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9173 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9174 bl common_invokeMethodNoRange @ continue on 9175 9176 9177/* continuation for OP_INVOKE_SUPER */ 9178 9179 /* 9180 * At this point: 9181 * r0 = resolved base method 9182 * r9 = method->clazz 9183 */ 9184.LOP_INVOKE_SUPER_continue: 9185 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9186 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9187 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9188 EXPORT_PC() @ must export for invoke 9189 cmp r2, r3 @ compare (methodIndex, vtableCount) 9190 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 9191 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9192 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9193 bl common_invokeMethodNoRange @ continue on 9194 9195.LOP_INVOKE_SUPER_resolve: 9196 mov r0, r9 @ r0<- method->clazz 9197 mov r2, #METHOD_VIRTUAL @ resolver method type 9198 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9199 cmp r0, #0 @ got null? 9200 bne .LOP_INVOKE_SUPER_continue @ no, continue 9201 b common_exceptionThrown @ yes, handle exception 9202 9203 /* 9204 * Throw a NoSuchMethodError with the method name as the message. 9205 * r0 = resolved base method 9206 */ 9207.LOP_INVOKE_SUPER_nsm: 9208 ldr r1, [r0, #offMethod_name] @ r1<- method name 9209 b common_errNoSuchMethod 9210 9211 9212/* continuation for OP_INVOKE_DIRECT */ 9213 9214 /* 9215 * On entry: 9216 * r1 = reference (BBBB or CCCC) 9217 * r10 = "this" register 9218 */ 9219.LOP_INVOKE_DIRECT_resolve: 9220 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9221 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9222 mov r2, #METHOD_DIRECT @ resolver method type 9223 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9224 cmp r0, #0 @ got null? 9225 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9226 bne .LOP_INVOKE_DIRECT_finish @ no, continue 9227 b common_exceptionThrown @ yes, handle exception 9228 9229 9230/* continuation for OP_INVOKE_VIRTUAL_RANGE */ 9231 9232 /* 9233 * At this point: 9234 * r0 = resolved base method 9235 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9236 */ 9237.LOP_INVOKE_VIRTUAL_RANGE_continue: 9238 GET_VREG(r1, r10) @ r1<- "this" ptr 9239 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9240 cmp r1, #0 @ is "this" null? 9241 beq common_errNullObject @ null "this", throw exception 9242 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9243 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9244 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9245 bl common_invokeMethodRange @ continue on 9246 9247 9248/* continuation for OP_INVOKE_SUPER_RANGE */ 9249 9250 /* 9251 * At this point: 9252 * r0 = resolved base method 9253 * r9 = method->clazz 9254 */ 9255.LOP_INVOKE_SUPER_RANGE_continue: 9256 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9257 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9258 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9259 EXPORT_PC() @ must export for invoke 9260 cmp r2, r3 @ compare (methodIndex, vtableCount) 9261 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass 9262 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9263 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9264 bl common_invokeMethodRange @ continue on 9265 9266.LOP_INVOKE_SUPER_RANGE_resolve: 9267 mov r0, r9 @ r0<- method->clazz 9268 mov r2, #METHOD_VIRTUAL @ resolver method type 9269 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9270 cmp r0, #0 @ got null? 9271 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue 9272 b common_exceptionThrown @ yes, handle exception 9273 9274 /* 9275 * Throw a NoSuchMethodError with the method name as the message. 9276 * r0 = resolved base method 9277 */ 9278.LOP_INVOKE_SUPER_RANGE_nsm: 9279 ldr r1, [r0, #offMethod_name] @ r1<- method name 9280 b common_errNoSuchMethod 9281 9282 9283/* continuation for OP_INVOKE_DIRECT_RANGE */ 9284 9285 /* 9286 * On entry: 9287 * r1 = reference (BBBB or CCCC) 9288 * r10 = "this" register 9289 */ 9290.LOP_INVOKE_DIRECT_RANGE_resolve: 9291 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9292 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9293 mov r2, #METHOD_DIRECT @ resolver method type 9294 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9295 cmp r0, #0 @ got null? 9296 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9297 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue 9298 b common_exceptionThrown @ yes, handle exception 9299 9300 9301/* continuation for OP_FLOAT_TO_LONG */ 9302/* 9303 * Convert the float in r0 to a long in r0/r1. 9304 * 9305 * We have to clip values to long min/max per the specification. The 9306 * expected common case is a "reasonable" value that converts directly 9307 * to modest integer. The EABI convert function isn't doing this for us. 9308 */ 9309f2l_doconv: 9310 stmfd sp!, {r4, lr} 9311 mov r1, #0x5f000000 @ (float)maxlong 9312 mov r4, r0 9313 bl __aeabi_fcmpge @ is arg >= maxlong? 9314 cmp r0, #0 @ nonzero == yes 9315 mvnne r0, #0 @ return maxlong (7fffffff) 9316 mvnne r1, #0x80000000 9317 ldmnefd sp!, {r4, pc} 9318 9319 mov r0, r4 @ recover arg 9320 mov r1, #0xdf000000 @ (float)minlong 9321 bl __aeabi_fcmple @ is arg <= minlong? 9322 cmp r0, #0 @ nonzero == yes 9323 movne r0, #0 @ return minlong (80000000) 9324 movne r1, #0x80000000 9325 ldmnefd sp!, {r4, pc} 9326 9327 mov r0, r4 @ recover arg 9328 mov r1, r4 9329 bl __aeabi_fcmpeq @ is arg == self? 9330 cmp r0, #0 @ zero == no 9331 moveq r1, #0 @ return zero for NaN 9332 ldmeqfd sp!, {r4, pc} 9333 9334 mov r0, r4 @ recover arg 9335 bl __aeabi_f2lz @ convert float to long 9336 ldmfd sp!, {r4, pc} 9337 9338 9339/* continuation for OP_DOUBLE_TO_LONG */ 9340/* 9341 * Convert the double in r0/r1 to a long in r0/r1. 9342 * 9343 * We have to clip values to long min/max per the specification. The 9344 * expected common case is a "reasonable" value that converts directly 9345 * to modest integer. The EABI convert function isn't doing this for us. 9346 */ 9347d2l_doconv: 9348 stmfd sp!, {r4, r5, lr} @ save regs 9349 mov r3, #0x43000000 @ maxlong, as a double (high word) 9350 add r3, #0x00e00000 @ 0x43e00000 9351 mov r2, #0 @ maxlong, as a double (low word) 9352 sub sp, sp, #4 @ align for EABI 9353 mov r4, r0 @ save a copy of r0 9354 mov r5, r1 @ and r1 9355 bl __aeabi_dcmpge @ is arg >= maxlong? 9356 cmp r0, #0 @ nonzero == yes 9357 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 9358 mvnne r1, #0x80000000 9359 bne 1f 9360 9361 mov r0, r4 @ recover arg 9362 mov r1, r5 9363 mov r3, #0xc3000000 @ minlong, as a double (high word) 9364 add r3, #0x00e00000 @ 0xc3e00000 9365 mov r2, #0 @ minlong, as a double (low word) 9366 bl __aeabi_dcmple @ is arg <= minlong? 9367 cmp r0, #0 @ nonzero == yes 9368 movne r0, #0 @ return minlong (8000000000000000) 9369 movne r1, #0x80000000 9370 bne 1f 9371 9372 mov r0, r4 @ recover arg 9373 mov r1, r5 9374 mov r2, r4 @ compare against self 9375 mov r3, r5 9376 bl __aeabi_dcmpeq @ is arg == self? 9377 cmp r0, #0 @ zero == no 9378 moveq r1, #0 @ return zero for NaN 9379 beq 1f 9380 9381 mov r0, r4 @ recover arg 9382 mov r1, r5 9383 bl __aeabi_d2lz @ convert double to long 9384 93851: 9386 add sp, sp, #4 9387 ldmfd sp!, {r4, r5, pc} 9388 9389 9390/* continuation for OP_MUL_LONG */ 9391 9392.LOP_MUL_LONG_finish: 9393 GET_INST_OPCODE(ip) @ extract opcode from rINST 9394 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 9395 GOTO_OPCODE(ip) @ jump to next instruction 9396 9397 9398/* continuation for OP_SHL_LONG */ 9399 9400.LOP_SHL_LONG_finish: 9401 mov r0, r0, asl r2 @ r0<- r0 << r2 9402 GET_INST_OPCODE(ip) @ extract opcode from rINST 9403 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9404 GOTO_OPCODE(ip) @ jump to next instruction 9405 9406 9407/* continuation for OP_SHR_LONG */ 9408 9409.LOP_SHR_LONG_finish: 9410 mov r1, r1, asr r2 @ r1<- r1 >> r2 9411 GET_INST_OPCODE(ip) @ extract opcode from rINST 9412 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9413 GOTO_OPCODE(ip) @ jump to next instruction 9414 9415 9416/* continuation for OP_USHR_LONG */ 9417 9418.LOP_USHR_LONG_finish: 9419 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 9420 GET_INST_OPCODE(ip) @ extract opcode from rINST 9421 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9422 GOTO_OPCODE(ip) @ jump to next instruction 9423 9424 9425/* continuation for OP_SHL_LONG_2ADDR */ 9426 9427.LOP_SHL_LONG_2ADDR_finish: 9428 GET_INST_OPCODE(ip) @ extract opcode from rINST 9429 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9430 GOTO_OPCODE(ip) @ jump to next instruction 9431 9432 9433/* continuation for OP_SHR_LONG_2ADDR */ 9434 9435.LOP_SHR_LONG_2ADDR_finish: 9436 GET_INST_OPCODE(ip) @ extract opcode from rINST 9437 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9438 GOTO_OPCODE(ip) @ jump to next instruction 9439 9440 9441/* continuation for OP_USHR_LONG_2ADDR */ 9442 9443.LOP_USHR_LONG_2ADDR_finish: 9444 GET_INST_OPCODE(ip) @ extract opcode from rINST 9445 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9446 GOTO_OPCODE(ip) @ jump to next instruction 9447 9448 9449/* continuation for OP_EXECUTE_INLINE */ 9450 9451 /* 9452 * Extract args, call function. 9453 * r0 = #of args (0-4) 9454 * r10 = call index 9455 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9456 * 9457 * Other ideas: 9458 * - Use a jump table from the main piece to jump directly into the 9459 * AND/LDR pairs. Costs a data load, saves a branch. 9460 * - Have five separate pieces that do the loading, so we can work the 9461 * interleave a little better. Increases code size. 9462 */ 9463.LOP_EXECUTE_INLINE_continue: 9464 rsb r0, r0, #4 @ r0<- 4-r0 9465 FETCH(r9, 2) @ r9<- FEDC 9466 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9467 bl common_abort @ (skipped due to ARM prefetch) 94684: and ip, r9, #0xf000 @ isolate F 9469 ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) 94703: and ip, r9, #0x0f00 @ isolate E 9471 ldr r2, [rFP, ip, lsr #6] @ r2<- vE 94722: and ip, r9, #0x00f0 @ isolate D 9473 ldr r1, [rFP, ip, lsr #2] @ r1<- vD 94741: and ip, r9, #0x000f @ isolate C 9475 ldr r0, [rFP, ip, lsl #2] @ r0<- vC 94760: 9477 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation 9478 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9479 @ (not reached) 9480 9481.LOP_EXECUTE_INLINE_table: 9482 .word gDvmInlineOpsTable 9483 9484 9485/* continuation for OP_EXECUTE_INLINE_RANGE */ 9486 9487 /* 9488 * Extract args, call function. 9489 * r0 = #of args (0-4) 9490 * r10 = call index 9491 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9492 */ 9493.LOP_EXECUTE_INLINE_RANGE_continue: 9494 rsb r0, r0, #4 @ r0<- 4-r0 9495 FETCH(r9, 2) @ r9<- CCCC 9496 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9497 bl common_abort @ (skipped due to ARM prefetch) 94984: add ip, r9, #3 @ base+3 9499 GET_VREG(r3, ip) @ r3<- vBase[3] 95003: add ip, r9, #2 @ base+2 9501 GET_VREG(r2, ip) @ r2<- vBase[2] 95022: add ip, r9, #1 @ base+1 9503 GET_VREG(r1, ip) @ r1<- vBase[1] 95041: add ip, r9, #0 @ (nop) 9505 GET_VREG(r0, ip) @ r0<- vBase[0] 95060: 9507 ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation 9508 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9509 @ (not reached) 9510 9511.LOP_EXECUTE_INLINE_RANGE_table: 9512 .word gDvmInlineOpsTable 9513 9514 9515 .size dvmAsmSisterStart, .-dvmAsmSisterStart 9516 .global dvmAsmSisterEnd 9517dvmAsmSisterEnd: 9518 9519/* File: armv5te/footer.S */ 9520 9521/* 9522 * =========================================================================== 9523 * Common subroutines and data 9524 * =========================================================================== 9525 */ 9526 9527 9528 9529 .text 9530 .align 2 9531 9532#if defined(WITH_JIT) 9533#if defined(WITH_SELF_VERIFICATION) 9534 .global dvmJitToInterpPunt 9535dvmJitToInterpPunt: 9536 mov r2,#kSVSPunt @ r2<- interpreter entry point 9537 b dvmJitSelfVerificationEnd @ doesn't return 9538 9539 .global dvmJitToInterpSingleStep 9540dvmJitToInterpSingleStep: 9541 mov r2,#kSVSSingleStep @ r2<- interpreter entry point 9542 b dvmJitSelfVerificationEnd @ doesn't return 9543 9544 .global dvmJitToTraceSelect 9545dvmJitToTraceSelect: 9546 ldr r0,[lr, #-1] @ pass our target PC 9547 mov r2,#kSVSTraceSelect @ r2<- interpreter entry point 9548 b dvmJitSelfVerificationEnd @ doesn't return 9549 9550 .global dvmJitToBackwardBranch 9551dvmJitToBackwardBranch: 9552 ldr r0,[lr, #-1] @ pass our target PC 9553 mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point 9554 b dvmJitSelfVerificationEnd @ doesn't return 9555 9556 .global dvmJitToInterpNormal 9557dvmJitToInterpNormal: 9558 ldr r0,[lr, #-1] @ pass our target PC 9559 mov r2,#kSVSNormal @ r2<- interpreter entry point 9560 b dvmJitSelfVerificationEnd @ doesn't return 9561 9562 .global dvmJitToInterpNoChain 9563dvmJitToInterpNoChain: 9564 mov r0,rPC @ pass our target PC 9565 mov r2,#kSVSNoChain @ r2<- interpreter entry point 9566 b dvmJitSelfVerificationEnd @ doesn't return 9567#else 9568/* 9569 * Return from the translation cache to the interpreter when the compiler is 9570 * having issues translating/executing a Dalvik instruction. We have to skip 9571 * the code cache lookup otherwise it is possible to indefinitely bouce 9572 * between the interpreter and the code cache if the instruction that fails 9573 * to be compiled happens to be at a trace start. 9574 */ 9575 .global dvmJitToInterpPunt 9576dvmJitToInterpPunt: 9577 mov rPC, r0 9578#ifdef EXIT_STATS 9579 mov r0,lr 9580 bl dvmBumpPunt; 9581#endif 9582 EXPORT_PC() 9583 adrl rIBASE, dvmAsmInstructionStart 9584 FETCH_INST() 9585 GET_INST_OPCODE(ip) 9586 GOTO_OPCODE(ip) 9587 9588/* 9589 * Return to the interpreter to handle a single instruction. 9590 * On entry: 9591 * r0 <= PC 9592 * r1 <= PC of resume instruction 9593 * lr <= resume point in translation 9594 */ 9595 .global dvmJitToInterpSingleStep 9596dvmJitToInterpSingleStep: 9597 str lr,[rGLUE,#offGlue_jitResume] 9598 str r1,[rGLUE,#offGlue_jitResumePC] 9599 mov r1,#kInterpEntryInstr 9600 @ enum is 4 byte in aapcs-EABI 9601 str r1, [rGLUE, #offGlue_entryPoint] 9602 mov rPC,r0 9603 EXPORT_PC() 9604 adrl rIBASE, dvmAsmInstructionStart 9605 mov r2,#kJitSingleStep @ Ask for single step and then revert 9606 str r2,[rGLUE,#offGlue_jitState] 9607 mov r1,#1 @ set changeInterp to bail to debug interp 9608 b common_gotoBail 9609 9610 9611/* 9612 * Return from the translation cache and immediately request 9613 * a translation for the exit target. Commonly used following 9614 * invokes. 9615 */ 9616 .global dvmJitToTraceSelect 9617dvmJitToTraceSelect: 9618 ldr rPC,[lr, #-1] @ get our target PC 9619 add rINST,lr,#-5 @ save start of chain branch 9620 mov r0,rPC 9621 bl dvmJitGetCodeAddr @ Is there a translation? 9622 cmp r0,#0 9623 beq 2f 9624 mov r1,rINST 9625 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9626 mov r1, rPC @ arg1 of translation may need this 9627 mov lr, #0 @ in case target is HANDLER_INTERPRET 9628 cmp r0,#0 @ successful chain? 9629 bxne r0 @ continue native execution 9630 b toInterpreter @ didn't chain - resume with interpreter 9631 9632/* No translation, so request one if profiling isn't disabled*/ 96332: 9634 adrl rIBASE, dvmAsmInstructionStart 9635 GET_JIT_PROF_TABLE(r0) 9636 FETCH_INST() 9637 cmp r0, #0 9638 bne common_selectTrace 9639 GET_INST_OPCODE(ip) 9640 GOTO_OPCODE(ip) 9641 9642/* 9643 * Return from the translation cache to the interpreter. 9644 * The return was done with a BLX from thumb mode, and 9645 * the following 32-bit word contains the target rPC value. 9646 * Note that lr (r14) will have its low-order bit set to denote 9647 * its thumb-mode origin. 9648 * 9649 * We'll need to stash our lr origin away, recover the new 9650 * target and then check to see if there is a translation available 9651 * for our new target. If so, we do a translation chain and 9652 * go back to native execution. Otherwise, it's back to the 9653 * interpreter (after treating this entry as a potential 9654 * trace start). 9655 */ 9656 .global dvmJitToInterpNormal 9657dvmJitToInterpNormal: 9658 ldr rPC,[lr, #-1] @ get our target PC 9659 add rINST,lr,#-5 @ save start of chain branch 9660#ifdef EXIT_STATS 9661 bl dvmBumpNormal 9662#endif 9663 mov r0,rPC 9664 bl dvmJitGetCodeAddr @ Is there a translation? 9665 cmp r0,#0 9666 beq toInterpreter @ go if not, otherwise do chain 9667 mov r1,rINST 9668 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9669 mov r1, rPC @ arg1 of translation may need this 9670 mov lr, #0 @ in case target is HANDLER_INTERPRET 9671 cmp r0,#0 @ successful chain? 9672 bxne r0 @ continue native execution 9673 b toInterpreter @ didn't chain - resume with interpreter 9674 9675/* 9676 * Return from the translation cache to the interpreter to do method invocation. 9677 * Check if translation exists for the callee, but don't chain to it. 9678 */ 9679 .global dvmJitToInterpNoChain 9680dvmJitToInterpNoChain: 9681#ifdef EXIT_STATS 9682 bl dvmBumpNoChain 9683#endif 9684 mov r0,rPC 9685 bl dvmJitGetCodeAddr @ Is there a translation? 9686 mov r1, rPC @ arg1 of translation may need this 9687 mov lr, #0 @ in case target is HANDLER_INTERPRET 9688 cmp r0,#0 9689 bxne r0 @ continue native execution if so 9690#endif 9691 9692/* 9693 * No translation, restore interpreter regs and start interpreting. 9694 * rGLUE & rFP were preserved in the translated code, and rPC has 9695 * already been restored by the time we get here. We'll need to set 9696 * up rIBASE & rINST, and load the address of the JitTable into r0. 9697 */ 9698toInterpreter: 9699 EXPORT_PC() 9700 adrl rIBASE, dvmAsmInstructionStart 9701 FETCH_INST() 9702 GET_JIT_PROF_TABLE(r0) 9703 @ NOTE: intended fallthrough 9704/* 9705 * Common code to update potential trace start counter, and initiate 9706 * a trace-build if appropriate. On entry, rPC should point to the 9707 * next instruction to execute, and rINST should be already loaded with 9708 * the next opcode word, and r0 holds a pointer to the jit profile 9709 * table (pJitProfTable). 9710 */ 9711common_testUpdateProfile: 9712 cmp r0,#0 9713 GET_INST_OPCODE(ip) 9714 GOTO_OPCODE_IFEQ(ip) @ if not profiling, fallthrough otherwise */ 9715 9716common_updateProfile: 9717 eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function 9718 lsl r3,r3,#23 @ shift out excess 511 9719 ldrb r1,[r0,r3,lsr #23] @ get counter 9720 GET_INST_OPCODE(ip) 9721 subs r1,r1,#1 @ decrement counter 9722 strb r1,[r0,r3,lsr #23] @ and store it 9723 GOTO_OPCODE_IFNE(ip) @ if not threshold, fallthrough otherwise */ 9724 9725/* 9726 * Here, we switch to the debug interpreter to request 9727 * trace selection. First, though, check to see if there 9728 * is already a native translation in place (and, if so, 9729 * jump to it now). 9730 */ 9731 GET_JIT_THRESHOLD(r1) 9732 strb r1,[r0,r3,lsr #23] @ reset counter 9733 EXPORT_PC() 9734 mov r0,rPC 9735 bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC) 9736 mov r1, rPC @ arg1 of translation may need this 9737 mov lr, #0 @ in case target is HANDLER_INTERPRET 9738 cmp r0,#0 9739#if !defined(WITH_SELF_VERIFICATION) 9740 bxne r0 @ jump to the translation 9741#else 9742 beq common_selectTrace 9743 /* 9744 * At this point, we have a target translation. However, if 9745 * that translation is actually the interpret-only pseudo-translation 9746 * we want to treat it the same as no translation. 9747 */ 9748 mov r10, r0 @ save target 9749 bl dvmCompilerGetInterpretTemplate 9750 cmp r0, r10 @ special case? 9751 bne dvmJitSelfVerificationStart @ set up self verification 9752 GET_INST_OPCODE(ip) 9753 GOTO_OPCODE(ip) 9754 /* no return */ 9755#endif 9756 9757common_selectTrace: 9758 mov r2,#kJitTSelectRequest @ ask for trace selection 9759 str r2,[rGLUE,#offGlue_jitState] 9760 mov r2,#kInterpEntryInstr @ normal entry reason 9761 str r2,[rGLUE,#offGlue_entryPoint] 9762 mov r1,#1 @ set changeInterp 9763 b common_gotoBail 9764 9765#if defined(WITH_SELF_VERIFICATION) 9766/* 9767 * Save PC and registers to shadow memory for self verification mode 9768 * before jumping to native translation. 9769 * On entry, r10 contains the address of the target translation. 9770 */ 9771dvmJitSelfVerificationStart: 9772 mov r0,rPC @ r0<- program counter 9773 mov r1,rFP @ r1<- frame pointer 9774 mov r2,rGLUE @ r2<- InterpState pointer 9775 mov r3,r10 @ r3<- target translation 9776 bl dvmSelfVerificationSaveState @ save registers to shadow space 9777 ldr rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space 9778 add rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space 9779 bx r10 @ jump to the translation 9780 9781/* 9782 * Restore PC, registers, and interpState to original values 9783 * before jumping back to the interpreter. 9784 */ 9785dvmJitSelfVerificationEnd: 9786 mov r1,rFP @ pass ending fp 9787 bl dvmSelfVerificationRestoreState @ restore pc and fp values 9788 ldr rPC,[r0,#offShadowSpace_startPC] @ restore PC 9789 ldr rFP,[r0,#offShadowSpace_fp] @ restore FP 9790 ldr rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState 9791 ldr r1,[r0,#offShadowSpace_svState] @ get self verification state 9792 cmp r1,#0 @ check for punt condition 9793 beq 1f 9794 mov r2,#kJitSelfVerification @ ask for self verification 9795 str r2,[rGLUE,#offGlue_jitState] 9796 mov r2,#kInterpEntryInstr @ normal entry reason 9797 str r2,[rGLUE,#offGlue_entryPoint] 9798 mov r1,#1 @ set changeInterp 9799 b common_gotoBail 9800 98011: @ exit to interpreter without check 9802 EXPORT_PC() 9803 adrl rIBASE, dvmAsmInstructionStart 9804 FETCH_INST() 9805 GET_INST_OPCODE(ip) 9806 GOTO_OPCODE(ip) 9807#endif 9808 9809#endif 9810 9811/* 9812 * Common code when a backward branch is taken. 9813 * 9814 * On entry: 9815 * r9 is PC adjustment *in bytes* 9816 */ 9817common_backwardBranch: 9818 mov r0, #kInterpEntryInstr 9819 bl common_periodicChecks 9820#if defined(WITH_JIT) 9821 GET_JIT_PROF_TABLE(r0) 9822 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9823 cmp r0,#0 9824 bne common_updateProfile 9825 GET_INST_OPCODE(ip) 9826 GOTO_OPCODE(ip) 9827#else 9828 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9829 GET_INST_OPCODE(ip) @ extract opcode from rINST 9830 GOTO_OPCODE(ip) @ jump to next instruction 9831#endif 9832 9833 9834/* 9835 * Need to see if the thread needs to be suspended or debugger/profiler 9836 * activity has begun. 9837 * 9838 * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't 9839 * have to do the second ldr. 9840 * 9841 * TODO: reduce this so we're just checking a single location. 9842 * 9843 * On entry: 9844 * r0 is reentry type, e.g. kInterpEntryInstr 9845 * r9 is trampoline PC adjustment *in bytes* 9846 */ 9847common_periodicChecks: 9848 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount 9849 9850 @ speculatively store r0 before it is clobbered by dvmCheckSuspendPending 9851 str r0, [rGLUE, #offGlue_entryPoint] 9852 9853#if defined(WITH_DEBUGGER) 9854 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9855#endif 9856#if defined(WITH_PROFILER) 9857 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9858#endif 9859 9860 ldr r3, [r3] @ r3<- suspendCount (int) 9861 9862#if defined(WITH_DEBUGGER) 9863 ldrb r1, [r1] @ r1<- debuggerActive (boolean) 9864#endif 9865#if defined (WITH_PROFILER) 9866 ldr r2, [r2] @ r2<- activeProfilers (int) 9867#endif 9868 9869 cmp r3, #0 @ suspend pending? 9870 bne 2f @ yes, do full suspension check 9871 9872#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER) 9873# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER) 9874 orrs r1, r1, r2 @ r1<- r1 | r2 9875 cmp r1, #0 @ debugger attached or profiler started? 9876# elif defined(WITH_DEBUGGER) 9877 cmp r1, #0 @ debugger attached? 9878# elif defined(WITH_PROFILER) 9879 cmp r2, #0 @ profiler started? 9880# endif 9881 bne 3f @ debugger/profiler, switch interp 9882#endif 9883 9884 bx lr @ nothing to do, return 9885 98862: @ check suspend 9887 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 9888 EXPORT_PC() @ need for precise GC 9889 b dvmCheckSuspendPending @ suspend if necessary, then return 9890 98913: @ debugger/profiler enabled, bail out 9892 add rPC, rPC, r9 @ update rPC 9893 mov r1, #1 @ "want switch" = true 9894 b common_gotoBail 9895 9896 9897/* 9898 * The equivalent of "goto bail", this calls through the "bail handler". 9899 * 9900 * State registers will be saved to the "glue" area before bailing. 9901 * 9902 * On entry: 9903 * r1 is "bool changeInterp", indicating if we want to switch to the 9904 * other interpreter or just bail all the way out 9905 */ 9906common_gotoBail: 9907 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 9908 mov r0, rGLUE @ r0<- glue ptr 9909 b dvmMterpStdBail @ call(glue, changeInterp) 9910 9911 @add r1, r1, #1 @ using (boolean+1) 9912 @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf 9913 @bl _longjmp @ does not return 9914 @bl common_abort 9915 9916 9917/* 9918 * Common code for method invocation with range. 9919 * 9920 * On entry: 9921 * r0 is "Method* methodToCall", the method we're trying to call 9922 */ 9923common_invokeMethodRange: 9924.LinvokeNewRange: 9925 @ prepare to copy args to "outs" area of current frame 9926 movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero 9927 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9928 beq .LinvokeArgsDone @ if no args, skip the rest 9929 FETCH(r1, 2) @ r1<- CCCC 9930 9931 @ r0=methodToCall, r1=CCCC, r2=count, r10=outs 9932 @ (very few methods have > 10 args; could unroll for common cases) 9933 add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC] 9934 sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args 9935 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 99361: ldr r1, [r3], #4 @ val = *fp++ 9937 subs r2, r2, #1 @ count-- 9938 str r1, [r10], #4 @ *outs++ = val 9939 bne 1b @ ...while count != 0 9940 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9941 b .LinvokeArgsDone 9942 9943/* 9944 * Common code for method invocation without range. 9945 * 9946 * On entry: 9947 * r0 is "Method* methodToCall", the method we're trying to call 9948 */ 9949common_invokeMethodNoRange: 9950.LinvokeNewNoRange: 9951 @ prepare to copy args to "outs" area of current frame 9952 movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero 9953 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9954 FETCH(r1, 2) @ r1<- GFED (load here to hide latency) 9955 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 9956 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9957 beq .LinvokeArgsDone 9958 9959 @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs 9960.LinvokeNonRange: 9961 rsb r2, r2, #5 @ r2<- 5-r2 9962 add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each 9963 bl common_abort @ (skipped due to ARM prefetch) 99645: and ip, rINST, #0x0f00 @ isolate A 9965 ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2) 9966 mov r0, r0 @ nop 9967 str r2, [r10, #-4]! @ *--outs = vA 99684: and ip, r1, #0xf000 @ isolate G 9969 ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2) 9970 mov r0, r0 @ nop 9971 str r2, [r10, #-4]! @ *--outs = vG 99723: and ip, r1, #0x0f00 @ isolate F 9973 ldr r2, [rFP, ip, lsr #6] @ r2<- vF 9974 mov r0, r0 @ nop 9975 str r2, [r10, #-4]! @ *--outs = vF 99762: and ip, r1, #0x00f0 @ isolate E 9977 ldr r2, [rFP, ip, lsr #2] @ r2<- vE 9978 mov r0, r0 @ nop 9979 str r2, [r10, #-4]! @ *--outs = vE 99801: and ip, r1, #0x000f @ isolate D 9981 ldr r2, [rFP, ip, lsl #2] @ r2<- vD 9982 mov r0, r0 @ nop 9983 str r2, [r10, #-4]! @ *--outs = vD 99840: @ fall through to .LinvokeArgsDone 9985 9986.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize 9987 ldr r2, [r0, #offMethod_insns] @ r2<- method->insns 9988 ldr rINST, [r0, #offMethod_clazz] @ rINST<- method->clazz 9989 @ find space for the new stack frame, check for overflow 9990 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area 9991 sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize) 9992 SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea 9993@ bl common_dumpRegs 9994 ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd 9995 sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize) 9996 cmp r3, r9 @ bottom < interpStackEnd? 9997 ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags 9998 blt .LstackOverflow @ yes, this frame will overflow stack 9999 10000 @ set up newSaveArea 10001#ifdef EASY_GDB 10002 SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area 10003 str ip, [r10, #offStackSaveArea_prevSave] 10004#endif 10005 str rFP, [r10, #offStackSaveArea_prevFrame] 10006 str rPC, [r10, #offStackSaveArea_savedPc] 10007#if defined(WITH_JIT) 10008 mov r9, #0 10009 str r9, [r10, #offStackSaveArea_returnAddr] 10010#endif 10011 str r0, [r10, #offStackSaveArea_method] 10012 tst r3, #ACC_NATIVE 10013 bne .LinvokeNative 10014 10015 /* 10016 stmfd sp!, {r0-r3} 10017 bl common_printNewline 10018 mov r0, rFP 10019 mov r1, #0 10020 bl dvmDumpFp 10021 ldmfd sp!, {r0-r3} 10022 stmfd sp!, {r0-r3} 10023 mov r0, r1 10024 mov r1, r10 10025 bl dvmDumpFp 10026 bl common_printNewline 10027 ldmfd sp!, {r0-r3} 10028 */ 10029 10030 ldrh r9, [r2] @ r9 <- load INST from new PC 10031 ldr r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex 10032 mov rPC, r2 @ publish new rPC 10033 ldr r2, [rGLUE, #offGlue_self] @ r2<- glue->self 10034 10035 @ Update "glue" values for the new method 10036 @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST 10037 str r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall 10038 str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ... 10039#if defined(WITH_JIT) 10040 GET_JIT_PROF_TABLE(r0) 10041 mov rFP, r1 @ fp = newFp 10042 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 10043 mov rINST, r9 @ publish new rINST 10044 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 10045 cmp r0,#0 10046 bne common_updateProfile 10047 GOTO_OPCODE(ip) @ jump to next instruction 10048#else 10049 mov rFP, r1 @ fp = newFp 10050 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 10051 mov rINST, r9 @ publish new rINST 10052 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 10053 GOTO_OPCODE(ip) @ jump to next instruction 10054#endif 10055 10056.LinvokeNative: 10057 @ Prep for the native call 10058 @ r0=methodToCall, r1=newFp, r10=newSaveArea 10059 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10060 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 10061 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 10062 str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top 10063 mov r9, r3 @ r9<- glue->self (preserve) 10064 10065 mov r2, r0 @ r2<- methodToCall 10066 mov r0, r1 @ r0<- newFp (points to args) 10067 add r1, rGLUE, #offGlue_retval @ r1<- &retval 10068 10069#ifdef ASSIST_DEBUGGER 10070 /* insert fake function header to help gdb find the stack frame */ 10071 b .Lskip 10072 .type dalvik_mterp, %function 10073dalvik_mterp: 10074 .fnstart 10075 MTERP_ENTRY1 10076 MTERP_ENTRY2 10077.Lskip: 10078#endif 10079 10080 @mov lr, pc @ set return addr 10081 @ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc 10082 LDR_PC_LR "[r2, #offMethod_nativeFunc]" 10083 10084 @ native return; r9=self, r10=newSaveArea 10085 @ equivalent to dvmPopJniLocals 10086 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top 10087 ldr r1, [r9, #offThread_exception] @ check for exception 10088 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 10089 cmp r1, #0 @ null? 10090 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 10091 bne common_exceptionThrown @ no, handle exception 10092 10093 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 10094 GET_INST_OPCODE(ip) @ extract opcode from rINST 10095 GOTO_OPCODE(ip) @ jump to next instruction 10096 10097.LstackOverflow: @ r0=methodToCall 10098 mov r1, r0 @ r1<- methodToCall 10099 ldr r0, [rGLUE, #offGlue_self] @ r0<- self 10100 bl dvmHandleStackOverflow 10101 b common_exceptionThrown 10102#ifdef ASSIST_DEBUGGER 10103 .fnend 10104#endif 10105 10106 10107 /* 10108 * Common code for method invocation, calling through "glue code". 10109 * 10110 * TODO: now that we have range and non-range invoke handlers, this 10111 * needs to be split into two. Maybe just create entry points 10112 * that set r9 and jump here? 10113 * 10114 * On entry: 10115 * r0 is "Method* methodToCall", the method we're trying to call 10116 * r9 is "bool methodCallRange", indicating if this is a /range variant 10117 */ 10118 .if 0 10119.LinvokeOld: 10120 sub sp, sp, #8 @ space for args + pad 10121 FETCH(ip, 2) @ ip<- FEDC or CCCC 10122 mov r2, r0 @ A2<- methodToCall 10123 mov r0, rGLUE @ A0<- glue 10124 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 10125 mov r1, r9 @ A1<- methodCallRange 10126 mov r3, rINST, lsr #8 @ A3<- AA 10127 str ip, [sp, #0] @ A4<- ip 10128 bl dvmMterp_invokeMethod @ call the C invokeMethod 10129 add sp, sp, #8 @ remove arg area 10130 b common_resumeAfterGlueCall @ continue to next instruction 10131 .endif 10132 10133 10134 10135/* 10136 * Common code for handling a return instruction. 10137 * 10138 * This does not return. 10139 */ 10140common_returnFromMethod: 10141.LreturnNew: 10142 mov r0, #kInterpEntryReturn 10143 mov r9, #0 10144 bl common_periodicChecks 10145 10146 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) 10147 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame 10148 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc 10149 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)] 10150 @ r2<- method we're returning to 10151 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10152 cmp r2, #0 @ is this a break frame? 10153 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz 10154 mov r1, #0 @ "want switch" = false 10155 beq common_gotoBail @ break frame, bail out completely 10156 10157 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST 10158 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method 10159 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex 10160 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp 10161#if defined(WITH_JIT) 10162 ldr r3, [r0, #offStackSaveArea_returnAddr] @ r3 = saveArea->returnAddr 10163 GET_JIT_PROF_TABLE(r0) 10164 mov rPC, r9 @ publish new rPC 10165 str r1, [rGLUE, #offGlue_methodClassDex] 10166 cmp r3, #0 @ caller is compiled code 10167 blxne r3 10168 GET_INST_OPCODE(ip) @ extract opcode from rINST 10169 cmp r0,#0 10170 bne common_updateProfile 10171 GOTO_OPCODE(ip) @ jump to next instruction 10172#else 10173 GET_INST_OPCODE(ip) @ extract opcode from rINST 10174 mov rPC, r9 @ publish new rPC 10175 str r1, [rGLUE, #offGlue_methodClassDex] 10176 GOTO_OPCODE(ip) @ jump to next instruction 10177#endif 10178 10179 /* 10180 * Return handling, calls through "glue code". 10181 */ 10182 .if 0 10183.LreturnOld: 10184 SAVE_PC_FP_TO_GLUE() @ export state 10185 mov r0, rGLUE @ arg to function 10186 bl dvmMterp_returnFromMethod 10187 b common_resumeAfterGlueCall 10188 .endif 10189 10190 10191/* 10192 * Somebody has thrown an exception. Handle it. 10193 * 10194 * If the exception processing code returns to us (instead of falling 10195 * out of the interpreter), continue with whatever the next instruction 10196 * now happens to be. 10197 * 10198 * This does not return. 10199 */ 10200 .global dvmMterpCommonExceptionThrown 10201dvmMterpCommonExceptionThrown: 10202common_exceptionThrown: 10203.LexceptionNew: 10204 mov r0, #kInterpEntryThrow 10205 mov r9, #0 10206 bl common_periodicChecks 10207 10208#if defined(WITH_JIT) 10209 mov r2,#kJitTSelectAbort @ abandon trace selection in progress 10210 str r2,[rGLUE,#offGlue_jitState] 10211#endif 10212 10213 ldr r10, [rGLUE, #offGlue_self] @ r10<- glue->self 10214 ldr r9, [r10, #offThread_exception] @ r9<- self->exception 10215 mov r1, r10 @ r1<- self 10216 mov r0, r9 @ r0<- exception 10217 bl dvmAddTrackedAlloc @ don't let the exception be GCed 10218 mov r3, #0 @ r3<- NULL 10219 str r3, [r10, #offThread_exception] @ self->exception = NULL 10220 10221 /* set up args and a local for "&fp" */ 10222 /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */ 10223 str rFP, [sp, #-4]! @ *--sp = fp 10224 mov ip, sp @ ip<- &fp 10225 mov r3, #0 @ r3<- false 10226 str ip, [sp, #-4]! @ *--sp = &fp 10227 ldr r1, [rGLUE, #offGlue_method] @ r1<- glue->method 10228 mov r0, r10 @ r0<- self 10229 ldr r1, [r1, #offMethod_insns] @ r1<- method->insns 10230 mov r2, r9 @ r2<- exception 10231 sub r1, rPC, r1 @ r1<- pc - method->insns 10232 mov r1, r1, asr #1 @ r1<- offset in code units 10233 10234 /* call, r0 gets catchRelPc (a code-unit offset) */ 10235 bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp) 10236 10237 /* fix earlier stack overflow if necessary; may trash rFP */ 10238 ldrb r1, [r10, #offThread_stackOverflowed] 10239 cmp r1, #0 @ did we overflow earlier? 10240 beq 1f @ no, skip ahead 10241 mov rFP, r0 @ save relPc result in rFP 10242 mov r0, r10 @ r0<- self 10243 bl dvmCleanupStackOverflow @ call(self) 10244 mov r0, rFP @ restore result 102451: 10246 10247 /* update frame pointer and check result from dvmFindCatchBlock */ 10248 ldr rFP, [sp, #4] @ retrieve the updated rFP 10249 cmp r0, #0 @ is catchRelPc < 0? 10250 add sp, sp, #8 @ restore stack 10251 bmi .LnotCaughtLocally 10252 10253 /* adjust locals to match self->curFrame and updated PC */ 10254 SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area 10255 ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method 10256 str r1, [rGLUE, #offGlue_method] @ glue->method = new method 10257 ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz 10258 ldr r3, [r1, #offMethod_insns] @ r3<- method->insns 10259 ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex 10260 add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc 10261 str r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth... 10262 10263 /* release the tracked alloc on the exception */ 10264 mov r0, r9 @ r0<- exception 10265 mov r1, r10 @ r1<- self 10266 bl dvmReleaseTrackedAlloc @ release the exception 10267 10268 /* restore the exception if the handler wants it */ 10269 FETCH_INST() @ load rINST from rPC 10270 GET_INST_OPCODE(ip) @ extract opcode from rINST 10271 cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"? 10272 streq r9, [r10, #offThread_exception] @ yes, restore the exception 10273 GOTO_OPCODE(ip) @ jump to next instruction 10274 10275.LnotCaughtLocally: @ r9=exception, r10=self 10276 /* fix stack overflow if necessary */ 10277 ldrb r1, [r10, #offThread_stackOverflowed] 10278 cmp r1, #0 @ did we overflow earlier? 10279 movne r0, r10 @ if yes: r0<- self 10280 blne dvmCleanupStackOverflow @ if yes: call(self) 10281 10282 @ may want to show "not caught locally" debug messages here 10283#if DVM_SHOW_EXCEPTION >= 2 10284 /* call __android_log_print(prio, tag, format, ...) */ 10285 /* "Exception %s from %s:%d not caught locally" */ 10286 @ dvmLineNumFromPC(method, pc - method->insns) 10287 ldr r0, [rGLUE, #offGlue_method] 10288 ldr r1, [r0, #offMethod_insns] 10289 sub r1, rPC, r1 10290 asr r1, r1, #1 10291 bl dvmLineNumFromPC 10292 str r0, [sp, #-4]! 10293 @ dvmGetMethodSourceFile(method) 10294 ldr r0, [rGLUE, #offGlue_method] 10295 bl dvmGetMethodSourceFile 10296 str r0, [sp, #-4]! 10297 @ exception->clazz->descriptor 10298 ldr r3, [r9, #offObject_clazz] 10299 ldr r3, [r3, #offClassObject_descriptor] 10300 @ 10301 ldr r2, strExceptionNotCaughtLocally 10302 ldr r1, strLogTag 10303 mov r0, #3 @ LOG_DEBUG 10304 bl __android_log_print 10305#endif 10306 str r9, [r10, #offThread_exception] @ restore exception 10307 mov r0, r9 @ r0<- exception 10308 mov r1, r10 @ r1<- self 10309 bl dvmReleaseTrackedAlloc @ release the exception 10310 mov r1, #0 @ "want switch" = false 10311 b common_gotoBail @ bail out 10312 10313 10314 /* 10315 * Exception handling, calls through "glue code". 10316 */ 10317 .if 0 10318.LexceptionOld: 10319 SAVE_PC_FP_TO_GLUE() @ export state 10320 mov r0, rGLUE @ arg to function 10321 bl dvmMterp_exceptionThrown 10322 b common_resumeAfterGlueCall 10323 .endif 10324 10325 10326/* 10327 * After returning from a "glued" function, pull out the updated 10328 * values and start executing at the next instruction. 10329 */ 10330common_resumeAfterGlueCall: 10331 LOAD_PC_FP_FROM_GLUE() @ pull rPC and rFP out of glue 10332 FETCH_INST() @ load rINST from rPC 10333 GET_INST_OPCODE(ip) @ extract opcode from rINST 10334 GOTO_OPCODE(ip) @ jump to next instruction 10335 10336/* 10337 * Invalid array index. 10338 */ 10339common_errArrayIndex: 10340 EXPORT_PC() 10341 ldr r0, strArrayIndexException 10342 mov r1, #0 10343 bl dvmThrowException 10344 b common_exceptionThrown 10345 10346/* 10347 * Invalid array value. 10348 */ 10349common_errArrayStore: 10350 EXPORT_PC() 10351 ldr r0, strArrayStoreException 10352 mov r1, #0 10353 bl dvmThrowException 10354 b common_exceptionThrown 10355 10356/* 10357 * Integer divide or mod by zero. 10358 */ 10359common_errDivideByZero: 10360 EXPORT_PC() 10361 ldr r0, strArithmeticException 10362 ldr r1, strDivideByZero 10363 bl dvmThrowException 10364 b common_exceptionThrown 10365 10366/* 10367 * Attempt to allocate an array with a negative size. 10368 */ 10369common_errNegativeArraySize: 10370 EXPORT_PC() 10371 ldr r0, strNegativeArraySizeException 10372 mov r1, #0 10373 bl dvmThrowException 10374 b common_exceptionThrown 10375 10376/* 10377 * Invocation of a non-existent method. 10378 */ 10379common_errNoSuchMethod: 10380 EXPORT_PC() 10381 ldr r0, strNoSuchMethodError 10382 mov r1, #0 10383 bl dvmThrowException 10384 b common_exceptionThrown 10385 10386/* 10387 * We encountered a null object when we weren't expecting one. We 10388 * export the PC, throw a NullPointerException, and goto the exception 10389 * processing code. 10390 */ 10391common_errNullObject: 10392 EXPORT_PC() 10393 ldr r0, strNullPointerException 10394 mov r1, #0 10395 bl dvmThrowException 10396 b common_exceptionThrown 10397 10398/* 10399 * For debugging, cause an immediate fault. The source address will 10400 * be in lr (use a bl instruction to jump here). 10401 */ 10402common_abort: 10403 ldr pc, .LdeadFood 10404.LdeadFood: 10405 .word 0xdeadf00d 10406 10407/* 10408 * Spit out a "we were here", preserving all registers. (The attempt 10409 * to save ip won't work, but we need to save an even number of 10410 * registers for EABI 64-bit stack alignment.) 10411 */ 10412 .macro SQUEAK num 10413common_squeak\num: 10414 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10415 ldr r0, strSqueak 10416 mov r1, #\num 10417 bl printf 10418 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10419 bx lr 10420 .endm 10421 10422 SQUEAK 0 10423 SQUEAK 1 10424 SQUEAK 2 10425 SQUEAK 3 10426 SQUEAK 4 10427 SQUEAK 5 10428 10429/* 10430 * Spit out the number in r0, preserving registers. 10431 */ 10432common_printNum: 10433 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10434 mov r1, r0 10435 ldr r0, strSqueak 10436 bl printf 10437 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10438 bx lr 10439 10440/* 10441 * Print a newline, preserving registers. 10442 */ 10443common_printNewline: 10444 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10445 ldr r0, strNewline 10446 bl printf 10447 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10448 bx lr 10449 10450 /* 10451 * Print the 32-bit quantity in r0 as a hex value, preserving registers. 10452 */ 10453common_printHex: 10454 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10455 mov r1, r0 10456 ldr r0, strPrintHex 10457 bl printf 10458 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10459 bx lr 10460 10461/* 10462 * Print the 64-bit quantity in r0-r1, preserving registers. 10463 */ 10464common_printLong: 10465 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10466 mov r3, r1 10467 mov r2, r0 10468 ldr r0, strPrintLong 10469 bl printf 10470 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10471 bx lr 10472 10473/* 10474 * Print full method info. Pass the Method* in r0. Preserves regs. 10475 */ 10476common_printMethod: 10477 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10478 bl dvmMterpPrintMethod 10479 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10480 bx lr 10481 10482/* 10483 * Call a C helper function that dumps regs and possibly some 10484 * additional info. Requires the C function to be compiled in. 10485 */ 10486 .if 0 10487common_dumpRegs: 10488 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10489 bl dvmMterpDumpArmRegs 10490 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10491 bx lr 10492 .endif 10493 10494#if 0 10495/* 10496 * Experiment on VFP mode. 10497 * 10498 * uint32_t setFPSCR(uint32_t val, uint32_t mask) 10499 * 10500 * Updates the bits specified by "mask", setting them to the values in "val". 10501 */ 10502setFPSCR: 10503 and r0, r0, r1 @ make sure no stray bits are set 10504 fmrx r2, fpscr @ get VFP reg 10505 mvn r1, r1 @ bit-invert mask 10506 and r2, r2, r1 @ clear masked bits 10507 orr r2, r2, r0 @ set specified bits 10508 fmxr fpscr, r2 @ set VFP reg 10509 mov r0, r2 @ return new value 10510 bx lr 10511 10512 .align 2 10513 .global dvmConfigureFP 10514 .type dvmConfigureFP, %function 10515dvmConfigureFP: 10516 stmfd sp!, {ip, lr} 10517 /* 0x03000000 sets DN/FZ */ 10518 /* 0x00009f00 clears the six exception enable flags */ 10519 bl common_squeak0 10520 mov r0, #0x03000000 @ r0<- 0x03000000 10521 add r1, r0, #0x9f00 @ r1<- 0x03009f00 10522 bl setFPSCR 10523 ldmfd sp!, {ip, pc} 10524#endif 10525 10526 10527/* 10528 * String references, must be close to the code that uses them. 10529 */ 10530 .align 2 10531strArithmeticException: 10532 .word .LstrArithmeticException 10533strArrayIndexException: 10534 .word .LstrArrayIndexException 10535strArrayStoreException: 10536 .word .LstrArrayStoreException 10537strDivideByZero: 10538 .word .LstrDivideByZero 10539strNegativeArraySizeException: 10540 .word .LstrNegativeArraySizeException 10541strNoSuchMethodError: 10542 .word .LstrNoSuchMethodError 10543strNullPointerException: 10544 .word .LstrNullPointerException 10545 10546strLogTag: 10547 .word .LstrLogTag 10548strExceptionNotCaughtLocally: 10549 .word .LstrExceptionNotCaughtLocally 10550 10551strNewline: 10552 .word .LstrNewline 10553strSqueak: 10554 .word .LstrSqueak 10555strPrintHex: 10556 .word .LstrPrintHex 10557strPrintLong: 10558 .word .LstrPrintLong 10559 10560/* 10561 * Zero-terminated ASCII string data. 10562 * 10563 * On ARM we have two choices: do like gcc does, and LDR from a .word 10564 * with the address, or use an ADR pseudo-op to get the address 10565 * directly. ADR saves 4 bytes and an indirection, but it's using a 10566 * PC-relative addressing mode and hence has a limited range, which 10567 * makes it not work well with mergeable string sections. 10568 */ 10569 .section .rodata.str1.4,"aMS",%progbits,1 10570 10571.LstrBadEntryPoint: 10572 .asciz "Bad entry point %d\n" 10573.LstrArithmeticException: 10574 .asciz "Ljava/lang/ArithmeticException;" 10575.LstrArrayIndexException: 10576 .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;" 10577.LstrArrayStoreException: 10578 .asciz "Ljava/lang/ArrayStoreException;" 10579.LstrClassCastException: 10580 .asciz "Ljava/lang/ClassCastException;" 10581.LstrDivideByZero: 10582 .asciz "divide by zero" 10583.LstrFilledNewArrayNotImpl: 10584 .asciz "filled-new-array only implemented for objects and 'int'" 10585.LstrInternalError: 10586 .asciz "Ljava/lang/InternalError;" 10587.LstrInstantiationError: 10588 .asciz "Ljava/lang/InstantiationError;" 10589.LstrNegativeArraySizeException: 10590 .asciz "Ljava/lang/NegativeArraySizeException;" 10591.LstrNoSuchMethodError: 10592 .asciz "Ljava/lang/NoSuchMethodError;" 10593.LstrNullPointerException: 10594 .asciz "Ljava/lang/NullPointerException;" 10595 10596.LstrLogTag: 10597 .asciz "mterp" 10598.LstrExceptionNotCaughtLocally: 10599 .asciz "Exception %s from %s:%d not caught locally\n" 10600 10601.LstrNewline: 10602 .asciz "\n" 10603.LstrSqueak: 10604 .asciz "<%d>" 10605.LstrPrintHex: 10606 .asciz "<0x%x>" 10607.LstrPrintLong: 10608 .asciz "<%lld>" 10609 10610 10611