CompilerTemplateAsm-armv5te.S revision e6af13cf607de870de51ffe00f48552252946a00
1/* 2 * This file was generated automatically by gen-template.py for 'armv5te'. 3 * 4 * --> DO NOT EDIT <-- 5 */ 6 7/* File: armv5te/header.S */ 8/* 9 * Copyright (C) 2008 The Android Open Source Project 10 * 11 * Licensed under the Apache License, Version 2.0 (the "License"); 12 * you may not use this file except in compliance with the License. 13 * You may obtain a copy of the License at 14 * 15 * http://www.apache.org/licenses/LICENSE-2.0 16 * 17 * Unless required by applicable law or agreed to in writing, software 18 * distributed under the License is distributed on an "AS IS" BASIS, 19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 * See the License for the specific language governing permissions and 21 * limitations under the License. 22 */ 23 24#if defined(WITH_JIT) 25 26/* 27 * ARMv5 definitions and declarations. 28 */ 29 30/* 31ARM EABI general notes: 32 33r0-r3 hold first 4 args to a method; they are not preserved across method calls 34r4-r8 are available for general use 35r9 is given special treatment in some situations, but not for us 36r10 (sl) seems to be generally available 37r11 (fp) is used by gcc (unless -fomit-frame-pointer is set) 38r12 (ip) is scratch -- not preserved across method calls 39r13 (sp) should be managed carefully in case a signal arrives 40r14 (lr) must be preserved 41r15 (pc) can be tinkered with directly 42 43r0 holds returns of <= 4 bytes 44r0-r1 hold returns of 8 bytes, low word in r0 45 46Callee must save/restore r4+ (except r12) if it modifies them. 47 48Stack is "full descending". Only the arguments that don't fit in the first 4 49registers are placed on the stack. "sp" points at the first stacked argument 50(i.e. the 5th arg). 51 52VFP: single-precision results in s0, double-precision results in d0. 53 54In the EABI, "sp" must be 64-bit aligned on entry to a function, and any 5564-bit quantities (long long, double) must be 64-bit aligned. 
*/

/*
JIT and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer

The following registers have fixed assignments in mterp but are scratch
registers in compiled code

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 * Emits exactly one instruction (sub), per the macro rules above.
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/* Publish the current Dalvik PC into the frame's save area (one str). */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../../../mterp/common/asm-constants.h"


/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines and utility
 * ===========================================================================
 */

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc                      @ lr<- addr of the instruction after ldr
    ldr     pc, \source
.endm


    .global dvmCompilerTemplateStart
    .type   dvmCompilerTemplateStart, %function
    .text

dvmCompilerTemplateStart:

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMP_LONG
dvmCompiler_TEMPLATE_CMP_LONG:
/* File: armv5te/TEMPLATE_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LTEMPLATE_CMP_LONG_less    @ signed compare on high part
    bgt     .LTEMPLATE_CMP_LONG_greater
    subs    r0, r0, r2                  @ r0<- r0 - r2
    bxeq    lr                          @ high words and low words equal: return 0
    bhi     .LTEMPLATE_CMP_LONG_greater @ unsigned compare on low part
.LTEMPLATE_CMP_LONG_less:
    mvn     r0, #0                      @ r0<- -1
    bx      lr
.LTEMPLATE_CMP_LONG_greater:
    mov     r0, #1                      @ r0<- 1
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_RETURN
dvmCompiler_TEMPLATE_RETURN:
/* File: armv5te/TEMPLATE_RETURN.S */
    /*
     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
     * If the stored value in returnAddr
     * is non-zero, the caller is compiled by the JIT thus return to the
     * address in the code cache following the invoke instruction. Otherwise
     * return to the special dvmJitToInterpNoChain entry point.
     */
    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
#if !defined(WITH_SELF_VERIFICATION)
    ldr     r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
    mov     r9, #0                      @ disable chaining
#endif
    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
    beq     1f                          @ bail to interpreter
#else
    blxeq   lr                          @ punt to interpreter and compare state
#endif
    ldr     r1, .LdvmJitToInterpNoChain @ defined in footer.S
    mov     rFP, r10                    @ publish new FP
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    ldr     r8, [r8]                    @ r8<- suspendCount

    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
    ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
    add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
    str     r0, [rGLUE, #offGlue_methodClassDex]
    cmp     r8, #0                      @ check the suspendCount
    movne   r9, #0                      @ clear the chaining cell address
    str     r9, [r3, #offThread_inJitCodeCache] @ in code cache or not
    cmp     r9, #0                      @ chaining cell exists?
    blxne   r9                          @ jump to the chaining cell
#if defined(EXIT_STATS)
    mov     r0, #kCallsiteInterpreted
#endif
    mov     pc, r1                      @ callsite is interpreted
1:
    stmia   rGLUE, {rPC, rFP}           @ SAVE_PC_FP_TO_GLUE()
    ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
    mov     r1, #0                      @ changeInterp = false
    mov     r0, rGLUE                   @ Expecting rGLUE in r0
    blx     r2                          @ exit the interpreter

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT
dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
    /*
     * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
     * runtime-resolved callee.
     */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
    ldr     r8, [r8]                    @ r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    lr                          @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
    ldr     r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                      @ suspendCount != 0
    bxne    lr                          @ bail to the interpreter
    tst     r10, #ACC_NATIVE
#if !defined(WITH_SELF_VERIFICATION)
    bne     .LinvokeNative
#else
    bxne    lr                          @ bail to the interpreter
#endif

    ldr     r10, .LdvmJitToInterpNoChain
    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
    mov     rFP, r1                         @ fp = newFp
    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp

    @ Start executing the callee
#if defined(EXIT_STATS)
    mov     r0, #kInlineCacheMiss
#endif
    mov     pc, r10                         @ dvmJitToInterpNoChain

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN
dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
    /*
     * For monomorphic callsite, setup the Dalvik frame and return to the
     * Thumb code through the link register to transfer control to the callee
     * method through a dedicated chaining cell.
     */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    @ methodToCall is guaranteed to be non-native
.LinvokeChain:
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    add     r12, lr, #2                 @ setup the punt-to-interp address
    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
    ldr     r8, [r8]                    @ r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    r12                         @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                      @ suspendCount != 0
    bxne    r12                         @ bail to the interpreter

    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
    mov     rFP, r1                         @ fp = newFp
    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp

    bx      lr                              @ return to the callee-chaining cell



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
    /*
     * For polymorphic callsite, check whether the cached class pointer matches
     * the current one. If so setup the Dalvik frame and return to the
     * Thumb code through the link register to transfer control to the callee
     * method through a dedicated chaining cell.
     *
     * The predicted chaining cell is declared in ArmLIR.h with the
     * following layout:
     *
     *  typedef struct PredictedChainingCell {
     *      u4 branch;
     *      const ClassObject *clazz;
     *      const Method *method;
     *      u4 counter;
     *  } PredictedChainingCell;
     *
     * Upon returning to the callsite:
     *    - lr  : to branch to the chaining cell
     *    - lr+2: to punt to the interpreter
     *    - lr+4: to fully resolve the callee and may rechain.
     *            r3 <- class
     *            r9 <- counter
     */
    @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
    ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
    ldr     r8, [r2, #4]                @ r8 <- predictedChainCell->clazz
    ldr     r0, [r2, #8]                @ r0 <- predictedChainCell->method
    ldr     r9, [r2, #12]               @ r9 <- predictedChainCell->counter
    cmp     r3, r8                      @ predicted class == actual class?
    beq     .LinvokeChain               @ predicted chain is valid
    ldr     r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
    sub     r1, r9, #1                  @ count--
    str     r1, [r2, #12]               @ write back to PredictedChainingCell->counter
    add     lr, lr, #4                  @ return to fully-resolve landing pad
    /*
     * r1 <- count
     * r2 <- &predictedChainCell
     * r3 <- this->class
     * r4 <- dPC
     * r7 <- this->class->vtable
     */
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE
dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    ldr     r8, [r8]                    @ r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    lr                          @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                      @ suspendCount != 0
    ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
#if !defined(WITH_SELF_VERIFICATION)
    bxne    lr                          @ bail to the interpreter
#else
    bx      lr                          @ bail to interpreter unconditionally
#endif

    @ go ahead and transfer control to the native code
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    mov     r2, #0
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r2, [r3, #offThread_inJitCodeCache] @ not in the jit code cache
    str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                        @ newFp->localRefCookie=top
    mov     r9, r3                      @ r9<- glue->self (preserve)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFP
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

    blx     r8                          @ off to the native code

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
    ldr     r1, [r9, #offThread_exception] @ check for exception
    str     rFP, [r9, #offThread_curFrame] @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
    ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]

    @ r0 = dalvikCallsitePC
    bne     .LhandleException           @ no, handle exception

    str     r2, [r9, #offThread_inJitCodeCache] @ set the mode properly
    cmp     r2, #0                      @ return chaining cell still exists?
    bxne    r2                          @ yes - go ahead

    @ continue executing the next instruction through the interpreter
    ldr     r1, .LdvmJitToInterpNoChain @ defined in footer.S
    add     rPC, r0, #6                 @ reconstruct new rPC (advance 6 bytes)
#if defined(EXIT_STATS)
    mov     r0, #kCallsiteInterpreted
#endif
    mov     pc, r1




/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPG_DOUBLE
dvmCompiler_TEMPLATE_CMPG_DOUBLE:
/* File: armv5te/TEMPLATE_CMPG_DOUBLE.S */
/* File: armv5te/TEMPLATE_CMPL_DOUBLE.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    push    {r0-r3}                     @ save operands
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
    bhi     .LTEMPLATE_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    add     sp, #16                     @ drop unused operands
    bx      r11

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
    pop     {r2-r3}                     @ restore operands in reverse order
    pop     {r0-r1}                     @ restore operands in reverse order
    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mov     r0, #1                      @ r0<- 1 for NaN (cmpg variant)
    bx      r11



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPL_DOUBLE
dvmCompiler_TEMPLATE_CMPL_DOUBLE:
/* File: armv5te/TEMPLATE_CMPL_DOUBLE.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    push    {r0-r3}                     @ save operands
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
    bhi     .LTEMPLATE_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    add     sp, #16                     @ drop unused operands
    bx      r11

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
    pop     {r2-r3}                     @ restore operands in reverse order
    pop     {r0-r1}                     @ restore operands in reverse order
    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mvn     r0, #0                      @ r0<- -1 for NaN (cmpl variant)
    bx      r11


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPG_FLOAT
dvmCompiler_TEMPLATE_CMPG_FLOAT:
/* File: armv5te/TEMPLATE_CMPG_FLOAT.S */
/* File: armv5te/TEMPLATE_CMPL_FLOAT.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    mov     r9, r0                      @ Save copies - we may need to redo
    mov     r10, r1
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
    bhi     .LTEMPLATE_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    bx      r11
    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPG_FLOAT_gt_or_nan:
    mov     r0, r10                     @ restore in reverse order
    mov     r1, r9
    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mov     r0, #1                      @ r0<- 1 for NaN (cmpg variant)
    bx      r11




/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPL_FLOAT
dvmCompiler_TEMPLATE_CMPL_FLOAT:
/* File: armv5te/TEMPLATE_CMPL_FLOAT.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    mov     r9, r0                      @ Save copies - we may need to redo
    mov     r10, r1
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
    bhi     .LTEMPLATE_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    bx      r11
    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPL_FLOAT_gt_or_nan:
    mov     r0, r10                     @ restore in reverse order
    mov     r1, r9
    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mvn     r0, #0                      @ r0<- -1 for NaN (cmpl variant)
    bx      r11



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_MUL_LONG
dvmCompiler_TEMPLATE_MUL_LONG:
/* File: armv5te/TEMPLATE_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * For JIT: op1 in r0/r1, op2 in r2/r3, return in r0/r1
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    mov     r0, r9                      @ r0<- low word of result
    mov     r1, r10                     @ r1<- high word of result
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_SHL_LONG
dvmCompiler_TEMPLATE_SHL_LONG:
/* File: armv5te/TEMPLATE_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     */
    /* shl-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_SHR_LONG
dvmCompiler_TEMPLATE_SHR_LONG:
/* File: armv5te/TEMPLATE_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     */
    /* shr-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r0, r0, lsr r2              @ r0<- r0 >> r2 (logical on low word)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov     r1, r1, asr r2              @ r1<- r1 >> r2 (arithmetic on high word)
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_USHR_LONG
dvmCompiler_TEMPLATE_USHR_LONG:
/* File: armv5te/TEMPLATE_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     */
    /* ushr-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
/* File: armv5te/TEMPLATE_THROW_EXCEPTION_COMMON.S */
    /*
     * Throw an exception from JIT'ed code.
     * On entry:
     *    r0    Dalvik PC that raises the exception
     */
    b       .LhandleException

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_MEM_OP_DECODE
dvmCompiler_TEMPLATE_MEM_OP_DECODE:
/* File: armv5te/TEMPLATE_MEM_OP_DECODE.S */
#if defined(WITH_SELF_VERIFICATION)
    /*
     * This handler encapsulates heap memory ops for selfVerification mode.
     *
     * The call to the handler is inserted prior to a heap memory operation.
     * This handler then calls a function to decode the memory op, and process
     * it accordingly. Afterwards, the handler changes the return address to
     * skip the memory op so it never gets executed.
     */
    push    {r0-r12,lr}                 @ save out all registers
    mov     r0, lr                      @ arg0 <- link register
    mov     r1, sp                      @ arg1 <- stack pointer
    ldr     r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
    blx     r2                          @ decode and handle the mem op
    pop     {r0-r12,lr}                 @ restore all registers
    bx      lr                          @ return to compiled code
#endif

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_STRING_COMPARETO
dvmCompiler_TEMPLATE_STRING_COMPARETO:
/* File: armv5te/TEMPLATE_STRING_COMPARETO.S */
    /*
     * String's compareTo.
     *
     * Requires r0/r1 to have been previously checked for null.  Will
     * return negative if this's string is < comp, 0 if they are the
     * same and positive if >.
     *
     * IMPORTANT NOTE:
     *
     * This code relies on hard-coded offsets for string objects, and must be
     * kept in sync with definitions in UtfString.h.  See asm-constants.h
     *
     * On entry:
     *    r0:   this object pointer
     *    r1:   comp object pointer
     *
     */

    mov     r2, r0                      @ this to r2, opening up r0 for return value
    subs    r0, r2, r1                  @ Same object?  (r0<- 0 if identical)
    bxeq    lr

    ldr     r4, [r2, #STRING_FIELDOFF_OFFSET]
    ldr     r9, [r1, #STRING_FIELDOFF_OFFSET]
    ldr     r7, [r2, #STRING_FIELDOFF_COUNT]
    ldr     r10, [r1, #STRING_FIELDOFF_COUNT]
    ldr     r2, [r2, #STRING_FIELDOFF_VALUE]
    ldr     r1, [r1, #STRING_FIELDOFF_VALUE]

    /*
     * At this point, we have:
     *    value:  r2/r1
     *    offset: r4/r9
     *    count:  r7/r10
     * We're going to compute
     *    r11 <- countDiff
     *    r10 <- minCount
     */
    subs    r11, r7, r10                @ r11<- countDiff = this->count - comp->count
    movls   r10, r7                     @ r10<- minCount

    /* Now, build pointers to the string data */
    add     r2, r2, r4, lsl #1
    add     r1, r1, r9, lsl #1
    /*
     * Note: data pointers point to previous element so we can use pre-index
     * mode with base writeback.
     */
    add     r2, #16-2                   @ offset to contents[-1]
    add     r1, #16-2                   @ offset to contents[-1]

    /*
     * At this point we have:
     *    r2: *this string data
     *    r1: *comp string data
     *    r10: iteration count for comparison
     *    r11: value to return if the first part of the string is equal
     *    r0: reserved for result
     *    r3, r4, r7, r8, r9, r12 available for loading string data
     */

    subs    r10, #2
    blt     do_remainder2

    /*
     * Unroll the first two checks so we can quickly catch early mismatch
     * on long strings (but preserve incoming alignment)
     */

    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
    ldrh    r7, [r2, #2]!
    ldrh    r8, [r1, #2]!
    subs    r0, r3, r4
    subeqs  r0, r7, r8
    bxne    lr                          @ early mismatch: return char difference
    cmp     r10, #28
    bgt     do_memcmp16                 @ long strings: hand off to __memcmp16
    subs    r10, #3
    blt     do_remainder

loopback_triple:
    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
    ldrh    r7, [r2, #2]!
    ldrh    r8, [r1, #2]!
    ldrh    r9, [r2, #2]!
    ldrh    r12,[r1, #2]!
    subs    r0, r3, r4
    subeqs  r0, r7, r8
    subeqs  r0, r9, r12
    bxne    lr                          @ mismatch: return char difference
    subs    r10, #3
    bge     loopback_triple

do_remainder:
    adds    r10, #3
    beq     returnDiff

loopback_single:
    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
    subs    r0, r3, r4
    bxne    lr
    subs    r10, #1
    bne     loopback_single

returnDiff:
    mov     r0, r11                     @ equal prefix: result is countDiff
    bx      lr

do_remainder2:
    adds    r10, #2
    bne     loopback_single
    mov     r0, r11
    bx      lr

    /* Long string case */
do_memcmp16:
    mov     r4, lr                      @ preserve return address (blx clobbers lr)
    ldr     lr, .Lmemcmp16
    mov     r7, r11                     @ preserve countDiff across the call
    add     r0, r2, #2                  @ arg0<- current position in this string
    add     r1, r1, #2                  @ arg1<- current position in comp string
    mov     r2, r10                     @ arg2<- remaining count
    blx     lr
    cmp     r0, #0                      @ memcmp16 found a difference?
    bxne    r4
    mov     r0, r7                      @ strings equal so far: return countDiff
    bx      r4

.Lmemcmp16:
    .word   __memcmp16

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_STRING_INDEXOF
dvmCompiler_TEMPLATE_STRING_INDEXOF:
/* File: armv5te/TEMPLATE_STRING_INDEXOF.S */
    /*
     * String's indexOf.
     *
     * Requires r0 to have been previously checked for null.  Will
     * return index of match of r1 in r0.
     *
     * IMPORTANT NOTE:
     *
     * This code relies on hard-coded offsets for string objects, and must be
     * kept in sync with definitions in UtfString.h.  See asm-constants.h
     *
     * On entry:
     *    r0:   string object pointer
     *    r1:   char to match
     *    r2:   Starting offset in string data
     */

    ldr     r7, [r0, #STRING_FIELDOFF_OFFSET]
    ldr     r8, [r0, #STRING_FIELDOFF_COUNT]
    ldr     r0, [r0, #STRING_FIELDOFF_VALUE]

    /*
     * At this point, we have:
     *    r0: object pointer
     *    r1: char to match
     *    r2: starting offset
     *    r7: offset
     *    r8: string length
     */

    /* Build pointer to start of string data */
    add     r0, #16
    add     r0, r0, r7, lsl #1

    /* Save a copy of starting data in r7 */
    mov     r7, r0

    /* Clamp start to [0..count] */
    cmp     r2, #0
    movlt   r2, #0
    cmp     r2, r8
    movgt   r2, r8

    /* Build pointer to start of data to compare and pre-bias */
    add     r0, r0, r2, lsl #1
    sub     r0, #2                      @ pre-bias for pre-indexed loads below

    /* Compute iteration count */
    sub     r8, r2

    /*
     * At this point we have:
     *    r0: start of data to test
     *    r1: char to compare
     *    r8: iteration count
     *    r7: original start of string
     *    r3, r4, r9, r10, r11, r12 available for loading string data
     */

    subs    r8, #4
    blt     indexof_remainder

indexof_loop4:
    ldrh    r3, [r0, #2]!
    ldrh    r4, [r0, #2]!
    ldrh    r10, [r0, #2]!
    ldrh    r11, [r0, #2]!
    cmp     r3, r1
    beq     match_0
    cmp     r4, r1
    beq     match_1
    cmp     r10, r1
    beq     match_2
    cmp     r11, r1
    beq     match_3
    subs    r8, #4
    bge     indexof_loop4

indexof_remainder:
    adds    r8, #4
    beq     indexof_nomatch

indexof_loop1:
    ldrh    r3, [r0, #2]!
1039 cmp r3, r1 1040 beq match_3 1041 subs r8, #1 1042 bne indexof_loop1 1043 1044indexof_nomatch: 1045 mov r0, #-1 1046 bx lr 1047 1048match_0: 1049 sub r0, #6 1050 sub r0, r7 1051 asr r0, r0, #1 1052 bx lr 1053match_1: 1054 sub r0, #4 1055 sub r0, r7 1056 asr r0, r0, #1 1057 bx lr 1058match_2: 1059 sub r0, #2 1060 sub r0, r7 1061 asr r0, r0, #1 1062 bx lr 1063match_3: 1064 sub r0, r7 1065 asr r0, r0, #1 1066 bx lr 1067 1068 1069/* ------------------------------ */ 1070 .balign 4 1071 .global dvmCompiler_TEMPLATE_INTERPRET 1072dvmCompiler_TEMPLATE_INTERPRET: 1073/* File: armv5te/TEMPLATE_INTERPRET.S */ 1074 /* 1075 * This handler transfers control to the interpeter without performing 1076 * any lookups. It may be called either as part of a normal chaining 1077 * operation, or from the transition code in header.S. We distinquish 1078 * the two cases by looking at the link register. If called from a 1079 * translation chain, it will point to the chaining Dalvik PC + 1. 1080 * On entry: 1081 * lr - if NULL: 1082 * r1 - the Dalvik PC to begin interpretation. 1083 * else 1084 * [lr, #-1] contains Dalvik PC to begin interpretation 1085 * rGLUE - pointer to interpState 1086 * rFP - Dalvik frame pointer 1087 */ 1088 cmp lr, #0 1089 ldrne r1,[lr, #-1] 1090 ldr r2, .LinterpPunt 1091 mov r0, r1 @ set Dalvik PC 1092 bx r2 1093 @ doesn't return 1094 1095.LinterpPunt: 1096 .word dvmJitToInterpPunt 1097 1098/* ------------------------------ */ 1099 .balign 4 1100 .global dvmCompiler_TEMPLATE_MONITOR_ENTER 1101dvmCompiler_TEMPLATE_MONITOR_ENTER: 1102/* File: armv5te/TEMPLATE_MONITOR_ENTER.S */ 1103 /* 1104 * Call out to the runtime to lock an object. Because this thread 1105 * may have been suspended in THREAD_MONITOR state and the Jit's 1106 * translation cache subsequently cleared, we cannot return directly. 1107 * Instead, unconditionally transition to the interpreter to resume. 
1108 * 1109 * On entry: 1110 * r0 - self pointer 1111 * r1 - the object (which has already been null-checked by the caller 1112 * r4 - the Dalvik PC of the following instruction. 1113 */ 1114 ldr r2, .LdvmLockObject 1115 mov r3, #0 @ Record that we're not returning 1116 str r3, [r0, #offThread_inJitCodeCache] 1117 blx r2 @ dvmLockObject(self, obj) 1118 @ refresh Jit's on/off status 1119 ldr r0, [rGLUE, #offGlue_ppJitProfTable] 1120 ldr r0, [r0] 1121 ldr r2, .LdvmJitToInterpNoChain 1122 str r0, [rGLUE, #offGlue_pJitProfTable] 1123 @ Bail to interpreter - no chain [note - r4 still contains rPC] 1124#if defined(EXIT_STATS) 1125 mov r0, #kHeavyweightMonitor 1126#endif 1127 bx r2 1128 1129 1130/* ------------------------------ */ 1131 .balign 4 1132 .global dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG 1133dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG: 1134/* File: armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S */ 1135 /* 1136 * To support deadlock prediction, this version of MONITOR_ENTER 1137 * will always call the heavyweight dvmLockObject, check for an 1138 * exception and then bail out to the interpreter. 1139 * 1140 * On entry: 1141 * r0 - self pointer 1142 * r1 - the object (which has already been null-checked by the caller 1143 * r4 - the Dalvik PC of the following instruction. 
1144 * 1145 */ 1146 ldr r2, .LdvmLockObject 1147 mov r3, #0 @ Record that we're not returning 1148 str r3, [r0, #offThread_inJitCodeCache] 1149 blx r2 @ dvmLockObject(self, obj) 1150 @ refresh Jit's on/off status & test for exception 1151 ldr r0, [rGLUE, #offGlue_ppJitProfTable] 1152 ldr r1, [rGLUE, #offGlue_self] 1153 ldr r0, [r0] 1154 ldr r1, [r1, #offThread_exception] 1155 str r0, [rGLUE, #offGlue_pJitProfTable] 1156 cmp r1, #0 1157 beq 1f 1158 ldr r2, .LhandleException 1159 sub r0, r4, #2 @ roll dPC back to this monitor instruction 1160 bx r2 11611: 1162 @ Bail to interpreter - no chain [note - r4 still contains rPC] 1163#if defined(EXIT_STATS) 1164 mov r0, #kHeavyweightMonitor 1165#endif 1166 ldr pc, .LdvmJitToInterpNoChain 1167 1168 .size dvmCompilerTemplateStart, .-dvmCompilerTemplateStart 1169/* File: armv5te/footer.S */ 1170/* 1171 * =========================================================================== 1172 * Common subroutines and data 1173 * =========================================================================== 1174 */ 1175 1176 .text 1177 .align 2 1178.LinvokeNative: 1179 @ Prep for the native call 1180 @ r1 = newFP, r0 = methodToCall 1181 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 1182 mov r2, #0 1183 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 
1184 str r2, [r3, #offThread_inJitCodeCache] @ not in jit code cache 1185 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 1186 str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)] 1187 @ newFp->localRefCookie=top 1188 mov r9, r3 @ r9<- glue->self (preserve) 1189 SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area 1190 1191 mov r2, r0 @ r2<- methodToCall 1192 mov r0, r1 @ r0<- newFP 1193 add r1, rGLUE, #offGlue_retval @ r1<- &retval 1194 1195 LDR_PC_LR "[r2, #offMethod_nativeFunc]" 1196 1197 @ Refresh Jit's on/off status 1198 ldr r3, [rGLUE, #offGlue_ppJitProfTable] 1199 1200 @ native return; r9=self, r10=newSaveArea 1201 @ equivalent to dvmPopJniLocals 1202 ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret 1203 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top 1204 ldr r1, [r9, #offThread_exception] @ check for exception 1205 ldr r3, [r3] @ r1 <- pointer to Jit profile table 1206 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 1207 cmp r1, #0 @ null? 1208 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 1209 ldr r0, [r10, #offStackSaveArea_savedPc] @ reload rPC 1210 str r3, [rGLUE, #offGlue_pJitProfTable] @ cache current JitProfTable 1211 1212 @ r0 = dalvikCallsitePC 1213 bne .LhandleException @ no, handle exception 1214 1215 str r2, [r9, #offThread_inJitCodeCache] @ set the new mode 1216 cmp r2, #0 @ return chaining cell still exists? 
1217 bxne r2 @ yes - go ahead 1218 1219 @ continue executing the next instruction through the interpreter 1220 ldr r1, .LdvmJitToInterpNoChain @ defined in footer.S 1221 add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes) 1222#if defined(EXIT_STATS) 1223 mov r0, #kCallsiteInterpreted 1224#endif 1225 mov pc, r1 1226 1227/* 1228 * On entry: 1229 * r0 Faulting Dalvik PC 1230 */ 1231.LhandleException: 1232 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 1233 mov r2, #0 1234 str r2, [r3, #offThread_inJitCodeCache] @ in interpreter land 1235 ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func 1236 ldr rIBASE, .LdvmAsmInstructionStart @ same as above 1237 mov rPC, r0 @ reload the faulting Dalvik address 1238 mov pc, r1 @ branch to dvmMterpCommonExceptionThrown 1239 1240 .align 2 1241.LdvmAsmInstructionStart: 1242 .word dvmAsmInstructionStart 1243.LdvmJitToInterpNoChain: 1244 .word dvmJitToInterpNoChain 1245.LdvmMterpStdBail: 1246 .word dvmMterpStdBail 1247.LdvmMterpCommonExceptionThrown: 1248 .word dvmMterpCommonExceptionThrown 1249.LdvmLockObject: 1250 .word dvmLockObject 1251#if defined(WITH_SELF_VERIFICATION) 1252.LdvmSelfVerificationMemOpDecode: 1253 .word dvmSelfVerificationMemOpDecode 1254#endif 1255.L__aeabi_cdcmple: 1256 .word __aeabi_cdcmple 1257.L__aeabi_cfcmple: 1258 .word __aeabi_cfcmple 1259 1260 .global dmvCompilerTemplateEnd 1261dmvCompilerTemplateEnd: 1262 1263#endif /* WITH_JIT */ 1264 1265