/* CompilerTemplateAsm-armv5te.S revision 342806dae77556290dfe0760e6fe3117d812c7ba */
/*
 * This file was generated automatically by gen-template.py for 'armv5te'.
 *
 *   --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#if defined(WITH_JIT)

/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
JIT and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
   r5  rFP     interpreted frame pointer, used for accessing locals and args
   r6  rGLUE   MterpGlue pointer

The following registers have fixed assignments in mterp but are scratch
registers in compiled code

  reg nick      purpose
   r4  rPC     interpreted program counter, used for fetching instructions
   r7  rINST   first 16-bit code unit of current instruction
   r8  rIBASE  interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC r4
#define rFP r5
#define rGLUE r6
#define rINST r7
#define rIBASE r8

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/* Publish the current Dalvik PC into the frame's save area so the
 * runtime can locate the exact bytecode on exception/GC. */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../../../mterp/common/asm-constants.h"


/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *                  CPU-version-specific defines and utility
 * ===========================================================================
 */

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Save & restore for callee-save FP registers.
 * On entry:
 *    r0 : pointer to save area of JIT_CALLEE_SAVE_WORD_SIZE
 */
    .text
    .align 2
    .global dvmJitCalleeSave
    .type dvmJitCalleeSave, %function
dvmJitCalleeSave:
    bx      lr                          @ no callee-save FP regs on armv5te

    .global dvmJitCalleeRestore
    .type dvmJitCalleeRestore, %function
dvmJitCalleeRestore:
    bx      lr                          @ no callee-save FP regs on armv5te



    .global dvmCompilerTemplateStart
    .type dvmCompilerTemplateStart, %function
    .text

dvmCompilerTemplateStart:

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMP_LONG
dvmCompiler_TEMPLATE_CMP_LONG:
/* File: armv5te/TEMPLATE_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LTEMPLATE_CMP_LONG_less    @ signed compare on high part
    bgt     .LTEMPLATE_CMP_LONG_greater
    subs    r0, r0, r2                  @ r0<- r0 - r2
    bxeq    lr                          @ high and low words equal: return 0
    bhi     .LTEMPLATE_CMP_LONG_greater @ unsigned compare on low part
.LTEMPLATE_CMP_LONG_less:
    mvn     r0, #0                      @ r0<- -1
    bx      lr
.LTEMPLATE_CMP_LONG_greater:
    mov     r0, #1                      @ r0<- 1
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_RETURN
dvmCompiler_TEMPLATE_RETURN:
/* File: armv5te/TEMPLATE_RETURN.S */
    /*
     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
     * If the stored value in returnAddr
     * is non-zero, the caller is compiled by the JIT thus return to the
     * address in the code cache following the invoke instruction.  Otherwise
     * return to the special dvmJitToInterpNoChain entry point.
     */
    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
#if !defined(WITH_SELF_VERIFICATION)
    ldr     r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
    mov     r9, #0                      @ disable chaining
#endif
    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
    beq     1f                          @ bail to interpreter
#else
    blxeq   lr                          @ punt to interpreter and compare state
#endif
    ldr     r1, .LdvmJitToInterpNoChain @ defined in footer.S
    mov     rFP, r10                    @ publish new FP
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    ldr     r8, [r8]                    @ r8<- suspendCount (was &suspendCount)

    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
    ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
    add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
    str     r0, [rGLUE, #offGlue_methodClassDex]
    cmp     r8, #0                      @ check the suspendCount
    movne   r9, #0                      @ clear the chaining cell address
    cmp     r9, #0                      @ chaining cell exists?
    blxne   r9                          @ jump to the chaining cell
#if defined(EXIT_STATS)
    mov     r0, #kCallsiteInterpreted
#endif
    mov     pc, r1                      @ callsite is interpreted
1:
    stmia   rGLUE, {rPC, rFP}           @ SAVE_PC_FP_TO_GLUE()
    ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
    mov     r1, #0                      @ changeInterp = false
    mov     r0, rGLUE                   @ Expecting rGLUE in r0
    blx     r2                          @ exit the interpreter

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT
dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
    /*
     * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
     * runtime-resolved callee.
     */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
    ldr     r8, [r8]                    @ r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    lr                          @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    ldr     r9, [r0, #offMethod_clazz]  @ r9<- method->clazz
    ldr     r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                      @ suspendCount != 0
    bxne    lr                          @ bail to the interpreter
    tst     r10, #ACC_NATIVE
#if !defined(WITH_SELF_VERIFICATION)
    bne     .LinvokeNative
#else
    bxne    lr                          @ bail to the interpreter
#endif

    ldr     r10, .LdvmJitToInterpNoChain
    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    ldr     r2, [rGLUE, #offGlue_self]  @ r2<- glue->self

    @ Update "glue" values for the new method
    str     r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
    mov     rFP, r1                     @ fp = newFp
    str     rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp

    @ Start executing the callee
#if defined(EXIT_STATS)
    mov     r0, #kInlineCacheMiss
#endif
    mov     pc, r10                     @ dvmJitToInterpNoChain

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN
dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
    /*
     * For monomorphic callsite, setup the Dalvik frame and return to the
     * Thumb code through the link register to transfer control to the callee
     * method through a dedicated chaining cell.
     */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    @ methodToCall is guaranteed to be non-native
.LinvokeChain:
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    add     r12, lr, #2                 @ setup the punt-to-interp address
    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
    ldr     r8, [r8]                    @ r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    r12                         @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    ldr     r9, [r0, #offMethod_clazz]  @ r9<- method->clazz
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                      @ suspendCount != 0
    bxne    r12                         @ bail to the interpreter

    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    ldr     r2, [rGLUE, #offGlue_self]  @ r2<- glue->self

    @ Update "glue" values for the new method
    str     r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
    mov     rFP, r1                     @ fp = newFp
    str     rFP, [r2, #offThread_curFrame] @ self->curFrame = newFp

    bx      lr                          @ return to the callee-chaining cell



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
    /*
     * For polymorphic callsite, check whether the cached class pointer matches
     * the current one.  If so setup the Dalvik frame and return to the
     * Thumb code through the link register to transfer control to the callee
     * method through a dedicated chaining cell.
     *
     * The predicted chaining cell is declared in ArmLIR.h with the
     * following layout:
     *
     *  typedef struct PredictedChainingCell {
     *      u4 branch;
     *      const ClassObject *clazz;
     *      const Method *method;
     *      u4 counter;
     *  } PredictedChainingCell;
     *
     * Upon returning to the callsite:
     *    - lr  : to branch to the chaining cell
     *    - lr+2: to punt to the interpreter
     *    - lr+4: to fully resolve the callee and may rechain.
     *            r3 <- class
     *            r9 <- counter
     */
    @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
    ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
    ldr     r8, [r2, #4]                @ r8 <- predictedChainCell->clazz
    ldr     r0, [r2, #8]                @ r0 <- predictedChainCell->method
    ldr     r9, [r2, #12]               @ r9 <- predictedChainCell->counter
    cmp     r3, r8                      @ predicted class == actual class?
    beq     .LinvokeChain               @ predicted chain is valid
    ldr     r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
    sub     r1, r9, #1                  @ count--
    str     r1, [r2, #12]               @ write back to PredictedChainingCell->counter
    add     lr, lr, #4                  @ return to fully-resolve landing pad
    /*
     * r1 <- count
     * r2 <- &predictedChainCell
     * r3 <- this->class
     * r4 <- dPC
     * r7 <- this->class->vtable
     */
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE
dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    ldr     r8, [r8]                    @ r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    lr                          @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                      @ suspendCount != 0
    ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
#if !defined(WITH_SELF_VERIFICATION)
    bxne    lr                          @ bail to the interpreter
#else
    bx      lr                          @ bail to interpreter unconditionally
#endif

    @ go ahead and transfer control to the native code
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame] @ self->curFrame = newFp
    str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                        @ newFp->localRefCookie=top
    mov     r9, r3                      @ r9<- glue->self (preserve)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFP
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

    blx     r8                          @ off to the native code

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
    ldr     r1, [r9, #offThread_exception] @ check for exception
    str     rFP, [r9, #offThread_curFrame] @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
    ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    bne     .LhandleException           @ exception pending: handle it
    bx      r2                          @ no exception: return via chaining cell

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPG_DOUBLE
dvmCompiler_TEMPLATE_CMPG_DOUBLE:
/* File: armv5te/TEMPLATE_CMPG_DOUBLE.S */
/* File: armv5te/TEMPLATE_CMPL_DOUBLE.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    push    {r0-r3}                     @ save operands
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
    bhi     .LTEMPLATE_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    add     sp, #16                     @ drop unused operands
    bx      r11

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
    pop     {r2-r3}                     @ restore operands in reverse order
    pop     {r0-r1}                     @ restore operands in reverse order
    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mov     r0, #1                      @ r0<- 1 for NaN (cmpg bias)
    bx      r11



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPL_DOUBLE
dvmCompiler_TEMPLATE_CMPL_DOUBLE:
/* File: armv5te/TEMPLATE_CMPL_DOUBLE.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    push    {r0-r3}                     @ save operands
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
    bhi     .LTEMPLATE_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    add     sp, #16                     @ drop unused operands
    bx      r11

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
    pop     {r2-r3}                     @ restore operands in reverse order
    pop     {r0-r1}                     @ restore operands in reverse order
    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mvn     r0, #0                      @ r0<- -1 for NaN (cmpl bias)
    bx      r11

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPG_FLOAT
dvmCompiler_TEMPLATE_CMPG_FLOAT:
/* File: armv5te/TEMPLATE_CMPG_FLOAT.S */
/* File: armv5te/TEMPLATE_CMPL_FLOAT.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    mov     r9, r0                      @ Save copies - we may need to redo
    mov     r10, r1
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
    bhi     .LTEMPLATE_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    bx      r11
    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPG_FLOAT_gt_or_nan:
    mov     r0, r10                     @ restore in reverse order
    mov     r1, r9
    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mov     r0, #1                      @ r0<- 1 for NaN (cmpg bias)
    bx      r11




/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPL_FLOAT
dvmCompiler_TEMPLATE_CMPL_FLOAT:
/* File: armv5te/TEMPLATE_CMPL_FLOAT.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    mov     r9, r0                      @ Save copies - we may need to redo
    mov     r10, r1
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
    bhi     .LTEMPLATE_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    bx      r11
    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPL_FLOAT_gt_or_nan:
    mov     r0, r10                     @ restore in reverse order
    mov     r1, r9
    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mvn     r0, #0                      @ r0<- -1 for NaN (cmpl bias)
    bx      r11



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_MUL_LONG
dvmCompiler_TEMPLATE_MUL_LONG:
/* File: armv5te/TEMPLATE_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * For JIT: op1 in r0/r1, op2 in r2/r3, return in r0/r1
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX (low/high)
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    mov     r0, r9                      @ r0<- result low word
    mov     r1, r10                     @ r1<- result high word
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_SHL_LONG
dvmCompiler_TEMPLATE_SHL_LONG:
/* File: armv5te/TEMPLATE_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the
     * low 6 bits.
     */
    /* shl-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_SHR_LONG
dvmCompiler_TEMPLATE_SHR_LONG:
/* File: armv5te/TEMPLATE_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the
     * low 6 bits.
     */
    /* shr-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov     r1, r1, asr r2              @ r1<- r1 >> r2 (arithmetic)
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_USHR_LONG
dvmCompiler_TEMPLATE_USHR_LONG:
/* File: armv5te/TEMPLATE_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the
     * low 6 bits.
     */
    /* ushr-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2 (logical)
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
/* File: armv5te/TEMPLATE_THROW_EXCEPTION_COMMON.S */
    /*
     * Throw an exception from JIT'ed code.
     * On entry:
     *    r0    Dalvik PC that raises the exception
     */
    b       .LhandleException

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_SAVE_STATE
dvmCompiler_TEMPLATE_SAVE_STATE:
/* File: armv5te/TEMPLATE_SAVE_STATE.S */
    /*
     * This handler performs a register save for selfVerification mode.
     * On entry:
     *    Top of stack + 4: r7 value to save
     *    Top of stack + 0: r0 value to save
     *    r0 - offset from rGLUE to the beginning of the heapArgSpace record
     *    r7 - the value of regMap
     *
     * The handler must save regMap, r0-r12 and then return with r0-r12
     * with their original values (note that this means r0 and r7 must take
     * the values on the stack - not the ones in those registers on entry.
     * Finally, the two registers previously pushed must be popped.
     */
    add     r0, r0, rGLUE               @ pointer to heapArgSpace
    stmia   r0!, {r7}                   @ save regMap
    ldr     r7, [r13, #0]               @ recover r0 value
    stmia   r0!, {r7}                   @ save r0
    ldr     r7, [r13, #4]               @ recover r7 value
    stmia   r0!, {r1-r12}
    pop     {r0, r7}                    @ recover r0, r7
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_RESTORE_STATE
dvmCompiler_TEMPLATE_RESTORE_STATE:
/* File: armv5te/TEMPLATE_RESTORE_STATE.S */
    /*
     * This handler restores state following a selfVerification memory access.
     * On entry:
     *    r0 - offset from rGLUE to the 1st element of the coreRegs save array.
     */
    add     r0, r0, rGLUE               @ pointer to heapArgSpace.coreRegs[0]
    ldmia   r0, {r0-r12}
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_STRING_COMPARETO
dvmCompiler_TEMPLATE_STRING_COMPARETO:
/* File: armv5te/TEMPLATE_STRING_COMPARETO.S */
    /*
     * String's compareTo.
     *
     * Requires r0/r1 to have been previously checked for null.  Will
     * return negative if this's string is < comp, 0 if they are the
     * same and positive if >.
     *
     * IMPORTANT NOTE:
     *
     * This code relies on hard-coded offsets for string objects, and must be
     * kept in sync with definitions in UtfString.h.  See asm-constants.h
     *
     * On entry:
     *    r0:   this object pointer
     *    r1:   comp object pointer
     *
     */

    mov     r2, r0                      @ this to r2, opening up r0 for return value
    subs    r0, r2, r1                  @ Same?
    bxeq    lr                          @ identical object refs: return 0

    ldr     r4, [r2, #STRING_FIELDOFF_OFFSET]
    ldr     r9, [r1, #STRING_FIELDOFF_OFFSET]
    ldr     r7, [r2, #STRING_FIELDOFF_COUNT]
    ldr     r10, [r1, #STRING_FIELDOFF_COUNT]
    ldr     r2, [r2, #STRING_FIELDOFF_VALUE]
    ldr     r1, [r1, #STRING_FIELDOFF_VALUE]

    /*
     * At this point, we have:
     *    value:  r2/r1
     *    offset: r4/r9
     *    count:  r7/r10
     * We're going to compute
     *    r11 <- countDiff
     *    r10 <- minCount
     */
    subs    r11, r7, r10
    movls   r10, r7

    /* Now, build pointers to the string data */
    add     r2, r2, r4, lsl #1
    add     r1, r1, r9, lsl #1
    /*
     * Note: data pointers point to previous element so we can use pre-index
     * mode with base writeback.
     */
    add     r2, #16-2                   @ offset to contents[-1]
    add     r1, #16-2                   @ offset to contents[-1]

    /*
     * At this point we have:
     *   r2: *this string data
     *   r1: *comp string data
     *   r10: iteration count for comparison
     *   r11: value to return if the first part of the string is equal
     *   r0: reserved for result
     *   r3, r4, r7, r8, r9, r12 available for loading string data
     */

    subs    r10, #2
    blt     do_remainder2

    /*
     * Unroll the first two checks so we can quickly catch early mismatch
     * on long strings (but preserve incoming alignment)
     */

    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
    ldrh    r7, [r2, #2]!
    ldrh    r8, [r1, #2]!
    subs    r0, r3, r4
    subeqs  r0, r7, r8
    bxne    lr
    cmp     r10, #28
    bgt     do_memcmp16
    subs    r10, #3
    blt     do_remainder

loopback_triple:
    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
    ldrh    r7, [r2, #2]!
    ldrh    r8, [r1, #2]!
    ldrh    r9, [r2, #2]!
    ldrh    r12,[r1, #2]!
    subs    r0, r3, r4
    subeqs  r0, r7, r8
    subeqs  r0, r9, r12
    bxne    lr
    subs    r10, #3
    bge     loopback_triple

do_remainder:
    adds    r10, #3
    beq     returnDiff

loopback_single:
    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
    subs    r0, r3, r4
    bxne    lr
    subs    r10, #1
    bne     loopback_single

returnDiff:
    mov     r0, r11                     @ strings equal for minCount: return countDiff
    bx      lr

do_remainder2:
    adds    r10, #2
    bne     loopback_single
    mov     r0, r11
    bx      lr

    /* Long string case */
do_memcmp16:
    mov     r4, lr
    ldr     lr, .Lmemcmp16
    mov     r7, r11
    add     r0, r2, #2
    add     r1, r1, #2
    mov     r2, r10
    blx     lr
    cmp     r0, #0
    bxne    r4
    mov     r0, r7                      @ memcmp16 said equal: return countDiff
    bx      r4

.Lmemcmp16:
    .word   __memcmp16


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_STRING_INDEXOF
dvmCompiler_TEMPLATE_STRING_INDEXOF:
/* File: armv5te/TEMPLATE_STRING_INDEXOF.S */
    /*
     * String's indexOf.
     *
     * Requires r0 to have been previously checked for null.  Will
     * return index of match of r1 in r0.
     *
     * IMPORTANT NOTE:
     *
     * This code relies on hard-coded offsets for string objects, and must be
     * kept in sync with definitions in UtfString.h.  See asm-constants.h
     *
     * On entry:
     *    r0:   string object pointer
     *    r1:   char to match
     *    r2:   Starting offset in string data
     */

    ldr     r7, [r0, #STRING_FIELDOFF_OFFSET]
    ldr     r8, [r0, #STRING_FIELDOFF_COUNT]
    ldr     r0, [r0, #STRING_FIELDOFF_VALUE]

    /*
     * At this point, we have:
     *   r0: object pointer
     *   r1: char to match
     *   r2: starting offset
     *   r7: offset
     *   r8: string length
     */

    /* Build pointer to start of string data */
    add     r0, #16
    add     r0, r0, r7, lsl #1

    /* Save a copy of starting data in r7 */
    mov     r7, r0

    /* Clamp start to [0..count] */
    cmp     r2, #0
    movlt   r2, #0
    cmp     r2, r8
    movgt   r2, r8

    /* Build pointer to start of data to compare and pre-bias */
    add     r0, r0, r2, lsl #1
    sub     r0, #2

    /* Compute iteration count */
    sub     r8, r2

    /*
     * At this point we have:
     *   r0: start of data to test
     *   r1: char to compare
     *   r8: iteration count
     *   r7: original start of string
     *   r3, r4, r9, r10, r11, r12 available for loading string data
     */

    subs    r8, #4
    blt     indexof_remainder

indexof_loop4:
    ldrh    r3, [r0, #2]!
    ldrh    r4, [r0, #2]!
    ldrh    r10, [r0, #2]!
    ldrh    r11, [r0, #2]!
    cmp     r3, r1
    beq     match_0
    cmp     r4, r1
    beq     match_1
    cmp     r10, r1
    beq     match_2
    cmp     r11, r1
    beq     match_3
    subs    r8, #4
    bge     indexof_loop4

indexof_remainder:
    adds    r8, #4
    beq     indexof_nomatch

indexof_loop1:
    ldrh    r3, [r0, #2]!
    cmp     r3, r1
    beq     match_3
    subs    r8, #1
    bne     indexof_loop1

indexof_nomatch:
    mov     r0, #-1
    bx      lr

match_0:
    sub     r0, #6
    sub     r0, r7
    asr     r0, r0, #1
    bx      lr
match_1:
    sub     r0, #4
    sub     r0, r7
    asr     r0, r0, #1
    bx      lr
match_2:
    sub     r0, #2
    sub     r0, r7
    asr     r0, r0, #1
    bx      lr
match_3:
    sub     r0, r7
    asr     r0, r0, #1
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INTERPRET
dvmCompiler_TEMPLATE_INTERPRET:
/* File: armv5te/TEMPLATE_INTERPRET.S */
    /*
     * This handler transfers control to the interpreter without performing
     * any lookups.  It may be called either as part of a normal chaining
     * operation, or from the transition code in header.S.  We distinguish
     * the two cases by looking at the link register.  If called from a
     * translation chain, it will point to the chaining Dalvik PC + 1.
     * On entry:
     *    lr - if NULL:
     *    r1 - the Dalvik PC to begin interpretation.
1101 * else 1102 * [lr, #-1] contains Dalvik PC to begin interpretation 1103 * rGLUE - pointer to interpState 1104 * rFP - Dalvik frame pointer 1105 */ 1106 cmp lr, #0 1107 ldrne r1,[lr, #-1] 1108 ldr r2, .LinterpPunt 1109 mov r0, r1 @ set Dalvik PC 1110 bx r2 1111 @ doesn't return 1112 1113.LinterpPunt: 1114 .word dvmJitToInterpPunt 1115 1116 .size dvmCompilerTemplateStart, .-dvmCompilerTemplateStart 1117/* File: armv5te/footer.S */ 1118/* 1119 * =========================================================================== 1120 * Common subroutines and data 1121 * =========================================================================== 1122 */ 1123 1124 .text 1125 .align 2 1126.LinvokeNative: 1127 @ Prep for the native call 1128 @ r1 = newFP, r0 = methodToCall 1129 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 1130 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 1131 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 1132 str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)] 1133 @ newFp->localRefCookie=top 1134 mov r9, r3 @ r9<- glue->self (preserve) 1135 SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area 1136 1137 mov r2, r0 @ r2<- methodToCall 1138 mov r0, r1 @ r0<- newFP 1139 add r1, rGLUE, #offGlue_retval @ r1<- &retval 1140 1141 LDR_PC_LR "[r2, #offMethod_nativeFunc]" 1142 1143 @ native return; r9=self, r10=newSaveArea 1144 @ equivalent to dvmPopJniLocals 1145 ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret 1146 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top 1147 ldr r1, [r9, #offThread_exception] @ check for exception 1148 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 1149 cmp r1, #0 @ null? 
1150 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 1151 ldr r0, [r10, #offStackSaveArea_savedPc] @ reload rPC 1152 bne .LhandleException @ no, handle exception 1153 bx r2 1154 1155/* 1156 * On entry: 1157 * r0 Faulting Dalvik PC 1158 */ 1159.LhandleException: 1160 ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func 1161 ldr rIBASE, .LdvmAsmInstructionStart @ same as above 1162 mov rPC, r0 @ reload the faulting Dalvik address 1163 mov pc, r1 @ branch to dvmMterpCommonExceptionThrown 1164 1165 .align 2 1166.LdvmAsmInstructionStart: 1167 .word dvmAsmInstructionStart 1168.LdvmJitToInterpNoChain: 1169 .word dvmJitToInterpNoChain 1170.LdvmMterpStdBail: 1171 .word dvmMterpStdBail 1172.LdvmMterpCommonExceptionThrown: 1173 .word dvmMterpCommonExceptionThrown 1174.L__aeabi_cdcmple: 1175 .word __aeabi_cdcmple 1176.L__aeabi_cfcmple: 1177 .word __aeabi_cfcmple 1178 1179 .global dmvCompilerTemplateEnd 1180dmvCompilerTemplateEnd: 1181 1182#endif /* WITH_JIT */ 1183 1184