CompilerTemplateAsm-armv5te.S revision 24ac537cf8d214f7f1bcb07aace429521247d1eb
/*
 * This file was generated automatically by gen-template.py for 'armv5te'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#if defined(WITH_JIT)

/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
JIT and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer

The following registers have fixed assignments in mterp but are scratch
registers in compiled code

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC r4
#define rFP r5
#define rGLUE r6
#define rINST r7
#define rIBASE r8

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/* Publish the current Dalvik PC to the current frame's stack save area */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../../../mterp/common/asm-constants.h"


/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines and utility
 * ===========================================================================
 */

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm


    .global dvmCompilerTemplateStart
    .type   dvmCompilerTemplateStart, %function
    .text

dvmCompilerTemplateStart:

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMP_LONG
dvmCompiler_TEMPLATE_CMP_LONG:
/* File: armv5te/TEMPLATE_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
 */
    /* cmp-long vAA, vBB, vCC */
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LTEMPLATE_CMP_LONG_less    @ signed compare on high part
    bgt     .LTEMPLATE_CMP_LONG_greater
    subs    r0, r0, r2                  @ r0<- r0 - r2
    bxeq    lr                          @ equal: r0 is already 0
    bhi     .LTEMPLATE_CMP_LONG_greater @ unsigned compare on low part
.LTEMPLATE_CMP_LONG_less:
    mvn     r0, #0                      @ r0<- -1
    bx      lr
.LTEMPLATE_CMP_LONG_greater:
    mov     r0, #1                      @ r0<- 1
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_RETURN
dvmCompiler_TEMPLATE_RETURN:
/* File: armv5te/TEMPLATE_RETURN.S */
    /*
     * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
     * If the stored value in returnAddr
     * is non-zero, the caller is compiled by the JIT thus return to the
     * address in the code cache following the invoke instruction.  Otherwise
     * return to the special dvmJitToInterpNoChain entry point.
     */
    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
#if !defined(WITH_SELF_VERIFICATION)
    ldr     r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
#else
    mov     r9, #0                      @ disable chaining
#endif
    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ break frame?
#if !defined(WITH_SELF_VERIFICATION)
    beq     1f                          @ bail to interpreter
#else
    blxeq   lr                          @ punt to interpreter and compare state
#endif
    ldr     r1, .LdvmJitToInterpNoChain @ defined in footer.S
    mov     rFP, r10                    @ publish new FP
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    ldr     r8, [r8]                    @ r8<- suspendCount

    str     r2, [rGLUE, #offGlue_method] @ glue->method = newSave->method
    ldr     r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
    add     rPC, rPC, #6                @ publish new rPC (advance 6 bytes)
    str     r0, [rGLUE, #offGlue_methodClassDex]
    cmp     r8, #0                      @ check the suspendCount
    movne   r9, #0                      @ clear the chaining cell address
    cmp     r9, #0                      @ chaining cell exists?
    blxne   r9                          @ jump to the chaining cell
#if defined(EXIT_STATS)
    mov     r0, #kCallsiteInterpreted
#endif
    mov     pc, r1                      @ callsite is interpreted
1:
    stmia   rGLUE, {rPC, rFP}           @ SAVE_PC_FP_TO_GLUE()
    ldr     r2, .LdvmMterpStdBail       @ defined in footer.S
    mov     r1, #0                      @ changeInterp = false
    mov     r0, rGLUE                   @ Expecting rGLUE in r0
    blx     r2                          @ exit the interpreter

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT
dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
    /*
     * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
     * into rPC then jump to dvmJitToInterpNoChain to dispatch the
     * runtime-resolved callee.
 */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
    ldr     r8, [r8]                    @ r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    lr                          @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
    ldr     r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                      @ suspendCount != 0
    bxne    lr                          @ bail to the interpreter
    tst     r10, #ACC_NATIVE
#if !defined(WITH_SELF_VERIFICATION)
    bne     .LinvokeNative
#else
    bxne    lr                          @ bail to the interpreter
#endif

    ldr     r10, .LdvmJitToInterpNoChain
    ldr     r3, [r9, #offClassObject_pDvmDex]  @ r3<- method->clazz->pDvmDex
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
    mov     rFP, r1                         @ fp = newFp
    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp

    @ Start executing the callee
#if defined(EXIT_STATS)
    mov     r0, #kInlineCacheMiss
#endif
    mov     pc, r10                         @ dvmJitToInterpNoChain

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN
dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
    /*
     * For monomorphic callsite, setup the Dalvik frame and return to the
     * Thumb code through the link register to transfer control to the callee
     * method through a dedicated chaining cell.
     */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    @ methodToCall is guaranteed to be non-native
.LinvokeChain:
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldrh    r2, [r0, #offMethod_outsSize]  @ r2<- methodToCall->outsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    add     r12, lr, #2                 @ setup the punt-to-interp address
    sub     r10, r10, r2, lsl #2        @ r10<- bottom (newsave - outsSize)
    ldr     r8, [r8]                    @ r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    r12                         @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    ldr     r9, [r0, #offMethod_clazz]      @ r9<- method->clazz
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                          @ suspendCount != 0
    bxne    r12                             @ bail to the interpreter

    ldr     r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
    mov     rFP, r1                         @ fp = newFp
    str     rFP, [r2, #offThread_curFrame]  @ self->curFrame = newFp

    bx      lr                              @ return to the callee-chaining cell



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
    /*
     * For polymorphic callsite, check whether the cached class pointer matches
     * the current one.  If so setup the Dalvik frame and return to the
     * Thumb code through the link register to transfer control to the callee
     * method through a dedicated chaining cell.
     *
     * The predicted chaining cell is declared in ArmLIR.h with the
     * following layout:
     *
     *  typedef struct PredictedChainingCell {
     *      u4 branch;
     *      const ClassObject *clazz;
     *      const Method *method;
     *      u4 counter;
     *  } PredictedChainingCell;
     *
     * Upon returning to the callsite:
     *    - lr  : to branch to the chaining cell
     *    - lr+2: to punt to the interpreter
     *    - lr+4: to fully resolve the callee and may rechain.
     *            r3 <- class
     *            r9 <- counter
     */
    @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
    ldr     r3, [r0, #offObject_clazz]  @ r3 <- this->class
    ldr     r8, [r2, #4]                @ r8 <- predictedChainCell->clazz
    ldr     r0, [r2, #8]                @ r0 <- predictedChainCell->method
    ldr     r9, [r2, #12]               @ r9 <- predictedChainCell->counter
    cmp     r3, r8                      @ predicted class == actual class?
    beq     .LinvokeChain               @ predicted chain is valid
    ldr     r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
    sub     r1, r9, #1                  @ count--
    str     r1, [r2, #12]               @ write back to PredictedChainingCell->counter
    add     lr, lr, #4                  @ return to fully-resolve landing pad
    /*
     * r1 <- count
     * r2 <- &predictedChainCell
     * r3 <- this->class
     * r4 <- dPC
     * r7 <- this->class->vtable
     */
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE
dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
    @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
    ldrh    r7, [r0, #offMethod_registersSize]  @ r7<- methodToCall->regsSize
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    ldr     r8, [rGLUE, #offGlue_pSelfSuspendCount] @ r8<- &suspendCount
    add     r3, r1, #1  @ Thumb addr is odd
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r7, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- stack save area
    ldr     r8, [r8]                    @
r8<- suspendCount (int)
    cmp     r10, r9                     @ bottom < interpStackEnd?
    bxlt    lr                          @ return to raise stack overflow excep.
    @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
    str     rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    str     rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
    ldr     rPC, [r0, #offMethod_insns]     @ rPC<- methodToCall->insns


    @ set up newSaveArea
    str     rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
    str     r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
    str     r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
    cmp     r8, #0                      @ suspendCount != 0
    ldr     r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
#if !defined(WITH_SELF_VERIFICATION)
    bxne    lr                          @ bail to the interpreter
#else
    bx      lr                          @ bail to interpreter unconditionally
#endif

    @ go ahead and transfer control to the native code
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
                                        @ newFp->localRefCookie=top
    mov     r9, r3                      @ r9<- glue->self (preserve)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- new stack save area

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFP
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

    blx     r8                          @ off to the native code

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
    ldr     r1, [r9, #offThread_exception] @ check for exception
    str     rFP, [r9, #offThread_curFrame] @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
    ldr     r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
    bne     .LhandleException           @ no, handle exception
    bx      r2


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPG_DOUBLE
dvmCompiler_TEMPLATE_CMPG_DOUBLE:
/* File: armv5te/TEMPLATE_CMPG_DOUBLE.S */
/* File: armv5te/TEMPLATE_CMPL_DOUBLE.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    push    {r0-r3}                     @ save operands
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
    bhi     .LTEMPLATE_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    add     sp, #16                     @ drop unused operands
    bx      r11

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
    pop     {r2-r3}                     @ restore operands in reverse order
    pop     {r0-r1}                     @ restore operands in reverse order
    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mov     r0, #1                      @ r0<- 1 for NaN (cmpg bias)
    bx      r11



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPL_DOUBLE
dvmCompiler_TEMPLATE_CMPL_DOUBLE:
/* File: armv5te/TEMPLATE_CMPL_DOUBLE.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    push    {r0-r3}                     @ save operands
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
    bhi     .LTEMPLATE_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    add     sp, #16                     @ drop unused operands
    bx      r11

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
    pop     {r2-r3}                     @ restore operands in reverse order
    pop     {r0-r1}                     @ restore operands in reverse order
    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mvn     r0, #0                      @ r0<- -1 for NaN (cmpl bias)
    bx      r11


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPG_FLOAT
dvmCompiler_TEMPLATE_CMPG_FLOAT:
/* File: armv5te/TEMPLATE_CMPG_FLOAT.S */
/* File: armv5te/TEMPLATE_CMPL_FLOAT.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    mov     r9, r0                      @ Save copies - we may need to redo
    mov     r10, r1
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
    bhi     .LTEMPLATE_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    bx      r11
    @ Test for NaN with a second comparison.
  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPG_FLOAT_gt_or_nan:
    mov     r0, r10                     @ restore in reverse order
    mov     r1, r9
    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mov     r0, #1                      @ r0<- 1 for NaN (cmpg bias)
    bx      r11




/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_CMPL_FLOAT
dvmCompiler_TEMPLATE_CMPL_FLOAT:
/* File: armv5te/TEMPLATE_CMPL_FLOAT.S */
    /*
     * For the JIT: incoming arguments in r0-r1, r2-r3
     *              result in r0
     *
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r0 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    mov     r9, r0                      @ Save copies - we may need to redo
    mov     r10, r1
    mov     r11, lr                     @ save return address
    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
    bhi     .LTEMPLATE_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
    mvncc   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
    bx      r11
    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LTEMPLATE_CMPL_FLOAT_gt_or_nan:
    mov     r0, r10                     @ restore in reverse order
    mov     r1, r9
    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
    movcc   r0, #1                      @ (greater than) r0<- 1
    bxcc    r11
    mvn     r0, #0                      @ r0<- -1 for NaN (cmpl bias)
    bx      r11



/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_MUL_LONG
dvmCompiler_TEMPLATE_MUL_LONG:
/* File: armv5te/TEMPLATE_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * For JIT: op1 in r0/r1, op2 in r2/r3, return in r0/r1
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
 */
    /* mul-long vAA, vBB, vCC */
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    mov     r0, r9                      @ low word of result
    mov     r1, r10                     @ high word of result
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_SHL_LONG
dvmCompiler_TEMPLATE_SHL_LONG:
/* File: armv5te/TEMPLATE_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     */
    /* shl-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_SHR_LONG
dvmCompiler_TEMPLATE_SHR_LONG:
/* File: armv5te/TEMPLATE_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     */
    /* shr-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov     r1, r1, asr r2              @ r1<- r1 >> r2
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_USHR_LONG
dvmCompiler_TEMPLATE_USHR_LONG:
/* File: armv5te/TEMPLATE_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     */
    /* ushr-long vAA, vBB, vCC */
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
    bx      lr


/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
/* File: armv5te/TEMPLATE_THROW_EXCEPTION_COMMON.S */
    /*
     * Throw an exception from JIT'ed code.
     * On entry:
     *    r0    Dalvik PC that raises the exception
     */
    b       .LhandleException

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_SAVE_STATE
dvmCompiler_TEMPLATE_SAVE_STATE:
/* File: armv5te/TEMPLATE_SAVE_STATE.S */
    /*
     * This handler performs a register save for selfVerification mode.
     * On entry:
     *    Top of stack + 4: r7 value to save
     *    Top of stack + 0: r0 value to save
     *    r0 - offset from rGLUE to the beginning of the heapArgSpace record
     *    r7 - the value of regMap
     *
     * The handler must save regMap, r0-r12 and then return with r0-r12
     * with their original values (note that this means r0 and r7 must take
     * the values on the stack - not the ones in those registers on entry.
     * Finally, the two registers previously pushed must be popped.
 */
    add     r0, r0, rGLUE               @ pointer to heapArgSpace
    stmia   r0!, {r7}                   @ save regMap
    ldr     r7, [r13, #0]               @ recover r0 value
    stmia   r0!, {r7}                   @ save r0
    ldr     r7, [r13, #4]               @ recover r7 value
    stmia   r0!, {r1-r12}
    pop     {r0, r7}                    @ recover r0, r7
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_RESTORE_STATE
dvmCompiler_TEMPLATE_RESTORE_STATE:
/* File: armv5te/TEMPLATE_RESTORE_STATE.S */
    /*
     * This handler restores state following a selfVerification memory access.
     * On entry:
     *    r0 - offset from rGLUE to the 1st element of the coreRegs save array.
     */
    add     r0, r0, rGLUE               @ pointer to heapArgSpace.coreRegs[0]
    ldmia   r0, {r0-r12}
    bx      lr

/* ------------------------------ */
    .balign 4
    .global dvmCompiler_TEMPLATE_STRING_COMPARETO
dvmCompiler_TEMPLATE_STRING_COMPARETO:
/* File: armv5te/TEMPLATE_STRING_COMPARETO.S */
    /*
     * String's compareTo.
     *
     * Requires r0/r1 to have been previously checked for null.  Will
     * return negative if this's string is < comp, 0 if they are the
     * same and positive if >.
     *
     * IMPORTANT NOTE:
     *
     * This code relies on hard-coded offsets for string objects, and must be
     * kept in sync with definitions in UtfString.h.  See asm-constants.h
     *
     * On entry:
     *    r0:   this object pointer
     *    r1:   comp object pointer
     *
     */

    mov     r2, r0                      @ this to r2, opening up r0 for return value
    subs    r0, r2, r1                  @ Same?
    bxeq    lr

    ldr     r4, [r2, #STRING_FIELDOFF_OFFSET]
    ldr     r9, [r1, #STRING_FIELDOFF_OFFSET]
    ldr     r7, [r2, #STRING_FIELDOFF_COUNT]
    ldr     r10, [r1, #STRING_FIELDOFF_COUNT]
    ldr     r2, [r2, #STRING_FIELDOFF_VALUE]
    ldr     r1, [r1, #STRING_FIELDOFF_VALUE]

    /*
     * At this point, we have:
     *    value:  r2/r1
     *    offset: r4/r9
     *    count:  r7/r10
     * We're going to compute
     *    r11 <- countDiff
     *    r10 <- minCount
     */
    subs    r11, r7, r10
    movls   r10, r7

    /* Now, build pointers to the string data */
    add     r2, r2, r4, lsl #1
    add     r1, r1, r9, lsl #1
    /*
     * Note: data pointers point to previous element so we can use pre-index
     * mode with base writeback.
     */
    add     r2, #16-2                   @ offset to contents[-1]
    add     r1, #16-2                   @ offset to contents[-1]

    /*
     * At this point we have:
     *   r2: *this string data
     *   r1: *comp string data
     *   r10: iteration count for comparison
     *   r11: value to return if the first part of the string is equal
     *   r0: reserved for result
     *   r3, r4, r7, r8, r9, r12 available for loading string data
     */

    subs    r10, #2
    blt     do_remainder2

    /*
     * Unroll the first two checks so we can quickly catch early mismatch
     * on long strings (but preserve incoming alignment)
     */

    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
    ldrh    r7, [r2, #2]!
    ldrh    r8, [r1, #2]!
    subs    r0, r3, r4
    subeqs  r0, r7, r8
    bxne    lr
    cmp     r10, #28
    bgt     do_memcmp16
    subs    r10, #3
    blt     do_remainder

loopback_triple:
    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
    ldrh    r7, [r2, #2]!
    ldrh    r8, [r1, #2]!
    ldrh    r9, [r2, #2]!
    ldrh    r12,[r1, #2]!
    subs    r0, r3, r4
    subeqs  r0, r7, r8
    subeqs  r0, r9, r12
    bxne    lr
    subs    r10, #3
    bge     loopback_triple

do_remainder:
    adds    r10, #3
    beq     returnDiff

loopback_single:
    ldrh    r3, [r2, #2]!
    ldrh    r4, [r1, #2]!
917 subs r0, r3, r4 918 bxne lr 919 subs r10, #1 920 bne loopback_single 921 922returnDiff: 923 mov r0, r11 924 bx lr 925 926do_remainder2: 927 adds r10, #2 928 bne loopback_single 929 mov r0, r11 930 bx lr 931 932 /* Long string case */ 933do_memcmp16: 934 mov r4, lr 935 ldr lr, .Lmemcmp16 936 mov r7, r11 937 add r0, r2, #2 938 add r1, r1, #2 939 mov r2, r10 940 blx lr 941 cmp r0, #0 942 bxne r4 943 mov r0, r7 944 bx r4 945 946.Lmemcmp16: 947 .word __memcmp16 948 949 950/* ------------------------------ */ 951 .balign 4 952 .global dvmCompiler_TEMPLATE_STRING_INDEXOF 953dvmCompiler_TEMPLATE_STRING_INDEXOF: 954/* File: armv5te/TEMPLATE_STRING_INDEXOF.S */ 955 /* 956 * String's indexOf. 957 * 958 * Requires r0 to have been previously checked for null. Will 959 * return index of match of r1 in r0. 960 * 961 * IMPORTANT NOTE: 962 * 963 * This code relies on hard-coded offsets for string objects, and must be 964 * kept in sync wth definitions in UtfString.h See asm-constants.h 965 * 966 * On entry: 967 * r0: string object pointer 968 * r1: char to match 969 * r2: Starting offset in string data 970 */ 971 972 ldr r7, [r0, #STRING_FIELDOFF_OFFSET] 973 ldr r8, [r0, #STRING_FIELDOFF_COUNT] 974 ldr r0, [r0, #STRING_FIELDOFF_VALUE] 975 976 /* 977 * At this point, we have: 978 * r0: object pointer 979 * r1: char to match 980 * r2: starting offset 981 * r7: offset 982 * r8: string length 983 */ 984 985 /* Build pointer to start of string data */ 986 add r0, #16 987 add r0, r0, r7, lsl #1 988 989 /* Save a copy of starting data in r7 */ 990 mov r7, r0 991 992 /* Clamp start to [0..count] */ 993 cmp r2, #0 994 movlt r2, #0 995 cmp r2, r8 996 movgt r2, r8 997 998 /* Build pointer to start of data to compare and pre-bias */ 999 add r0, r0, r2, lsl #1 1000 sub r0, #2 1001 1002 /* Compute iteration count */ 1003 sub r8, r2 1004 1005 /* 1006 * At this point we have: 1007 * r0: start of data to test 1008 * r1: chat to compare 1009 * r8: iteration count 1010 * r7: original start of 
string 1011 * r3, r4, r9, r10, r11, r12 available for loading string data 1012 */ 1013 1014 subs r8, #4 1015 blt indexof_remainder 1016 1017indexof_loop4: 1018 ldrh r3, [r0, #2]! 1019 ldrh r4, [r0, #2]! 1020 ldrh r10, [r0, #2]! 1021 ldrh r11, [r0, #2]! 1022 cmp r3, r1 1023 beq match_0 1024 cmp r4, r1 1025 beq match_1 1026 cmp r10, r1 1027 beq match_2 1028 cmp r11, r1 1029 beq match_3 1030 subs r8, #4 1031 bge indexof_loop4 1032 1033indexof_remainder: 1034 adds r8, #4 1035 beq indexof_nomatch 1036 1037indexof_loop1: 1038 ldrh r3, [r0, #2]! 1039 cmp r3, r1 1040 beq match_3 1041 subs r8, #1 1042 bne indexof_loop1 1043 1044indexof_nomatch: 1045 mov r0, #-1 1046 bx lr 1047 1048match_0: 1049 sub r0, #6 1050 sub r0, r7 1051 asr r0, r0, #1 1052 bx lr 1053match_1: 1054 sub r0, #4 1055 sub r0, r7 1056 asr r0, r0, #1 1057 bx lr 1058match_2: 1059 sub r0, #2 1060 sub r0, r7 1061 asr r0, r0, #1 1062 bx lr 1063match_3: 1064 sub r0, r7 1065 asr r0, r0, #1 1066 bx lr 1067 1068 1069/* ------------------------------ */ 1070 .balign 4 1071 .global dvmCompiler_TEMPLATE_INTERPRET 1072dvmCompiler_TEMPLATE_INTERPRET: 1073/* File: armv5te/TEMPLATE_INTERPRET.S */ 1074 /* 1075 * This handler transfers control to the interpeter without performing 1076 * any lookups. It may be called either as part of a normal chaining 1077 * operation, or from the transition code in header.S. We distinquish 1078 * the two cases by looking at the link register. If called from a 1079 * translation chain, it will point to the chaining Dalvik PC + 1. 1080 * On entry: 1081 * lr - if NULL: 1082 * r1 - the Dalvik PC to begin interpretation. 
1083 * else 1084 * [lr, #-1] contains Dalvik PC to begin interpretation 1085 * rGLUE - pointer to interpState 1086 * rFP - Dalvik frame pointer 1087 */ 1088 cmp lr, #0 1089 ldrne r1,[lr, #-1] 1090 ldr r2, .LinterpPunt 1091 mov r0, r1 @ set Dalvik PC 1092 bx r2 1093 @ doesn't return 1094 1095.LinterpPunt: 1096 .word dvmJitToInterpPunt 1097 1098 .size dvmCompilerTemplateStart, .-dvmCompilerTemplateStart 1099/* File: armv5te/footer.S */ 1100/* 1101 * =========================================================================== 1102 * Common subroutines and data 1103 * =========================================================================== 1104 */ 1105 1106 .text 1107 .align 2 1108.LinvokeNative: 1109 @ Prep for the native call 1110 @ r1 = newFP, r0 = methodToCall 1111 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 1112 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 1113 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 1114 str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)] 1115 @ newFp->localRefCookie=top 1116 mov r9, r3 @ r9<- glue->self (preserve) 1117 SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area 1118 1119 mov r2, r0 @ r2<- methodToCall 1120 mov r0, r1 @ r0<- newFP 1121 add r1, rGLUE, #offGlue_retval @ r1<- &retval 1122 1123 LDR_PC_LR "[r2, #offMethod_nativeFunc]" 1124 1125 @ native return; r9=self, r10=newSaveArea 1126 @ equivalent to dvmPopJniLocals 1127 ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret 1128 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top 1129 ldr r1, [r9, #offThread_exception] @ check for exception 1130 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 1131 cmp r1, #0 @ null? 
1132 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 1133 ldr r0, [r10, #offStackSaveArea_savedPc] @ reload rPC 1134 bne .LhandleException @ no, handle exception 1135 bx r2 1136 1137/* 1138 * On entry: 1139 * r0 Faulting Dalvik PC 1140 */ 1141.LhandleException: 1142 ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func 1143 ldr rIBASE, .LdvmAsmInstructionStart @ same as above 1144 mov rPC, r0 @ reload the faulting Dalvik address 1145 mov pc, r1 @ branch to dvmMterpCommonExceptionThrown 1146 1147 .align 2 1148.LdvmAsmInstructionStart: 1149 .word dvmAsmInstructionStart 1150.LdvmJitToInterpNoChain: 1151 .word dvmJitToInterpNoChain 1152.LdvmMterpStdBail: 1153 .word dvmMterpStdBail 1154.LdvmMterpCommonExceptionThrown: 1155 .word dvmMterpCommonExceptionThrown 1156.L__aeabi_cdcmple: 1157 .word __aeabi_cdcmple 1158.L__aeabi_cfcmple: 1159 .word __aeabi_cfcmple 1160 1161 .global dmvCompilerTemplateEnd 1162dmvCompilerTemplateEnd: 1163 1164#endif /* WITH_JIT */ 1165 1166