footer.S revision 8b095215a4d5bde723819087f3455bdcc250a78f
1/*
2 * ===========================================================================
3 *  Common subroutines and data
4 * ===========================================================================
5 */
6
7    .text
8    .align  2
9
10#if defined(WITH_JIT)
11
12#if defined(WITH_SELF_VERIFICATION)
/*
 * "longjmp" to a translation after single-stepping.  Before returning
 * to the translation, state must be saved for self-verification, so we
 * re-enter through jitSVShadowRunStart instead of branching directly.
 * On entry (per the code below): r0 = Thread* self, r1 = Dalvik pc,
 * r2 = Dalvik fp.
 */
    .global dvmJitResumeTranslation              @ (Thread* self, u4* dPC, u4* dFP)
dvmJitResumeTranslation:
    mov    rSELF, r0                             @ restore self
    mov    rPC, r1                               @ restore Dalvik pc
    mov    rFP, r2                               @ restore Dalvik fp
    ldr    r10, [rSELF,#offThread_jitResumeNPC]  @ resume address
    mov    r2, #0
    str    r2, [rSELF,#offThread_jitResumeNPC]   @ reset resume address
    ldr    sp, [rSELF,#offThread_jitResumeNSP]   @ cut back native stack
    b      jitSVShadowRunStart                   @ resume as if cache hit
                                                 @ expects resume addr in r10
28
/*
 * Self-verification build: punt from the code cache back to the
 * interpreter via the shadow-run exit path.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
35
/*
 * Self-verification build: return to the interpreter for one instruction.
 * Saves the native resume pc/sp and the Dalvik pc of the next instruction
 * so execution can later resume in the translation.
 * On entry: r0 = Dalvik PC to interpret, r1 = Dalvik PC of next insn,
 * lr = resume point in translation.
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    mov    rPC, r0              @ set up dalvik pc
    EXPORT_PC()
    str    lr, [rSELF,#offThread_jitResumeNPC]   @ save native resume pc
    str    sp, [rSELF,#offThread_jitResumeNSP]   @ save native sp
    str    r1, [rSELF,#offThread_jitResumeDPC]   @ save Dalvik resume pc
    mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
    b      jitSVShadowRunEnd            @ doesn't return
45
46
/*
 * Self-verification build: exit to the interpreter without chaining and
 * without profiling.  rPC already holds the target Dalvik PC.
 */
    .global dvmJitToInterpNoChainNoProfile
dvmJitToInterpNoChainNoProfile:
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoProfile            @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
54
/*
 * Self-verification build: exit to the interpreter and request trace
 * selection for the target.  rPC already holds the target Dalvik PC.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
62
/*
 * Self-verification build: exit to the interpreter and request trace
 * selection.  The target Dalvik PC is in the 32-bit word following the
 * Thumb BLX that got us here; lr still has its Thumb bit set, hence the
 * #-1 adjustment to reach the word-aligned address.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
70
/*
 * Self-verification build: exit to the interpreter at a backward branch.
 * Target PC is in the word following the Thumb BLX (see note above on
 * the lr #-1 adjustment).
 */
    .global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
78
/*
 * Self-verification build: normal exit from the code cache back to the
 * interpreter.  Target PC is in the word following the Thumb BLX.
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSNormal               @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
86
/*
 * Self-verification build: exit to the interpreter without chaining.
 * rPC already holds the target Dalvik PC.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
94#else
95
/*
 * "longjmp" to a translation after single-stepping.
 * On entry (per the code below): r0 = Thread* self, r1 = Dalvik pc,
 * r2 = Dalvik fp.
 */
    .global dvmJitResumeTranslation              @ (Thread* self, u4* dPC, u4* dFP)
dvmJitResumeTranslation:
    mov    rSELF, r0                             @ restore self
    mov    rPC, r1                               @ restore Dalvik pc
    mov    rFP, r2                               @ restore Dalvik fp
    ldr    r0, [rSELF,#offThread_jitResumeNPC]   @ r0<- native resume address
    mov    r2, #0
    str    r2, [rSELF,#offThread_jitResumeNPC]   @ reset resume address
    ldr    sp, [rSELF,#offThread_jitResumeNSP]   @ cut back native stack
    bx     r0                                    @ resume translation
109
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction. We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bounce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 * On entry: r0 = Dalvik PC to resume interpreting at.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    mov    rPC, r0                      @ restore Dalvik pc
#if defined(WITH_JIT_TUNING)
    mov    r0,lr                        @ pass translation return address
    bl     dvmBumpPunt                  @ count the punt for tuning stats
#endif
    EXPORT_PC()
    mov    r0, #0
    str    r0, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
    ldr    rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
131
/*
 * Return to the interpreter to handle a single instruction.
 * We'll use the normal single-stepping mechanism via interpBreak,
 * but also save the native pc of the resume point in the translation
 * and the native sp so that we can later do the equivalent of a
 * longjmp() to resume.
 * On entry:
 *    r0 (dPC) <= Dalvik PC of instruction to interpret
 *    lr <= resume point in translation
 *    r1 <= Dalvik PC of next instruction
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    mov    rPC, r0              @ set up dalvik pc
    EXPORT_PC()
    str    lr, [rSELF,#offThread_jitResumeNPC]     @ save native resume pc
    str    sp, [rSELF,#offThread_jitResumeNSP]     @ save native sp
    str    r1, [rSELF,#offThread_jitResumeDPC]     @ save Dalvik resume pc
    mov    r1, #1
    str    r1, [rSELF,#offThread_singleStepCount]  @ just step once
    mov    r0, rSELF
    mov    r1, #kSubModeCountedStep
    bl     dvmEnableSubMode     @ (self, newMode)
    ldr    rIBASE, [rSELF,#offThread_curHandlerTable]
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
159
/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used for callees.
 * On entry: rPC already holds the target Dalvik PC.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain               @ count for tuning stats
#endif
    mov    r0,rPC
    mov    r1,rSELF
    bl     dvmJitGetTraceAddrThread @ (pc, self)
    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @  in case target is HANDLER_INTERPRET
    cmp    r0,#0                    @ !0 means translation exists
    bxne   r0                       @ continue native execution if so
    b      2f                       @ branch over to use the interpreter
178
/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.  The target Dalvik PC is in the 32-bit word following
 * the Thumb BLX that got us here; lr has its Thumb bit set, hence
 * the #-1 adjustment below.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    rPC,[lr, #-1]           @ get our target PC
    add    rINST,lr,#-5            @ save start of chain branch
    add    rINST, #-4              @  .. which is 9 bytes back
    mov    r0,rPC
    mov    r1,rSELF
    bl     dvmJitGetTraceAddrThread @ (pc, self)
    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0                    @ translation exists?
    beq    2f                       @ no - request trace selection below
    mov    r1,rINST                 @ r1<- chain cell address
    bl     dvmJitChain              @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                    @ successful chain?
    bxne   r0                       @ continue native execution
    b      toInterpreter            @ didn't chain - resume with interpreter
202
/* No translation, so request one if profiling isn't disabled */
2:
    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
    ldr    r0, [rSELF, #offThread_pJitProfTable]
    FETCH_INST()
    cmp    r0, #0                      @ profiling enabled?
    movne  r2,#kJitTSelectRequestHot   @ ask for trace selection
    bne    common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
213
/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.  If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    rPC,[lr, #-1]           @ get our target PC
    add    rINST,lr,#-5            @ save start of chain branch
    add    rINST,#-4               @ .. which is 9 bytes back
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNormal
#endif
    mov    r0,rPC
    mov    r1,rSELF
    bl     dvmJitGetTraceAddrThread @ (pc, self)
    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    toInterpreter            @ go if not, otherwise do chain
    mov    r1,rINST                 @ r1<- chain cell address
    bl     dvmJitChain              @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @  in case target is HANDLER_INTERPRET
    cmp    r0,#0                    @ successful chain?
    bxne   r0                       @ continue native execution
    b      toInterpreter            @ didn't chain - resume with interpreter
249
/*
 * Return from the translation cache to the interpreter to do method
 * invocation.  Check if a translation exists for the callee, but don't
 * chain to it.  Unlike dvmJitToInterpNoChain, when no translation is
 * found this path re-enters the interpreter directly without trace
 * profiling.
 */
    .global dvmJitToInterpNoChainNoProfile
dvmJitToInterpNoChainNoProfile:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    mov    r0,rPC
    mov    r1,rSELF
    bl     dvmJitGetTraceAddrThread @ (pc, self)
    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @  in case target is HANDLER_INTERPRET
    cmp    r0,#0                    @ translation found?
    bxne   r0                       @ continue native execution if so
    EXPORT_PC()
    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
    FETCH_INST()
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
272
/*
 * Return from the translation cache to the interpreter to do method
 * invocation.  Check if a translation exists for the callee, but don't
 * chain to it.  If no translation is found, falls through (past the
 * #endif below) into toInterpreter.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    mov    r0,rPC
    mov    r1,rSELF
    bl     dvmJitGetTraceAddrThread @ (pc, self)
    str    r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @  in case target is HANDLER_INTERPRET
    cmp    r0,#0                    @ translation found?
    bxne   r0                       @ continue native execution if so
    @ no translation: fall through to toInterpreter
290#endif
291
/*
 * No translation, restore interpreter regs and start interpreting.
 * rSELF & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load pJitProfTable into r0 for the profile
 * check that follows.
 */
toInterpreter:
    EXPORT_PC()
    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
    FETCH_INST()
    ldr    r0, [rSELF, #offThread_pJitProfTable]
    @ (removed a second, redundant load of rIBASE from curHandlerTable --
    @ nothing between the two loads modified rIBASE or the table pointer)
    @ NOTE: intended fallthrough to common_testUpdateProfile
305
/*
 * Similar to common_updateProfile, but tests for a null pJitProfTable
 * first (the JIT may be switched off).
 * On entry: r0 holds pJitProfTable, rINST is loaded, rPC is current and
 * rIBASE has been recently refreshed.
 */
common_testUpdateProfile:
    cmp     r0, #0               @ JIT switched off?
    beq     4f                   @ return to interp if so
314
/*
 * Common code to update the potential trace-start counter, and initiate
 * a trace-build if appropriate.
 * On entry here:
 *    r0    <= pJitProfTable (verified non-NULL)
 *    rPC   <= Dalvik PC
 *    rINST <= next instruction
 */
common_updateProfile:
    eor     r3,rPC,rPC,lsr #12 @ cheap, but fast hash function
    lsl     r3,r3,#(32 - JIT_PROF_SIZE_LOG_2)          @ shift out excess bits
    ldrb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter
    GET_INST_OPCODE(ip)
    subs    r1,r1,#1           @ decrement counter
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it
    GOTO_OPCODE_IFNE(ip)       @ if not at threshold, go interpret; else fall through

    /* Counter hit zero: looks like a hot trace head.  Reset the counter. */
    ldr     r1, [rSELF, #offThread_jitThreshold]
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov     r0,rPC
    mov     r1,rSELF
    bl      dvmJitGetTraceAddrThread    @ (pc, self)
    str     r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @  in case target is HANDLER_INTERPRET
    cmp     r0,#0                       @ translation found?
#if !defined(WITH_SELF_VERIFICATION)
    bxne    r0                          @ jump to the translation
    mov     r2,#kJitTSelectRequest      @ ask for trace selection
    @ fall-through to common_selectTrace
#else
    moveq   r2,#kJitTSelectRequest      @ ask for trace selection
    beq     common_selectTrace
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov     r10, r0                     @ save target
    bl      dvmCompilerGetInterpretTemplate
    cmp     r0, r10                     @ special case?
    bne     jitSVShadowRunStart         @ set up self verification shadow space
    @ Need to clear the inJitCodeCache flag
    mov    r3, #0                       @ 0 means not in the JIT code cache
    str    r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif
366
/*
 * Request that the interpreter start building a trace.
 * On entry:
 *  r2 is jit state (trace-selection request code).
 */
common_selectTrace:
    ldrh    r0,[rSELF,#offThread_subMode]
    ands    r0, #(kSubModeJitTraceBuild | kSubModeJitSV)
    bne     3f                         @ already doing JIT work, continue
    str     r2,[rSELF,#offThread_jitState]
    mov     r0, rSELF
/*
 * Call out to validate trace-building request.  If successful,
 * rIBASE will be swapped to send us into single-stepping trace
 * building mode, so we need to refresh before we continue.
 */
    EXPORT_PC()
    SAVE_PC_FP_TO_SELF()                 @ copy of pc/fp to Thread
    bl      dvmJitCheckTraceRequest
3:
    FETCH_INST()
    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
4:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)
    /* no return */
392#endif
393
394#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry:
 *    rPC, rFP, rSELF: the values that they should contain
 *    r10: the address of the target translation.
 */
jitSVShadowRunStart:
    mov     r0,rPC                      @ r0<- program counter
    mov     r1,rFP                      @ r1<- frame pointer
    mov     r2,rSELF                    @ r2<- self (Thread) pointer
    mov     r3,r10                      @ r3<- target translation
    bl      dvmSelfVerificationSaveState @ save registers to shadow space
    ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
    bx      r10                         @ jump to the translation
410
/*
 * Restore PC, registers, and interpreter state to original values
 * before jumping back to the interpreter.
 * On entry:
 *   r0:  dPC
 *   r2:  self verification state (kSVS* code)
 */
jitSVShadowRunEnd:
    mov    r1,rFP                        @ pass ending fp
    mov    r3,rSELF                      @ pass self ptr for convenience
    bl     dvmSelfVerificationRestoreState @ restore pc and fp values
    LOAD_PC_FP_FROM_SELF()               @ restore pc, fp
    ldr    r1,[r0,#offShadowSpace_svState] @ get self verification state
    cmp    r1,#0                         @ check for punt condition
    beq    1f                            @ punt: exit without re-arming SV
    @ Set up SV single-stepping
    mov    r0, rSELF
    mov    r1, #kSubModeJitSV
    bl     dvmEnableSubMode              @ (self, subMode)
    mov    r2,#kJitSelfVerification      @ ask for self verification
    str    r2,[rSELF,#offThread_jitState]
    @ intentional fallthrough
1:                                       @ exit to interpreter without check
    EXPORT_PC()
    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
439#endif
440
/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 * It will end this interpreter activation, and return to the caller
 * of dvmMterpStdRun.
 *
 * State registers will be saved to the "thread" area before bailing,
 * for debugging purposes.
 */
common_gotoBail:
    SAVE_PC_FP_TO_SELF()                @ export state to "thread"
    mov     r0, rSELF                   @ r0<- self ptr
    b       dvmMterpStdBail             @ call(self, changeInterp)
453
/*
 * The JIT's invoke method needs to remember the callsite class and
 * target pair.  Save them here so that they are available to
 * dvmCheckJit following the interpretation of this invoke.
 * On entry: r0 = methodToCall, r9 = "this" object (may be null).
 */
#if defined(WITH_JIT)
save_callsiteinfo:
    cmp     r9, #0                      @ null "this"?
    ldrne   r9, [r9, #offObject_clazz]  @ r9<- this->clazz if non-null
    str     r0, [rSELF, #offThread_methodToCall]
    str     r9, [rSELF, #offThread_callsiteClass]
    bx      lr
#endif
467
/*
 * Common code for jumbo method invocation.
 * NOTE: this adjusts rPC to account for the difference in instruction width.
 * As a result, the savedPc in the stack frame will not be wholly accurate. So
 * long as that is only used for source file line number calculations, we're
 * okay.
 */
common_invokeMethodJumboNoThis:
#if defined(WITH_JIT)
 /* On entry: r0 is "Method* methodToCall" */
    mov     r9, #0                      @ clear "this"
#endif
common_invokeMethodJumbo:
 /* On entry: r0 is "Method* methodToCall", r9 is "this" */
.LinvokeNewJumbo:
#if defined(WITH_JIT)
    ldrh    r1, [rSELF, #offThread_subMode]
    ands    r1, #kSubModeJitTraceBuild  @ building a trace?
    blne    save_callsiteinfo           @ yes - record callsite info
#endif
    @ prepare to copy args to "outs" area of current frame
    add     rPC, rPC, #4                @ adjust pc to make return consistent
    FETCH(r2, 1)                        @ r2<- BBBB (arg count)
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    cmp     r2, #0                      @ no args?
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC
    b       .LinvokeRangeArgs           @ handle args like invoke range
496
/*
 * Common code for method invocation with range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", r9 is "this"
 */
common_invokeMethodRange:
.LinvokeNewRange:
#if defined(WITH_JIT)
    ldrh    r1, [rSELF, #offThread_subMode]
    ands    r1, #kSubModeJitTraceBuild  @ building a trace?
    blne    save_callsiteinfo           @ yes - record callsite info
#endif
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC

.LinvokeRangeArgs:
    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    b       .LinvokeArgsDone
526
/*
 * Common code for method invocation without range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", r9 is "this"
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
#if defined(WITH_JIT)
    ldrh    r1, [rSELF, #offThread_subMode]
    ands    r1, #kSubModeJitTraceBuild  @ building a trace?
    blne    save_callsiteinfo           @ yes - record callsite info
#endif
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r2=count, r10=outs
    @ Computed goto: each numbered slot below is 4 instructions (16 bytes);
    @ "add pc" reads pc as current insn + 8 (ARM state), so the bl after it
    @ is skipped and count==5 lands on slot 5, count==1 on slot 1, etc.
.LinvokeNonRange:
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone
572
/*
 * Arguments have been copied to the "outs" area.  Build the callee's stack
 * frame, check for stack overflow and native methods, then publish the new
 * interpreter state and dispatch (or hand off to the profiling / native
 * paths).  On entry: r0 = methodToCall.
 */
.LinvokeArgsDone: @ r0=methodToCall
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
    ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]  @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rSELF, #offThread_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
    ldrh    lr, [rSELF, #offThread_subMode]  @ lr<- subMode flags (checked below)
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blo     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]

    @ Profiling?
    cmp     lr, #0                      @ any special modes happening?
    bne     2f                          @ go if so
1:
    tst     r3, #ACC_NATIVE             @ native method?
    bne     .LinvokeNative

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                        @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                         @ publish new rPC

    @ Update state values for the new method
    @ r0=methodToCall, r1=newFp, r3=newMethodClass, r9=newINST
    str     r0, [rSELF, #offThread_method]    @ self->method = methodToCall
    str     r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
    mov     r2, #1
    str     r2, [rSELF, #offThread_debugIsMethodEntry]
#if defined(WITH_JIT)
    ldr     r0, [rSELF, #offThread_pJitProfTable]
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [rSELF, #offThread_curFrame]   @ curFrame = newFp
    cmp     r0,#0                           @ JIT profiling active?
    bne     common_updateProfile            @ yes - update trace counters
    GOTO_OPCODE(ip)                         @ jump to next instruction
#else
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [rSELF, #offThread_curFrame]   @ curFrame = newFp
    GOTO_OPCODE(ip)                         @ jump to next instruction
#endif
651
2:
    @ Profiling - record method entry.  r0: methodToCall
    stmfd   sp!, {r0-r3}                @ preserve r0-r3
    str     rPC, [rSELF, #offThread_pc] @ update interpSave.pc
    mov     r1, r0
    mov     r0, rSELF
    bl      dvmReportInvoke             @ (self, method)
    ldmfd   sp!, {r0-r3}                @ restore r0-r3
    b       1b                          @ resume normal invoke path
661
.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldrh    lr, [rSELF, #offThread_subMode]           @ lr<- subMode flags
    ldr     r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
    str     r1, [rSELF, #offThread_curFrame]   @ curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFp (points to args)
    add     r1, rSELF, #offThread_retval  @ r1<- &retval
    mov     r3, rSELF                   @ arg3<- self

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    cmp     lr, #0                      @ any special SubModes active?
    bne     11f                         @ go handle them if so
    ldr     ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    blx     ip                          @ call the native method
7:

    @ native return; r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
    ldr     r1, [rSELF, #offThread_exception] @ check for exception
    str     rFP, [rSELF, #offThread_curFrame]  @ curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
    bne     common_exceptionThrown      @ no, handle exception

    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

11:
    @ Slow path: sub-mode hooks active around the native call.
    @ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
    stmfd   sp!, {r0-r3}                @ save all but subModes
    mov     r0, r2                      @ r0<- methodToCall
    mov     r1, rSELF
    mov     r2, rFP
    bl      dvmReportPreNativeInvoke    @ (methodToCall, self, fp)
    ldmfd   sp, {r0-r3}                 @ refresh.  NOTE: no sp autoincrement

    @ Call the native method
    ldr     ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    blx     ip

    @ Restore the pre-call arguments
    ldmfd   sp!, {r0-r3}                @ r2<- methodToCall (others unneeded)

    @ Finish up any post-invoke subMode requirements
    mov     r0, r2                      @ r0<- methodToCall
    mov     r1, rSELF
    mov     r2, rFP
    bl      dvmReportPostNativeInvoke   @ (methodToCall, self, fp)
    b       7b                          @ resume common return handling
726
.LstackOverflow:    @ r0=methodToCall
    mov     r1, r0                      @ r1<- methodToCall
    mov     r0, rSELF                   @ r0<- self
    bl      dvmHandleStackOverflow      @ (self, methodToCall)
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
    .size   dalvik_mterp, .-dalvik_mterp
#endif
736

    /*
     * Common code for method invocation, calling through "glue code".
     * NOTE: dead code -- assembled out via the .if 0 / .endif below.
     *
     * TODO: now that we have range and non-range invoke handlers, this
     *       needs to be split into two.  Maybe just create entry points
     *       that set r9 and jump here?
     *
     * On entry:
     *  r0 is "Method* methodToCall", the method we're trying to call
     *  r9 is "bool methodCallRange", indicating if this is a /range variant
     */
     .if    0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rSELF                   @ A0<- self
    SAVE_PC_FP_TO_SELF()                @ export state to "self"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif
763
764
765
/*
 * Common code for handling a return instruction.
 *
 * Pops the current frame and resumes execution in the caller, or bails
 * out of the interpreter if the caller is a break frame.
 *
 * This does not return.
 */
common_returnFromMethod:
.LreturnNew:
    ldrh    lr, [rSELF, #offThread_subMode]  @ lr<- subMode flags
    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- save area of the exiting frame
    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
    cmp     lr, #0                      @ any special subMode handling needed?
    bne     19f                         @ yes, report the return first (19:)
14:
    ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    cmp     r2, #0                      @ is this a break frame?
#if defined(WORKAROUND_CORTEX_A9_745320)
    /* Don't use conditional loads if the HW defect exists */
    beq     15f
    ldr     r10, [r2, #offMethod_clazz] @ r10<- method->clazz
15:
#else
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
#endif
    beq     common_gotoBail             @ break frame, bail out completely

    ldr     rIBASE, [rSELF, #offThread_curHandlerTable]  @ refresh rIBASE
    PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
    str     r2, [rSELF, #offThread_method]@ self->method = newSave->method
    ldr     r1, [r10, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
    str     rFP, [rSELF, #offThread_curFrame]  @ curFrame = fp
#if defined(WITH_JIT)
    ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rSELF, #offThread_methodClassDex]
    str     r10, [rSELF, #offThread_inJitCodeCache]  @ may return to JIT'ed land
    cmp     r10, #0                      @ caller is compiled code
    blxne   r10                          @ rejoin compiled caller at returnAddr
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rSELF, #offThread_methodClassDex]
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

19:
    @ Handle special actions (debugger/profiler notification)
    @ On entry, r0: StackSaveArea
    ldr     r1, [r0, #offStackSaveArea_prevFrame]  @ r1<- prevFP
    str     rPC, [rSELF, #offThread_pc] @ update interpSave.pc
    str     r1, [rSELF, #offThread_curFrame]   @ update interpSave.curFrame
    mov     r0, rSELF
    bl      dvmReportReturn             @ (self)
    SAVEAREA_FROM_FP(r0, rFP)           @ restore StackSaveArea (r0 clobbered)
    b       14b                         @ continue with normal frame pop
824
    /*
     * Return handling, calls through "glue code".
     *
     * NOTE: assembled out (.if 0) -- kept for reference only.
     */
     .if    0
.LreturnOld:
    SAVE_PC_FP_TO_SELF()                @ export state
    mov     r0, rSELF                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif
835
836
/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
     .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:

    EXPORT_PC()

    mov     r0, rSELF
    bl      dvmCheckSuspendPending      @ (self)

    ldr     r9, [rSELF, #offThread_exception] @ r9<- self->exception
    mov     r1, rSELF                   @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    ldrh    r2, [rSELF, #offThread_subMode]  @ get subMode flags
    mov     r3, #0                      @ r3<- NULL
    str     r3, [rSELF, #offThread_exception] @ self->exception = NULL

    @ Special subMode?
    cmp     r2, #0                      @ any special subMode handling needed?
    bne     7f                          @ go if so
8:
    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false ("scan?" arg)
    str     ip, [sp, #-4]!              @ *--sp = &fp (5th arg, on the stack)
    ldr     r1, [rSELF, #offThread_method] @ r1<- self->method
    mov     r0, rSELF                   @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    ldrh    lr, [rSELF, #offThread_subMode]  @ lr<- subMode flags
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [rSELF, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ stash catchRelPc result in rFP
    mov     r0, rSELF                   @ r0<- self
    mov     r1, r9                      @ r1<- exception
    bl      dvmCleanupStackOverflow     @ (self, exception)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally          @ negative => no local handler

    /* adjust locals to match self->interpSave.curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
    str     r1, [rSELF, #offThread_method]  @ self->method = new method
    ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
    str     r2, [rSELF, #offThread_methodClassDex] @ self->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, rSELF                   @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    ldr    rIBASE, [rSELF, #offThread_curHandlerTable]  @ refresh rIBASE
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [rSELF, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

    @ Manage debugger bookkeeping
7:
    str     rPC, [rSELF, #offThread_pc]     @ update interpSave.pc
    str     rFP, [rSELF, #offThread_curFrame]     @ update interpSave.curFrame
    mov     r0, rSELF                       @ arg0<- self
    mov     r1, r9                          @ arg1<- exception
    bl      dvmReportExceptionThrow         @ (self, exception)
    b       8b                              @ resume with normal handling
933
.LnotCaughtLocally: @ r9=exception
    @ No handler in this method; restore the exception and bail so the
    @ caller's interpreter activation can deal with it.
    /* fix stack overflow if necessary */
    ldrb    r1, [rSELF, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, rSELF                   @ if yes: r0<- self
    movne   r1, r9                      @ if yes: r1<- exception
    blne    dvmCleanupStackOverflow     @ if yes: call(self, exception)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rSELF, #offThread_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1                  @ r1<- offset in code units
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!              @ push line number (vararg)
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rSELF, #offThread_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!              @ push source file name (vararg)
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
    @ NOTE(review): the two words pushed above are never popped; appears OK
    @ only because we bail out below -- confirm.
#endif
    str     r9, [rSELF, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, rSELF                   @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    b       common_gotoBail             @ bail out
971
972
    /*
     * Exception handling, calls through "glue code".
     *
     * NOTE: assembled out (.if 0) -- kept for reference only.
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_SELF()                @ export state
    mov     r0, rSELF                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif
983
#if defined(WITH_JIT)
    /*
     * If the JIT is actively building a trace we need to make sure
     * that the field is fully resolved before including the current
     * instruction.
     *
     * On entry:
     *     r10: &dvmDex->pResFields[field]
     *     r0:  field pointer (must preserve)
     */
common_verifyField:
    ldrh    r3, [rSELF, #offThread_subMode]  @ r3 <- subMode flags (halfword)
    ands    r3, #kSubModeJitTraceBuild       @ trace-build mode active?
    bxeq    lr                          @ Not building trace, continue
    ldr     r1, [r10]                   @ r1<- reload resolved StaticField ptr
    cmp     r1, #0                      @ resolution complete?
    bxne    lr                          @ yes, continue
    stmfd   sp!, {r0-r2,lr}             @ save regs (r0 must be preserved)
    mov     r0, rSELF
    mov     r1, rPC
    bl      dvmJitEndTraceSelect        @ (self,pc) end trace before this inst
    ldmfd   sp!, {r0-r2, lr}
    bx      lr                          @ return
#endif
1008
/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_SELF()              @ pull rPC and rFP out of thread
    ldr     rIBASE, [rSELF, #offThread_curHandlerTable]  @ refresh rIBASE
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
1019
/*
 * Invalid array index. Note that our calling convention is strange; we use r1
 * and r3 because those just happen to be the registers all our callers are
 * using. We move r3 before calling the C function, but r1 happens to match.
 * r1: index
 * r3: size
 */
common_errArrayIndex:
    EXPORT_PC()
    mov     r0, r3                      @ r0<- size (r1 already holds index)
    bl      dvmThrowArrayIndexOutOfBoundsException  @ (size, index)
    b       common_exceptionThrown
1032
/*
 * Integer divide or mod by zero.
 */
common_errDivideByZero:
    EXPORT_PC()
    ldr     r0, strDivideByZero         @ r0<- "divide by zero" message
    bl      dvmThrowArithmeticException @ (msg)
    b       common_exceptionThrown
1041
/*
 * Attempt to allocate an array with a negative size.
 * On entry: length in r1
 */
common_errNegativeArraySize:
    EXPORT_PC()
    mov     r0, r1                                @ arg0 <- len
    bl      dvmThrowNegativeArraySizeException    @ (len)
    b       common_exceptionThrown
1051
/*
 * Invocation of a non-existent method.
 * On entry: method name in r1
 */
common_errNoSuchMethod:
    EXPORT_PC()
    mov     r0, r1                      @ r0<- method name
    bl      dvmThrowNoSuchMethodError   @ (name)
    b       common_exceptionThrown
1061
/*
 * We encountered a null object when we weren't expecting one.  We
 * export the PC, throw a NullPointerException, and goto the exception
 * processing code.
 */
common_errNullObject:
    EXPORT_PC()
    mov     r0, #0                      @ r0<- NULL (no detail message)
    bl      dvmThrowNullPointerException
    b       common_exceptionThrown
1072
/*
 * For debugging, cause an immediate fault.  The source address will
 * be in lr (use a bl instruction to jump here).
 */
common_abort:
    ldr     pc, .LdeadFood              @ jump to a recognizably-bad address
.LdeadFood:
    .word   0xdeadf00d                  @ faults; value identifies the cause
1081
/*
 * Spit out a "we were here", preserving all registers.  (The attempt
 * to save ip won't work, but we need to save an even number of
 * registers for EABI 64-bit stack alignment.)
 */
    .macro  SQUEAK num
common_squeak\num:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strSqueak               @ r0<- "<%d>" format string
    mov     r1, #\num                   @ r1<- instance number to print
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endm

    @ Instantiate common_squeak0 .. common_squeak5
    SQUEAK  0
    SQUEAK  1
    SQUEAK  2
    SQUEAK  3
    SQUEAK  4
    SQUEAK  5
1103
/*
 * Spit out the number in r0, preserving registers.
 */
common_printNum:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0                      @ r1<- value to print
    ldr     r0, strSqueak               @ r0<- "<%d>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
1114
/*
 * Print a newline, preserving registers.
 */
common_printNewline:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strNewline              @ r0<- "\n" string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
1124
    /*
     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
     */
common_printHex:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0                      @ r1<- value to print
    ldr     r0, strPrintHex             @ r0<- "<%#x>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
1135
/*
 * Print the 64-bit quantity in r0-r1, preserving registers.
 */
common_printLong:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r3, r1                      @ r2/r3<- 64-bit value (EABI pair)
    mov     r2, r0
    ldr     r0, strPrintLong            @ r0<- "<%lld>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
1147
/*
 * Print full method info.  Pass the Method* in r0.  Preserves regs.
 */
common_printMethod:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpPrintMethod         @ (method)
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
1156
/*
 * Call a C helper function that dumps regs and possibly some
 * additional info.  Requires the C function to be compiled in.
 *
 * NOTE: assembled out (.if 0) -- enable only when the helper exists.
 */
    .if     0
common_dumpRegs:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpDumpArmRegs
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endif
1168
#if 0
/*
 * Experiment on VFP mode.  (Compiled out.)
 *
 * uint32_t setFPSCR(uint32_t val, uint32_t mask)
 *
 * Updates the bits specified by "mask", setting them to the values in "val".
 */
setFPSCR:
    and     r0, r0, r1                  @ make sure no stray bits are set
    fmrx    r2, fpscr                   @ get VFP reg
    mvn     r1, r1                      @ bit-invert mask
    and     r2, r2, r1                  @ clear masked bits
    orr     r2, r2, r0                  @ set specified bits
    fmxr    fpscr, r2                   @ set VFP reg
    mov     r0, r2                      @ return new value
    bx      lr

    .align  2
    .global dvmConfigureFP
    .type   dvmConfigureFP, %function
dvmConfigureFP:
    stmfd   sp!, {ip, lr}
    /* 0x03000000 sets DN/FZ */
    /* 0x00009f00 clears the six exception enable flags */
    bl      common_squeak0
    mov     r0, #0x03000000             @ r0<- 0x03000000
    add     r1, r0, #0x9f00             @ r1<- 0x03009f00
    bl      setFPSCR
    ldmfd   sp!, {ip, pc}
#endif
1200
1201
/*
 * String references, must be close to the code that uses them.
 * Each word holds the address of the corresponding .Lstr* literal below.
 */
    .align  2
strDivideByZero:
    .word   .LstrDivideByZero           @ -> "divide by zero"
strLogTag:
    .word   .LstrLogTag                 @ -> "mterp"
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline                @ -> "\n"
strSqueak:
    .word   .LstrSqueak                 @ -> "<%d>"
strPrintHex:
    .word   .LstrPrintHex               @ -> "<%#x>"
strPrintLong:
    .word   .LstrPrintLong              @ -> "<%lld>"
1222/*
1223 * Zero-terminated ASCII string data.
1224 *
1225 * On ARM we have two choices: do like gcc does, and LDR from a .word
1226 * with the address, or use an ADR pseudo-op to get the address
1227 * directly.  ADR saves 4 bytes and an indirection, but it's using a
1228 * PC-relative addressing mode and hence has a limited range, which
1229 * makes it not work well with mergeable string sections.
1230 */
1231    .section .rodata.str1.4,"aMS",%progbits,1
1232
1233.LstrBadEntryPoint:
1234    .asciz  "Bad entry point %d\n"
1235.LstrFilledNewArrayNotImpl:
1236    .asciz  "filled-new-array only implemented for objects and 'int'"
1237.LstrDivideByZero:
1238    .asciz  "divide by zero"
1239.LstrLogTag:
1240    .asciz  "mterp"
1241.LstrExceptionNotCaughtLocally:
1242    .asciz  "Exception %s from %s:%d not caught locally\n"
1243
1244.LstrNewline:
1245    .asciz  "\n"
1246.LstrSqueak:
1247    .asciz  "<%d>"
1248.LstrPrintHex:
1249    .asciz  "<%#x>"
1250.LstrPrintLong:
1251    .asciz  "<%lld>"
1252