1/*
2 * ===========================================================================
3 *  Common subroutines and data
4 * ===========================================================================
5 */
6
7    .text
8    .align 2
9
10#if defined(WITH_JIT)
11#if defined(WITH_SELF_VERIFICATION)
12
/*
 * "longjmp" to a translation after single-stepping.  Before returning
 * to translation, must save state for self-verification.
 * On entry:
 *    a0: Thread* self
 *    a1: Dalvik PC to resume at
 *    a2: Dalvik frame pointer
 */
    .global dvmJitResumeTranslation             # (Thread* self, u4* dFP)
dvmJitResumeTranslation:
    move    rSELF, a0                           # restore self
    move    rPC, a1                             # restore Dalvik pc
    move    rFP, a2                             # restore Dalvik fp
    lw      rBIX, offThread_jitResumeNPC(rSELF) # rBIX <- native resume address
    sw      zero, offThread_jitResumeNPC(rSELF) # reset resume address
    lw      sp, offThread_jitResumeNSP(rSELF)   # cut back native stack
    b       jitSVShadowRunStart                 # resume as if cache hit
                                                # expects resume addr in rBIX
27
/*
 * Punt from JIT'ed code back to the interpreter (self-verification build).
 * Does not return.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    li        a2, kSVSPunt                 #  a2 <- interpreter entry point
    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
    b         jitSVShadowRunEnd            #  doesn't return
33
/*
 * Return to the interpreter to execute one instruction (self-verification
 * build), remembering where to resume in the translation.
 * On entry:
 *    a0: Dalvik PC of instruction to interpret
 *    a1: Dalvik PC of the resume instruction
 *    ra: native resume point in the translation
 * Does not return.
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    move      rPC, a0                      # set up dalvik pc
    EXPORT_PC()
    sw        ra, offThread_jitResumeNPC(rSELF)  # record native resume point
    sw        a1, offThread_jitResumeDPC(rSELF)  # record Dalvik resume pc
    li        a2, kSVSSingleStep           #  a2 <- interpreter entry point
    b         jitSVShadowRunEnd            #  doesn't return
42
/*
 * Return to the interpreter (self-verification build) without chaining
 * and without profiling the target.  Does not return.
 */
    .global dvmJitToInterpNoChainNoProfile
dvmJitToInterpNoChainNoProfile:
    move      a0, rPC                      #  pass our target PC
    li        a2, kSVSNoProfile            #  a2 <- interpreter entry point
    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
    b         jitSVShadowRunEnd            #  doesn't return
49
/*
 * Trace-select exit without chaining (self-verification build).  The
 * current rPC is the target.  Does not return.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
    move      a0, rPC                      #  pass our target PC
    li        a2, kSVSTraceSelect          #  a2 <- interpreter entry point
    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
    b         jitSVShadowRunEnd            #  doesn't return
56
/*
 * Trace-select exit (self-verification build).  The Dalvik target PC is
 * stored in the word at the return address (ra).  Does not return.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    lw        a0, 0(ra)                   #  a0 <- target PC (word following call)
    li        a2, kSVSTraceSelect          #  a2 <- interpreter entry point
    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
    b         jitSVShadowRunEnd            #  doesn't return
63
/*
 * Backward-branch exit (self-verification build).  The Dalvik target PC is
 * stored in the word at the return address (ra).  Does not return.
 */
    .global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
    lw        a0, 0(ra)                   #  a0 <- target PC (word following call)
    li        a2, kSVSBackwardBranch       #  a2 <- interpreter entry point
    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
    b         jitSVShadowRunEnd            #  doesn't return
70
/*
 * Normal exit from a translation (self-verification build).  The Dalvik
 * target PC is stored in the word at the return address (ra).
 * Does not return.
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    lw        a0, 0(ra)                   #  a0 <- target PC (word following call)
    li        a2, kSVSNormal               #  a2 <- interpreter entry point
    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
    b         jitSVShadowRunEnd            #  doesn't return
77
/*
 * Exit from a translation without chaining (self-verification build).
 * The current rPC is the target.  Does not return.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    move      a0, rPC                      #  pass our target PC
    li        a2, kSVSNoChain              #  a2 <- interpreter entry point
    sw        zero, offThread_inJitCodeCache(rSELF) #  Back to the interp land
    b         jitSVShadowRunEnd            #  doesn't return
84#else                                   /*  WITH_SELF_VERIFICATION */
85
86
/*
 * "longjmp" to a translation after single-stepping.
 * On entry:
 *    a0: Thread* self
 *    a1: Dalvik PC to resume at
 *    a2: Dalvik frame pointer
 */
    .global dvmJitResumeTranslation             # (Thread* self, u4* dFP)
dvmJitResumeTranslation:
    move    rSELF, a0                           # restore self
    move    rPC, a1                             # restore Dalvik pc
    move    rFP, a2                             # restore Dalvik fp
    lw      a0, offThread_jitResumeNPC(rSELF)   # a0 <- native resume address
    sw      zero, offThread_jitResumeNPC(rSELF) # reset resume address
    lw      sp, offThread_jitResumeNSP(rSELF)   # cut back native stack
    jr      a0                                  # resume translation
99
100
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction. We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bounce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 * On entry:
 *    a0: Dalvik PC to resume interpreting at
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    lw        gp, STACK_OFFSET_GP(sp)      # restore gp from stack
    move      rPC, a0                      # rPC <- resume point
#if defined(WITH_JIT_TUNING)
    move      a0, ra                       # pass punt site for statistics
    JAL(dvmBumpPunt)
#endif
    EXPORT_PC()
    sw        zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
    FETCH_INST()                           # rINST <- instruction at rPC
    GET_INST_OPCODE(t0)
    GOTO_OPCODE(t0)
122
/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    rPC <= Dalvik PC of instruction to interpret
 *    a1 <= Dalvik PC of resume instruction
 *    ra <= resume point in translation
 */

    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    lw        gp, STACK_OFFSET_GP(sp)      # restore gp from stack
    move      rPC, a0                       # set up dalvik pc
    EXPORT_PC()
    sw        ra, offThread_jitResumeNPC(rSELF)  # record native resume point
    sw        sp, offThread_jitResumeNSP(rSELF)  # record native stack pointer
    sw        a1, offThread_jitResumeDPC(rSELF)  # record Dalvik resume pc
    li        a1, 1
    sw        a1, offThread_singleStepCount(rSELF) # just step once
    move      a0, rSELF
    li        a1, kSubModeCountedStep
    JAL(dvmEnableSubMode)                   # (self, subMode)
    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
    FETCH_INST()                            # rINST <- instruction at rPC
    GET_INST_OPCODE(t0)
    GOTO_OPCODE(t0)
/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used for callees.
 * Target PC is the current rPC; no chaining cell is patched.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
    lw        gp, STACK_OFFSET_GP(sp)      # restore gp from stack
#if defined(WITH_JIT_TUNING)
    JAL(dvmBumpNoChain)
#endif
    move      a0, rPC
    move      a1, rSELF
    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
    move      a0, v0                       # a0 <- translation address (or 0)
    sw        a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
    move      a1, rPC                      # arg1 of translation may need this
    move      ra, zero                     #  in case target is HANDLER_INTERPRET
    beqz      a0, 2f                       # 0 means translation does not exist
    jr        a0                           # jump to the translation
167
/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.  The Dalvik target PC is in the word at the return
 * address (ra); the chaining cell starts 8 bytes before it.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    lw        gp, STACK_OFFSET_GP(sp)      # restore gp from stack
    lw        rPC, (ra)                    #  get our target PC
    subu      rINST, ra, 8                 #  save start of chain branch
    move      a0, rPC
    move      a1, rSELF
    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
    sw        v0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
    beqz      v0, 2f                       # no translation: maybe request one
    move      a0, v0                       # a0 <- translation address
    move      a1, rINST                    # a1 <- chain cell address
    JAL(dvmJitChain)                       #  v0 <- dvmJitChain(codeAddr, chainAddr)
    move      a1, rPC                      #  arg1 of translation may need this
    move      ra, zero                     #  in case target is HANDLER_INTERPRET
    move      a0, v0
    beqz      a0, toInterpreter            #  didn't chain - resume with interpreter

    jr        a0                           #  continue native execution

/* No translation, so request one if profiling isn't disabled */
2:
    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
    lw        a0, offThread_pJitProfTable(rSELF)       # a0 <- profile table
    FETCH_INST()
    li        t0, kJitTSelectRequestHot
    movn      a2, t0, a0                   #  ask for trace selection (if profiling)
    bnez      a0, common_selectTrace
    GET_INST_OPCODE(t0)
    GOTO_OPCODE(t0)
203
/*
 * Return from the translation cache to the interpreter.
 * The word at the return address (ra) contains the target rPC value,
 * and the chaining cell starts 8 bytes before it.
 * NOTE(review): the original comment here described the ARM/Thumb BLX
 * convention ("lr will have its low-order bit set"); on MIPS there is
 * no thumb bit in ra.
 *
 * We recover the new target and then check to see if there is a
 * translation available for it.  If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    lw        gp, STACK_OFFSET_GP(sp)      # restore gp from stack
    lw        rPC, (ra)                    #  get our target PC
    subu      rINST, ra, 8                 #  save start of chain branch
#if defined(WITH_JIT_TUNING)
    JAL(dvmBumpNormal)
#endif
    move      a0, rPC
    move      a1, rSELF
    JAL(dvmJitGetTraceAddrThread)           # (pc, self)
    move      a0, v0                        # a0 <- translation address (or 0)
    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
    beqz      a0, toInterpreter            #  go if not, otherwise do chain
    move      a1, rINST                    #  a1 <- chain cell address
    JAL(dvmJitChain)                       #  v0 <- dvmJitChain(codeAddr, chainAddr)
    move      a1, rPC                      #  arg1 of translation may need this
    move      ra, zero                     #  in case target is HANDLER_INTERPRET
    move      a0, v0
    beqz      a0, toInterpreter            #  didn't chain - resume with interpreter

    jr        a0                           #  continue native execution
240
/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it, and
 * don't treat this entry as a potential trace start.
 */
    .global dvmJitToInterpNoChainNoProfile
dvmJitToInterpNoChainNoProfile:
#if defined(WITH_JIT_TUNING)
    JAL(dvmBumpNoChain)
#endif
    move      a0, rPC
    move      a1, rSELF
    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
    move      a0, v0                       # a0 <- translation address (or 0)
    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
    move      a1, rPC                      #  arg1 of translation may need this
    move      ra, zero                     #  in case target is HANDLER_INTERPRET
    beqz      a0, footer235                # no translation: interpret

    jr        a0                           #  continue native execution if so
footer235:
    EXPORT_PC()
    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
    FETCH_INST()                           # rINST <- instruction at rPC
    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
    GOTO_OPCODE(t0)                        #  jump to next instruction
266
/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 * Falls through to toInterpreter when no translation exists.
 */

    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    lw        gp, STACK_OFFSET_GP(sp)      # restore gp from stack
#if defined(WITH_JIT_TUNING)
    JAL(dvmBumpNoChain)
#endif
    move      a0, rPC
    move      a1, rSELF
    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
    move      a0, v0                       # a0 <- translation address (or 0)
    sw        a0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
    move      a1, rPC                      #  arg1 of translation may need this
    move      ra, zero                     #  in case target is HANDLER_INTERPRET
    beqz      a0, 1f                       # no translation: fall into interpreter
    jr        a0                           #  continue native execution if so
1:
288#endif                                  /*  WITH_SELF_VERIFICATION */
289
/*
 * No translation, restore interpreter regs and start interpreting.
 * rSELF & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load the address of the JitProfTable into a0.
 */

toInterpreter:
    EXPORT_PC()
    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
    FETCH_INST()                           # rINST <- instruction at rPC
    lw        a0, offThread_pJitProfTable(rSELF) # a0 <- profile table (may be 0)
    # (removed a redundant second reload of rIBASE from the same unchanged
    #  offThread_curHandlerTable(rSELF) slot)
    # NOTE: intended fallthrough to common_testUpdateProfile
304
/*
 * Similar to common_updateProfile, but tests for null pJitProfTable.
 * a0 holds pJitProfTable, rINST is loaded, rPC is current and
 * rIBASE has been recently refreshed.
 */

common_testUpdateProfile:

    beqz      a0, 4f                       # null profile table: just interpret
314
/*
 * Common code to update potential trace start counter, and initiate
 * a trace-build if appropriate.
 * On entry here:
 *    a0    <= pJitProfTable (verified non-NULL)
 *    rPC   <= Dalvik PC
 *    rINST <= next instruction
 */
common_updateProfile:
    srl       a3, rPC, 12                  #  cheap, but fast hash function
    xor       a3, a3, rPC
    andi      a3, a3, JIT_PROF_SIZE-1      #  eliminate excess bits
    addu      t1, a0, a3                   #  t1 <- &pJitProfTable[hash]
    lbu       a1, (t1)                     #  get counter
    GET_INST_OPCODE(t0)
    subu      a1, a1, 1                    #  decrement counter
    sb        a1, (t1)                     #  and store it
    beqz      a1, 1f                       #  reached threshold: look for trace
    GOTO_OPCODE(t0)                        #  if not threshold, fallthrough otherwise
1:
    /* Looks good, reset the counter */
    lw        a1, offThread_jitThreshold(rSELF)
    sb        a1, (t1)
    EXPORT_PC()
    move      a0, rPC
    move      a1, rSELF
    JAL(dvmJitGetTraceAddrThread)          # (pc, self)
    move      a0, v0                       #  a0 <- translation address (or 0)
    sw        v0, offThread_inJitCodeCache(rSELF) #  set the inJitCodeCache flag
    move      a1, rPC                      #  arg1 of translation may need this
    move      ra, zero                     #  in case target is HANDLER_INTERPRET

#if !defined(WITH_SELF_VERIFICATION)
    li        t0, kJitTSelectRequest       #  ask for trace selection
    movz      a2, t0, a0                   #  a2 <- request code iff no translation
    beqz      a0, common_selectTrace       #  no translation: request a trace
    jr        a0                           #  jump to the translation
#else

    bne       a0, zero, skip_ask_for_trace_selection
    li        a2, kJitTSelectRequest       #  ask for trace selection
    j         common_selectTrace

skip_ask_for_trace_selection:
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    move      rBIX, a0                     #  save target
    jal       dvmCompilerGetInterpretTemplate
    # Interpret-only pseudo-translation?  If not, run it under SV shadow.
    bne       v0, rBIX, jitSVShadowRunStart  #  set up self verification shadow space
    # Need to clear the inJitCodeCache flag
    sw        zero, offThread_inJitCodeCache(rSELF) #  back to the interp land
    GET_INST_OPCODE(t0)
    GOTO_OPCODE(t0)
    /* no return */
#endif
374
/*
 * Validate and begin a trace-build request.
 * On entry:
 *  a2 is the jit state (trace-selection request code).
 */

common_selectTrace:
    lhu        a0, offThread_subMode(rSELF)
    andi       a0, (kSubModeJitTraceBuild | kSubModeJitSV)
    bnez       a0, 3f                      # already doing JIT work, continue
    sw         a2, offThread_jitState(rSELF)
    move       a0, rSELF

/*
 * Call out to validate trace-building request.  If successful,
 * rIBASE will be swapped to send us into single-stepping trace
 * building mode, so we need to refresh before we continue.
 */

    EXPORT_PC()
    SAVE_PC_TO_SELF()
    SAVE_FP_TO_SELF()
    JAL(dvmJitCheckTraceRequest)
3:
    FETCH_INST()                           # rINST <- instruction at rPC
    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh (may have swapped)
4:
    GET_INST_OPCODE(t0)                    # extract opcode from rINST
    GOTO_OPCODE(t0)
    /* no return */
404#endif
405
406#if defined(WITH_SELF_VERIFICATION)
407
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry:
 *    rPC, rFP, rSELF: the values that they should contain
 *    rBIX: the address of the target translation.
 */
jitSVShadowRunStart:
    move      a0, rPC                      #  a0 <- program counter
    move      a1, rFP                      #  a1 <- frame pointer
    move      a2, rSELF                    #  a2 <- self (Thread) pointer
    move      a3, rBIX                     #  a3 <- target translation
    jal       dvmSelfVerificationSaveState #  save registers to shadow space
    lw        rFP, offShadowSpace_shadowFP(v0) #  rFP <- fp in shadow space
    jr        rBIX                         #  jump to the translation
423
/*
 * Restore PC, registers, and interpState to original values
 * before jumping back to the interpreter.
 */
jitSVShadowRunEnd:
    move      a1, rFP                      #  pass ending fp
    move      a3, rSELF                    #  pass self ptr for convenience
    jal       dvmSelfVerificationRestoreState #  restore pc and fp values
    LOAD_PC_FP_FROM_SELF()                 #  restore pc, fp
    lw        a1, offShadowSpace_svState(a0) #  get self verification state
                                           #  NOTE(review): relies on a0 holding
                                           #  the shadow-space pointer here - confirm
    beq       a1, zero, 1f                 #  check for punt condition

    # Setup SV single-stepping
    move      a0, rSELF
    li        a1, kSubModeJitSV
    JAL(dvmEnableSubMode)                  # (self, subMode)
    li        a2, kJitSelfVerification     #  ask for self verification
    sw        a2, offThread_jitState(rSELF)
    # Intentional fallthrough

1:
    # exit to interpreter without check
    EXPORT_PC()
    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
    FETCH_INST()                           # rINST <- instruction at rPC
    GET_INST_OPCODE(t0)
    GOTO_OPCODE(t0)
451#endif
452
/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 * It will end this interpreter activation, and return to the caller
 * of dvmMterpStdRun.
 *
 * State registers will be saved to the "thread" area before bailing,
 * for debugging purposes.
 */
    .ent common_gotoBail
common_gotoBail:
    SAVE_PC_FP_TO_SELF()                   # export state to "thread"
    move      a0, rSELF                    # a0 <- self ptr
    b         dvmMterpStdBail              # call(self, changeInterp)
    .end common_gotoBail
467
/*
 * The JIT's invoke method needs to remember the callsite class and
 * target pair.  Save them here so that they are available to
 * dvmCheckJit following the interpretation of this invoke.
 * On entry:
 *    a0: methodToCall
 *    rOBJ: "this" object (may be null for static invokes)
 */
#if defined(WITH_JIT)
save_callsiteinfo:
    beqz    rOBJ, 1f                       # null "this": store null class
    lw      rOBJ, offObject_clazz(rOBJ)    # rOBJ <- this->clazz
1:
    sw      a0, offThread_methodToCall(rSELF)
    sw      rOBJ, offThread_callsiteClass(rSELF)
    jr      ra
#endif
482
/*
 * Common code for method invocation with range.
 *
 * On entry:
 *  a0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodRange:
.LinvokeNewRange:
#if defined(WITH_JIT)
    lhu      a1, offThread_subMode(rSELF)
    andi     a1, kSubModeJitTraceBuild     # trace building active?
    beqz     a1, 1f
    JAL(save_callsiteinfo)                 # record callsite class/target for JIT
#endif
    # prepare to copy args to "outs" area of current frame
1:
    GET_OPA(a2)                            # a2 <- AA (arg word count)
    SAVEAREA_FROM_FP(rBIX, rFP)              #  rBIX <- stack save area
    beqz      a2, .LinvokeArgsDone         # no args: skip the copy loop
    FETCH(a1, 2)                           #  a1 <- CCCC (first arg register)
.LinvokeRangeArgs:
    # a0=methodToCall, a1=CCCC, a2=count, rBIX=outs
    # (very few methods have > 10 args; could unroll for common cases)
    EAS2(a3, rFP, a1)                      # a3 <- &fp[CCCC]
    sll       t0, a2, 2                    # t0 <- count in bytes
    subu      rBIX, rBIX, t0               # rBIX <- base of outs area

1:
    lw        a1, 0(a3)                    # copy one arg word to outs
    addu      a3, a3, 4
    subu      a2, a2, 1
    sw        a1, 0(rBIX)
    addu      rBIX, 4
    bnez      a2, 1b
    b         .LinvokeArgsDone
518
/*
 * Common code for method invocation without range.
 *
 * On entry:
 *  a0 is "Method* methodToCall", "rOBJ is this"
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
#if defined(WITH_JIT)
    lhu      a1, offThread_subMode(rSELF)
    andi     a1, kSubModeJitTraceBuild     # trace building active?
    beqz     a1, 1f
    JAL(save_callsiteinfo)                 # record callsite class/target for JIT
#endif

    # prepare to copy args to "outs" area of current frame
1:
    GET_OPB(a2)                            # a2 <- B (arg count)
    SAVEAREA_FROM_FP(rBIX, rFP)            # rBIX <- stack save area
    beqz      a2, .LinvokeArgsDone         # no args: nothing to copy
    FETCH(a1, 2)                           # a1 <- GFED (arg register nibbles)

    # a0=methodToCall, a1=GFED, a2=count,
.LinvokeNonRange:
    # dispatch on arg count; each case falls through to the next lower one
    beq       a2, 0, 0f
    beq       a2, 1, 1f
    beq       a2, 2, 2f
    beq       a2, 3, 3f
    beq       a2, 4, 4f
    beq       a2, 5, 5f

5:
    # fifth arg register comes from the A nibble of rINST
    and       t0, rINST, 0x0f00
    ESRN(t2, rFP, t0, 6)
    lw        a3, (t2)
    subu      rBIX, 4
    sw        a3, 0(rBIX)                  # push arg word onto outs

4:
    and       t0, a1, 0xf000               # G nibble
    ESRN(t2, rFP, t0, 10)
    lw        a3, (t2)
    subu      rBIX, 4
    sw        a3, 0(rBIX)

3:
    and       t0, a1, 0x0f00               # F nibble
    ESRN(t2, rFP, t0, 6)
    lw        a3, (t2)
    subu      rBIX, 4
    sw        a3, 0(rBIX)

2:
    and       t0, a1, 0x00f0               # E nibble
    ESRN(t2, rFP, t0, 2)
    lw        a3, (t2)
    subu      rBIX, 4
    sw        a3, 0(rBIX)

1:
    and       t0, a1, 0x000f               # D nibble
    EASN(t2, rFP, t0, 2)
    lw        a3, (t2)
    subu      rBIX, 4
    sw        a3, 0(rBIX)

0:
    #fall through to .LinvokeArgsDone
587
588
.LinvokeArgsDone:                          #  a0=methodToCall
    lhu       rOBJ, offMethod_registersSize(a0)  # rOBJ <- regsSize
    lhu       a3, offMethod_outsSize(a0)         # a3 <- outsSize
    lw        a2, offMethod_insns(a0)            # a2 <- method->insns
    lw        rINST, offMethod_clazz(a0)         # rINST <- method->clazz
    # find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(a1, rFP)              # a1 <- stack save area
    sll       t0, rOBJ, 2                    #  t0 <- regsSize in bytes
    subu      a1, a1, t0                   #  a1 <- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(rBIX, a1)             #  rBIX <- newSaveArea
    lw        rOBJ, offThread_interpStackEnd(rSELF) #  rOBJ <- interpStackEnd
    sll       t2, a3, 2                    #  t2 <- outsSize in bytes
    subu      t0, rBIX, t2                 #  t0 <- bottom (newsave - outsSize)
    lhu       ra, offThread_subMode(rSELF) #  ra <- subMode flags (scratch use)
    lw        a3, offMethod_accessFlags(a0) #  a3 <- methodToCall->accessFlags
    bltu      t0, rOBJ, .LstackOverflow      #  yes, this frame will overflow stack


    # set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(t0, rFP)
    sw        t0, offStackSaveArea_prevSave(rBIX)
#endif
    sw        rFP, (offStackSaveArea_prevFrame)(rBIX)
    sw        rPC, (offStackSaveArea_savedPc)(rBIX)
#if defined(WITH_JIT)
    sw        zero, (offStackSaveArea_returnAddr)(rBIX)
#endif
    sw        a0, (offStackSaveArea_method)(rBIX)
    # Profiling?
    bnez       ra, 2f                      # subMode active: report method entry
1:
    and       t2, a3, ACC_NATIVE
    bnez      t2, .LinvokeNative           # native method: separate path
    lhu       rOBJ, (a2)           # rOBJ <- load Inst from New PC
    lw        a3, offClassObject_pDvmDex(rINST)
    move      rPC, a2              # Publish new rPC
    # Update state values for the new method
    # a0=methodToCall, a1=newFp, a3=newMethodClass, rOBJ=newINST
    sw        a0, offThread_method(rSELF)
    sw        a3, offThread_methodClassDex(rSELF)
    li        a2, 1
    sw        a2, offThread_debugIsMethodEntry(rSELF)

#if defined(WITH_JIT)
    lw        a0, offThread_pJitProfTable(rSELF)
    move      rFP, a1                    # fp = newFp
    GET_PREFETCHED_OPCODE(t0, rOBJ)      # extract prefetched opcode from rOBJ
    move      rINST, rOBJ                # publish new rINST
    sw        a1, offThread_curFrame(rSELF)
    bnez      a0, common_updateProfile   # profiling on: maybe start a trace
    GOTO_OPCODE(t0)
#else
    move      rFP, a1                    # fp = newFp
    GET_PREFETCHED_OPCODE(t0, rOBJ)      # extract prefetched opcode from rOBJ
    move      rINST, rOBJ                # publish new rINST
    sw        a1, offThread_curFrame(rSELF)
    GOTO_OPCODE(t0)
#endif

2:
    # Profiling - record method entry.  a0: methodToCall
    STACK_STORE(a0, 0)
    STACK_STORE(a1, 4)
    STACK_STORE(a2, 8)
    STACK_STORE(a3, 12)
    sw       rPC, offThread_pc(rSELF)          # update interpSave.pc
    move     a1, a0
    move     a0, rSELF
    JAL(dvmReportInvoke)                       # (self, method)
    STACK_LOAD(a3, 12)                         # restore a0-a3
    STACK_LOAD(a2, 8)
    STACK_LOAD(a1, 4)
    STACK_LOAD(a0, 0)
    b        1b
.LinvokeNative:
    # Prep for the native call
    # a0=methodToCall, a1=newFp, rBIX=newSaveArea
    lhu       ra, offThread_subMode(rSELF)     # ra <- subMode flags (scratch use)
    lw        t3, offThread_jniLocal_topCookie(rSELF)
    sw        a1, offThread_curFrame(rSELF)
    sw        t3, offStackSaveArea_localRefCookie(rBIX) # newFp->localRefCookie=top
    move      a2, a0                           # a2 <- methodToCall
    move      a0, a1                           # a0 <- newFp
    addu      a1, rSELF, offThread_retval      # a1 <- &self->retval
    move      a3, rSELF
#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b         .Lskip
    .ent dalvik_mterp
dalvik_mterp:
    STACK_STORE_FULL()
.Lskip:
#endif
    bnez      ra, 11f                          # Any special SubModes active?
    lw        t9, offMethod_nativeFunc(a2)     # t9 <- methodToCall->nativeFunc
    jalr      t9                               # call the native method
    lw        gp, STACK_OFFSET_GP(sp)          # restore gp after the call
7:
    # native return; rBIX=newSaveArea
    # equivalent to dvmPopJniLocals
    lw        a0, offStackSaveArea_localRefCookie(rBIX)
    lw        a1, offThread_exception(rSELF)
    sw        rFP, offThread_curFrame(rSELF)
    sw        a0, offThread_jniLocal_topCookie(rSELF)    # new top <- old top
    bnez      a1, common_exceptionThrown       # exception pending: handle it

    FETCH_ADVANCE_INST(3)                      # advance past the invoke
    GET_INST_OPCODE(t0)
    GOTO_OPCODE(t0)
11:
    # a0=newFp, a1=&retval, a2=methodToCall, a3=self, ra=subModes
    SCRATCH_STORE(a0, 0)
    SCRATCH_STORE(a1, 4)
    SCRATCH_STORE(a2, 8)
    SCRATCH_STORE(a3, 12)
    move      a0, a2                    # a0 <- methodToCall
    move      a1, rSELF
    move      a2, rFP
    JAL(dvmReportPreNativeInvoke)       # (methodToCall, self, fp)
    SCRATCH_LOAD(a3, 12)                         # restore a0-a3
    SCRATCH_LOAD(a2, 8)
    SCRATCH_LOAD(a1, 4)
    SCRATCH_LOAD(a0, 0)

    # Call the native method
    lw       t9, offMethod_nativeFunc(a2)      # t9<-methodToCall->nativeFunc
    jalr     t9
    lw       gp, STACK_OFFSET_GP(sp)           # restore gp after the call

    # Restore the pre-call arguments
    SCRATCH_LOAD(a3, 12)                         # restore a0-a3
    SCRATCH_LOAD(a2, 8)
    SCRATCH_LOAD(a1, 4)
    SCRATCH_LOAD(a0, 0)

    # Finish up any post-invoke subMode requirements
    move      a0, a2
    move      a1, rSELF
    move      a2, rFP
    JAL(dvmReportPostNativeInvoke)      # (methodToCall, self, fp)
    b         7b                        # resume at common native-return path
731
732
.LstackOverflow:       # a0=methodToCall
    move      a1, a0                    #  a1 <- methodToCall
    move      a0, rSELF                 #  a0 <- self
    JAL(dvmHandleStackOverflow)         #  dvmHandleStackOverflow(self, methodToCall)
    b         common_exceptionThrown    #  raise the pending StackOverflowError
#ifdef ASSIST_DEBUGGER
    .end dalvik_mterp
#endif
741
    /*
     * Common code for method invocation, calling through "glue code".
     *
     * TODO: now that we have range and non-range invoke handlers, this
     *       needs to be split into two.  Maybe just create entry points
     *       that set the range flag and jump here?
     *
     * On entry:
     *  a0 is "Method* methodToCall", the method we're trying to call
     *  (the "r0"/"r9 methodCallRange" register names in the original
     *  comment were ARM leftovers; this is the MIPS port)
     */
753
/*
 * Common code for handling a return instruction.
 *
 * This does not return.
 */
common_returnFromMethod:
.LreturnNew:
    lhu       t0, offThread_subMode(rSELF)
    SAVEAREA_FROM_FP(a0, rFP)                  # a0 <- saveArea of current frame
    lw        rOBJ, offStackSaveArea_savedPc(a0) # rOBJ = saveArea->savedPc
    bnez      t0, 19f                          # subMode active: report return
14:
    lw        rFP, offStackSaveArea_prevFrame(a0) # fp = saveArea->prevFrame
    lw        a2, (offStackSaveArea_method - sizeofStackSaveArea)(rFP)
                                               # a2<- method we're returning to
    # is this a break frame?
    beqz      a2, common_gotoBail              # break frame, bail out completely

    lw        rBIX, offMethod_clazz(a2)        # rBIX<- method->clazz
    lw        rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
    PREFETCH_ADVANCE_INST(rINST, rOBJ, 3)      # advance rOBJ, update new rINST
    sw        a2, offThread_method(rSELF)      # self->method = newSave->method
    lw        a1, offClassObject_pDvmDex(rBIX) # a1<- method->clazz->pDvmDex
    sw        rFP, offThread_curFrame(rSELF)   # curFrame = fp
#if defined(WITH_JIT)
    lw         rBIX, offStackSaveArea_returnAddr(a0) # rBIX <- chained return addr
    move       rPC, rOBJ                       # publish new rPC
    sw         a1, offThread_methodClassDex(rSELF)
    sw         rBIX, offThread_inJitCodeCache(rSELF) # may return to JIT'ed land
    beqz       rBIX, 15f                       # 0 means no chained return addr;
                                               # continue in the interpreter
    move       t9, rBIX
    jalr       t9                              # return into compiled code
    lw         gp, STACK_OFFSET_GP(sp)         # restore gp after the call
15:
    GET_INST_OPCODE(t0)                        # extract opcode from rINST
    GOTO_OPCODE(t0)                            # jump to next instruction
#else
    GET_INST_OPCODE(t0)                        # extract opcode from rINST
    move       rPC, rOBJ                       # publish new rPC
    sw         a1, offThread_methodClassDex(rSELF)
    GOTO_OPCODE(t0)
#endif

19:
    # Handle special actions
    # On entry, a0: StackSaveArea
    lw         a1, offStackSaveArea_prevFrame(a0) # a1<- prevFP
    sw         rPC, offThread_pc(rSELF)        # update interpSave.pc
    sw         a1, offThread_curFrame(rSELF)   # update interpSave.curFrame
    move       a0, rSELF
    JAL(dvmReportReturn)                       # (self)
    SAVEAREA_FROM_FP(a0, rFP)                  # restore StackSaveArea
    b          14b                             # resume the normal return path
807
    .if 0
    /*
     * Return handling, calls through "glue code".
     * (Assembled out via ".if 0"; kept for reference only.)
     */
.LreturnOld:
    SAVE_PC_FP_TO_SELF()                       # export state
    move       a0, rSELF                       # arg to function
    JAL(dvmMterp_returnFromMethod)
    b          common_resumeAfterGlueCall
    .endif
818
819/*
820 * Somebody has thrown an exception.  Handle it.
821 *
822 * If the exception processing code returns to us (instead of falling
823 * out of the interpreter), continue with whatever the next instruction
824 * now happens to be.
825 *
826 * This does not return.
827 */
828    .global dvmMterpCommonExceptionThrown
829dvmMterpCommonExceptionThrown:
830common_exceptionThrown:
831.LexceptionNew:
832
833    EXPORT_PC()
834    move     a0, rSELF
835    JAL(dvmCheckSuspendPending)
836    lw       rOBJ, offThread_exception(rSELF)
837    move     a1, rSELF
838    move     a0, rOBJ
839    JAL(dvmAddTrackedAlloc)
840    lhu      a2, offThread_subMode(rSELF)
841    sw       zero, offThread_exception(rSELF)
842
843    # Special subMode?
844    bnez     a2, 7f                     # any special subMode handling needed?
8458:
846    /* set up args and a local for "&fp" */
847    sw       rFP, 20(sp)                 #  store rFP => tmp
848    addu     t0, sp, 20                  #  compute &tmp
849    sw       t0, STACK_OFFSET_ARG04(sp)  #  save it in arg4 as per ABI
850    li       a3, 0                       #  a3 <- false
851    lw       a1, offThread_method(rSELF)
852    move     a0, rSELF
853    lw       a1, offMethod_insns(a1)
854    move     a2, rOBJ
855    subu     a1, rPC, a1
856    sra      a1, a1, 1
857
858    /* call, r0 gets catchRelPc (a code-unit offset) */
859    JAL(dvmFindCatchBlock)           # call(self, relPc, exc, scan?, &fp)
860    lw        rFP, 20(sp)            # retrieve the updated rFP
861
862    /* update frame pointer and check result from dvmFindCatchBlock */
863    move      a0, v0
864    bltz      v0, .LnotCaughtLocally
865
866    /* fix earlier stack overflow if necessary; Preserve a0 */
867    lbu       a1, offThread_stackOverflowed(rSELF)
868    beqz      a1, 1f
869    move      rBIX, a0
870    move      a0, rSELF
871    move      a1, rOBJ
872    JAL(dvmCleanupStackOverflow)
873    move      a0, rBIX
874
8751:
876
877/* adjust locals to match self->interpSave.curFrame and updated PC */
878    SAVEAREA_FROM_FP(a1, rFP)           # a1<- new save area
879    lw        a1, offStackSaveArea_method(a1)
880    sw        a1, offThread_method(rSELF)
881    lw        a2, offMethod_clazz(a1)
882    lw        a3, offMethod_insns(a1)
883    lw        a2, offClassObject_pDvmDex(a2)
884    EAS1(rPC, a3, a0)
885    sw        a2, offThread_methodClassDex(rSELF)
886
887    /* release the tracked alloc on the exception */
888    move      a0, rOBJ
889    move      a1, rSELF
890    JAL(dvmReleaseTrackedAlloc)
891
892    /* restore the exception if the handler wants it */
893    lw        rIBASE, offThread_curHandlerTable(rSELF)
894    FETCH_INST()
895    GET_INST_OPCODE(t0)
896    bne       t0, OP_MOVE_EXCEPTION, 2f
897    sw        rOBJ, offThread_exception(rSELF)
8982:
899    GOTO_OPCODE(t0)
900
901    # Manage debugger bookkeeping
9027:
903    sw        rPC, offThread_pc(rSELF)
904    sw        rFP, offThread_curFrame(rSELF)
905    move      a0, rSELF
906    move      a1, rOBJ
907    JAL(dvmReportExceptionThrow)
908    b         8b
909
910.LnotCaughtLocally:                     #  rOBJ = exception
911    /* fix stack overflow if necessary */
912    lbu       a1, offThread_stackOverflowed(rSELF)
913    beqz      a1, 3f
914    move      a0, rSELF
915    move      a1, rOBJ
916    JAL(dvmCleanupStackOverflow)           #  dvmCleanupStackOverflow(self, exception)
917
9183:
919    # may want to show "not caught locally" debug messages here
920#if DVM_SHOW_EXCEPTION >= 2
921    /* call __android_log_print(prio, tag, format, ...) */
922    /* "Exception %s from %s:%d not caught locally" */
923    lw        a0, offThread_method(rSELF)
924    lw        a1, offMethod_insns(a0)
925    subu      a1, rPC, a1
926    sra       a1, a1, 1
927    JAL(dvmLineNumFromPC)
928    sw        v0, 20(sp)
929    # dvmGetMethodSourceFile(method)
930    lw        a0, offThread_method(rSELF)
931    JAL(dvmGetMethodSourceFile)
932    sw        v0, 16(sp)
933    # exception->clazz->descriptor
934    lw        a3, offObject_clazz(rOBJ)
935    lw        a3, offClassObject_descriptor(a3)
936    la        a2, .LstrExceptionNotCaughtLocally
937    la        a1, .LstrLogTag
938    li        a0, 3
939    JAL(__android_log_print)
940#endif
941    sw        rOBJ, offThread_exception(rSELF)
942    move      a0, rOBJ
943    move      a1, rSELF
944    JAL(dvmReleaseTrackedAlloc)
945    b         common_gotoBail
946
947    /*
948     * Exception handling, calls through "glue code".
949     */
950    .if     0
951.LexceptionOld:
952    SAVE_PC_TO_SELF()                # export state
953    SAVE_FP_TO_SELF()
954    move     a0, rSELF               # arg to function
955    JAL(dvmMterp_exceptionThrown)
956    b       common_resumeAfterGlueCall
957    .endif
958
#if defined(WITH_JIT)
    /*
     * If the JIT is actively building a trace we need to make sure
     * that the field is fully resolved before including the current
     * instruction.  If the resolved-field slot is still NULL, end the
     * trace before this instruction so the unresolved access is not
     * compiled into it.
     *
     * On entry:
     *     rBIX: &dvmDex->pResFields[field]
     *     a0:  field pointer (must preserve)
     */
common_verifyField:
     lhu     a3, offThread_subMode(rSELF)
     andi    a3, kSubModeJitTraceBuild  # isolate the trace-build bit
     bnez    a3, 1f                 # building a trace: go check resolution
     jr      ra                     # not building a trace: nothing to do
1:
     lw      a1, (rBIX)             # a1 <- dvmDex->pResFields[field]
     beqz    a1, 2f                 # slot NULL => unresolved: end the trace
     jr      ra                     # already resolved: safe to continue
2:
    SCRATCH_STORE(a0, 0)            # spill a0-a3/ra; a0 must survive the call
    SCRATCH_STORE(a1, 4)
    SCRATCH_STORE(a2, 8)
    SCRATCH_STORE(a3, 12)
    SCRATCH_STORE(ra, 16)
    move    a0, rSELF
    move    a1, rPC
    JAL(dvmJitEndTraceSelect)        # (self, pc) end trace before this inst
    SCRATCH_LOAD(a0, 0)
    SCRATCH_LOAD(a1, 4)
    SCRATCH_LOAD(a2, 8)
    SCRATCH_LOAD(a3, 12)
    SCRATCH_LOAD(ra, 16)
    jr      ra                       # return
#endif
994
995/*
996 * After returning from a "glued" function, pull out the updated
997 * values and start executing at the next instruction.
998 */
999common_resumeAfterGlueCall:
1000    LOAD_PC_FP_FROM_SELF()           #  pull rPC and rFP out of thread
1001    lw      rIBASE, offThread_curHandlerTable(rSELF) # refresh
1002    FETCH_INST()                     #  load rINST from rPC
1003    GET_INST_OPCODE(t0)              #  extract opcode from rINST
1004    GOTO_OPCODE(t0)                  #  jump to next instruction
1005
1006/*
1007 * Invalid array index. Note that our calling convention is strange; we use a1
1008 * and a3 because those just happen to be the registers all our callers are
1009 * using. We move a3 before calling the C function, but a1 happens to match.
1010 * a1: index
1011 * a3: size
1012 */
1013common_errArrayIndex:
1014    EXPORT_PC()
1015    move      a0, a3
1016    JAL(dvmThrowArrayIndexOutOfBoundsException)
1017    b         common_exceptionThrown
1018
1019/*
1020 * Integer divide or mod by zero.
1021 */
1022common_errDivideByZero:
1023    EXPORT_PC()
1024    la     a0, .LstrDivideByZero
1025    JAL(dvmThrowArithmeticException)
1026    b       common_exceptionThrown
1027
1028/*
1029 * Attempt to allocate an array with a negative size.
1030 * On entry: length in a1
1031 */
1032common_errNegativeArraySize:
1033    EXPORT_PC()
1034    move    a0, a1                                # arg0 <- len
1035    JAL(dvmThrowNegativeArraySizeException)    # (len)
1036    b       common_exceptionThrown
1037
1038/*
1039 * Invocation of a non-existent method.
1040 * On entry: method name in a1
1041 */
1042common_errNoSuchMethod:
1043    EXPORT_PC()
1044    move     a0, a1
1045    JAL(dvmThrowNoSuchMethodError)
1046    b       common_exceptionThrown
1047
1048/*
1049 * We encountered a null object when we weren't expecting one.  We
1050 * export the PC, throw a NullPointerException, and goto the exception
1051 * processing code.
1052 */
1053common_errNullObject:
1054    EXPORT_PC()
1055    li      a0, 0
1056    JAL(dvmThrowNullPointerException)
1057    b       common_exceptionThrown
1058
1059/*
1060 * For debugging, cause an immediate fault. The source address will be in ra. Use a jal to jump here.
1061 */
1062common_abort:
1063    lw      zero,-4(zero)            #  generate SIGSEGV
1064
1065/*
1066 * Spit out a "we were here", preserving all registers.
1067 */
1068    .macro SQUEAK num
1069common_squeak\num:
1070    STACK_STORE_RA();
1071    la        a0, .LstrSqueak
1072    LOAD_IMM(a1, \num);
1073    JAL(printf);
1074    STACK_LOAD_RA();
1075    RETURN;
1076    .endm
1077
1078    SQUEAK 0
1079    SQUEAK 1
1080    SQUEAK 2
1081    SQUEAK 3
1082    SQUEAK 4
1083    SQUEAK 5
1084
1085/*
1086 * Spit out the number in a0, preserving registers.
1087 */
1088common_printNum:
1089    STACK_STORE_RA()
1090    MOVE_REG(a1, a0)
1091    la        a0, .LstrSqueak
1092    JAL(printf)
1093    STACK_LOAD_RA()
1094    RETURN
1095
1096/*
1097 * Print a newline, preserving registers.
1098 */
1099common_printNewline:
1100    STACK_STORE_RA()
1101    la        a0, .LstrNewline
1102    JAL(printf)
1103    STACK_LOAD_RA()
1104    RETURN
1105
1106    /*
1107     * Print the 32-bit quantity in a0 as a hex value, preserving registers.
1108     */
1109common_printHex:
1110    STACK_STORE_RA()
1111    MOVE_REG(a1, a0)
1112    la        a0, .LstrPrintHex
1113    JAL(printf)
1114    STACK_LOAD_RA()
1115RETURN;
1116
1117/*
1118 * Print the 64-bit quantity in a0-a1, preserving registers.
1119 */
1120common_printLong:
1121    STACK_STORE_RA()
1122    MOVE_REG(a3, a1)
1123    MOVE_REG(a2, a0)
1124    la        a0, .LstrPrintLong
1125    JAL(printf)
1126    STACK_LOAD_RA()
1127    RETURN;
1128
1129/*
1130 * Print full method info.  Pass the Method* in a0.  Preserves regs.
1131 */
1132common_printMethod:
1133    STACK_STORE_RA()
1134    JAL(dvmMterpPrintMethod)
1135    STACK_LOAD_RA()
1136    RETURN
1137
1138/*
1139 * Call a C helper function that dumps regs and possibly some
1140 * additional info.  Requires the C function to be compiled in.
1141 */
1142    .if 0
1143common_dumpRegs:
1144    STACK_STORE_RA()
1145    JAL(dvmMterpDumpMipsRegs)
1146    STACK_LOAD_RA()
1147    RETURN
1148    .endif
1149
1150/*
1151 * Zero-terminated ASCII string data.
1152 */
1153    .data
1154
1155.LstrBadEntryPoint:
1156    .asciiz "Bad entry point %d\n"
1157.LstrDivideByZero:
1158    .asciiz "divide by zero"
1159.LstrFilledNewArrayNotImpl:
1160    .asciiz "filled-new-array only implemented for 'int'"
1161.LstrLogTag:
1162    .asciiz  "mterp"
1163.LstrExceptionNotCaughtLocally:
1164    .asciiz  "Exception %s from %s:%d not caught locally\n"
1165
1166.LstrNewline:
1167    .asciiz "\n"
1168.LstrSqueak:
1169    .asciiz "<%d>"
1170.LstrPrintHex:
1171    .asciiz "<0x%x>"
1172.LstrPrintLong:
1173    .asciiz "<%lld>"
1174