/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifdef WITH_JIT

/*
 * Target independent portion of Android's Jit
 */

#include "Dalvik.h"
#include "Jit.h"

#include "libdex/OpCodeNames.h"
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <signal.h>
#include "compiler/Compiler.h"
#include "compiler/CompilerUtility.h"
#include "compiler/CompilerIR.h"
#include <errno.h>

#if defined(WITH_SELF_VERIFICATION)
/* Allocate space for per-thread ShadowSpace data structures */
void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
{
    self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
    if (self->shadowSpace == NULL)
        return NULL;

    self->shadowSpace->registerSpaceSize = REG_SPACE;
    self->shadowSpace->registerSpace =
        (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));

    return self->shadowSpace->registerSpace;
}

/* Free per-thread ShadowSpace data structures */
void dvmSelfVerificationShadowSpaceFree(Thread* self)
{
    free(self->shadowSpace->registerSpace);
    free(self->shadowSpace);
}

/*
 * Save out PC, FP, InterpState, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 */
void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
                                   InterpState* interpState, int targetTrace)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    unsigned preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
    unsigned postBytes = interpState->method->registersSize*4;
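    /*
     * preBytes covers the outgoing-argument area and StackSaveArea that sit
     * below the frame pointer; postBytes covers the method's virtual
     * registers at and above it.  Together they bound the frame region that
     * is copied into the shadow space and later compared (see the memcpy of
     * the stack below).
     */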

    //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
    //    self->threadId, (int)pc, (int)fp);

    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    if (interpState->entryPoint == kInterpEntryResume) {
        interpState->entryPoint = kInterpEntryInstr;
#if 0
        /* Tracking the success rate of resume after single-stepping */
        if (interpState->jitResumeDPC == pc) {
            LOGD("SV single step resumed at %p", pc);
        }
        else {
            LOGD("real %p DPC %p NPC %p", pc, interpState->jitResumeDPC,
                 interpState->jitResumeNPC);
        }
#endif
    }

    // Dynamically grow shadow register space if necessary
    if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->glue = interpState;
    /*
     * Store the original method here in case the trace ends with a
     * return/invoke, which would have advanced interpState->method to the
     * last method executed.
     */
    shadowSpace->method = interpState->method;
    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;
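    /*
     * The shadow frame is placed at the top of the register space so that
     * the copied virtual registers end exactly at the end of the buffer,
     * leaving the space below shadowFP for the outs area and StackSaveArea.
     */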

    // Create a copy of the InterpState
    memcpy(&(shadowSpace->interpState), interpState, sizeof(InterpState));
    shadowSpace->interpState.fp = shadowSpace->shadowFP;
    shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}

/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 */
void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
                                      SelfVerificationState exitState)
{
    Thread *self = dvmThreadSelf();
    ShadowSpace *shadowSpace = self->shadowSpace;
    // Official InterpState structure
    InterpState *realGlue = shadowSpace->glue;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;
    shadowSpace->jitExitState = exitState;

    //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
        LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Move the resume [ND]PC from the shadow space to the real space so that
    // the debug interpreter can return to the translation
    if (exitState == kSVSSingleStep) {
        realGlue->jitResumeNPC = shadowSpace->interpState.jitResumeNPC;
        realGlue->jitResumeDPC = shadowSpace->interpState.jitResumeDPC;
    } else {
        realGlue->jitResumeNPC = NULL;
        realGlue->jitResumeDPC = NULL;
    }

    // Special case when punting after a single instruction
    if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else if (exitState == kSVSBackwardBranch && pc < shadowSpace->startPC) {
        /*
         * Consider a trace with a backward branch:
         *   1: ..
         *   2: ..
         *   3: ..
         *   4: ..
         *   5: Goto {1 or 2 or 3 or 4}
         *
         * If instruction 5 goes to 1 and there is no single-step
         * instruction in the loop, pc is equal to shadowSpace->startPC and
         * we will honor the backward branch condition.
         *
         * If the single-step instruction is outside the loop, then after
         * resuming in the trace the startPC will be less than pc so we will
         * also honor the backward branch condition.
         *
         * If the single-step is inside the loop, we won't hit the same endPC
         * twice when the interpreter is re-executing the trace so we want to
         * cancel the backward branch condition. In this case it can be
         * detected as the endPC (ie pc) will be less than startPC.
         */
        shadowSpace->selfVerificationState = kSVSNormal;
    } else {
        shadowSpace->selfVerificationState = exitState;
    }

    return shadowSpace;
}

/* Print contents of virtual registers */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int i;
    for (i = 0; i < numWords; i++) {
        LOGD("(v%d) 0x%8x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
    }
}

/* Print values maintained in shadowSpace */
static void selfVerificationDumpState(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int frameBytes = (int) shadowSpace->registerSpace +
                     shadowSpace->registerSpaceSize*4 -
                     (int) shadowSpace->shadowFP;
    int localRegs = 0;
    int frameBytes2 = 0;
    if (self->curFrame < shadowSpace->fp) {
        localRegs = (stackSave->method->registersSize -
                     stackSave->method->insSize)*4;
        frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    }
    LOGD("********** SHADOW STATE DUMP **********");
    LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
        (int)(pc - stackSave->method->insns));
    LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    LOGD("Method: %s", shadowSpace->method->name);
    LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
        (int)shadowSpace->endPC);
    LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
        (int)self->curFrame);
    LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
        (int)shadowSpace->endShadowFP);
    LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
        localRegs, frameBytes2);
    LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
        shadowSpace->selfVerificationState);
}

/* Print decoded instructions in the current trace */
static void selfVerificationDumpTrace(const u2* pc, Thread* self)
{
    ShadowSpace* shadowSpace = self->shadowSpace;
    StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    int i, addr, offset;
    DecodedInstruction *decInsn;

    LOGD("********** SHADOW TRACE DUMP **********");
    for (i = 0; i < shadowSpace->traceLength; i++) {
        addr = shadowSpace->trace[i].addr;
        offset = (int)((u2*)addr - stackSave->method->insns);
        decInsn = &(shadowSpace->trace[i].decInsn);
        /* The stored instruction may not be fully decoded, so some register
         * fields may be garbage */
        LOGD("0x%x: (0x%04x) %s",
            addr, offset, dexGetOpcodeName(decInsn->opCode));
    }
}

/* Code is forced into this spin loop when a divergence is detected */
static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
{
    const u2 *startPC = shadowSpace->startPC;
    JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
    if (desc) {
        dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
        /*
         * This function effectively terminates the VM right here, so not
         * freeing the desc pointer when the enqueuing fails is acceptable.
         */
    }
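    /*
     * Park the thread so the divergent state can be inspected; only clearing
     * gDvmJit.selfVerificationSpin (e.g. from an attached debugger) releases
     * the loop below.
     */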
    gDvmJit.selfVerificationSpin = true;
    while (gDvmJit.selfVerificationSpin) sleep(10);
}

/* Manage self verification while in the debug interpreter */
static bool selfVerificationDebugInterp(const u2* pc, Thread* self,
                                        InterpState *interpState)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, dexGetOpcodeName(decInsn.opCode));

    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /*
     * Skip endPC once when trace has a backward branch. If the SV state is
     * single step, keep it that way.
     */
    if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
        (state != kSVSBackwardBranch && state != kSVSSingleStep)) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /* Check that the current pc is the end of the trace */
    if ((state == kSVSDebugInterp || state == kSVSSingleStep) &&
        pc == shadowSpace->endPC) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp,
                                           (int*)shadowSpace->shadowFP,
                                           frameBytes/4);
            LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                                           (int*)shadowSpace->fp,
                                           frameBytes/4);
            selfVerificationSpinLoop(shadowSpace);
        }
        /* Check new frame if it exists (invokes only) */
        if (self->curFrame < shadowSpace->fp) {
            StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->curFrame - localRegs;
            if (memcmp(((char*)self->curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
                    (int)self->curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->curFrame,
                                               (int*)shadowSpace->endShadowFP,
                                               (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                                               (int*)self->curFrame,
                                               (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop(shadowSpace);
            }
        }

        /* Check memory space */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
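        /*
         * Each ShadowHeap entry records an address/value pair stored by the
         * translated code; memory now holds what the interpreter wrote, so
         * any mismatch between the two is a divergence.
         */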
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
                LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        if (memDiff) selfVerificationSpinLoop(shadowSpace);

        /*
         * Switch to JIT single step mode to stay in the debug interpreter for
         * one more instruction
         */
        if (state == kSVSSingleStep) {
            interpState->jitState = kJitSingleStepEnd;
        }
        return true;

    /* End not yet reached - make sure the max trace length is not exceeded */
    } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop(shadowSpace);

        return true;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;

    return false;
}
#endif

/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism.  Running threads look to the copy
     * of this value in their private InterpState structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table. Because this is a
     * permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it.  Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = NULL;
}

#if defined(WITH_JIT_TUNING)
/* Convenience function to increment counter from assembly code */
void dvmBumpNoChain(int from)
{
    gDvmJit.noChainExit[from]++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
#endif

/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;
    int not_hit;
    int chains;
    int stubs;
    if (gDvmJit.pJitEntryTable) {
        for (i=0, stubs=chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0) {
                hit++;
                if (gDvmJit.pJitEntryTable[i].codeAddress ==
                      dvmCompilerGetInterpretTemplate())
                    stubs++;
            } else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD("JIT: table size is %d, entries used is %d",
             gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
        LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
             hit, not_hit + hit, chains, gDvmJit.threshold,
             gDvmJit.blockingMode ? "Blocking" : "Non-blocking");

#if defined(WITH_JIT_TUNING)
        LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);

        LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
             gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
             gDvmJit.normalExit, gDvmJit.puntExit);

        LOGD("JIT: ICHits: %d", gDvmICHitCount);

        LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
             "%d switch overflow",
             gDvmJit.noChainExit[kInlineCacheMiss],
             gDvmJit.noChainExit[kCallsiteInterpreted],
             gDvmJit.noChainExit[kSwitchOverflow]);

        LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
             "%d dropped",
             gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
             gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
             gDvmJit.icPatchDropped);

        LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
             gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
             gDvmJit.invokeNative, gDvmJit.returnOp);
        LOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter",
             gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined,
             gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
        LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
        LOGD("JIT: Avg unit compilation time: %llu us",
             gDvmJit.jitTime / gDvmJit.numCompilations);
#endif

        LOGD("JIT: %d Translation chains, %d interp stubs",
             gDvmJit.translationChains, stubs);
        if (gDvmJit.profile) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}


void setTraceConstruction(JitEntry *slot, bool value)
{
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
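    /*
     * CAS loop: update only the traceConstruction bit while preserving the
     * other fields packed into infoWord, which may be updated concurrently.
     */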
    do {
        oldValue = slot->u;
        newValue = oldValue;
        newValue.info.traceConstruction = value;
    } while (android_atomic_release_cas(oldValue.infoWord, newValue.infoWord,
            &slot->u.infoWord) != 0);
}

void resetTracehead(InterpState* interpState, JitEntry *slot)
{
    slot->codeAddress = dvmCompilerGetInterpretTemplate();
    setTraceConstruction(slot, false);
}

/* Clean up any pending trace builds */
void dvmJitAbortTraceSelect(InterpState* interpState)
{
    if (interpState->jitState == kJitTSelect)
        interpState->jitState = kJitDone;
}

/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns NULL if the table is full.
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

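    /*
     * Note: a chain value equal to jitTableSize acts as the end-of-chain
     * sentinel, since any valid slot index is less than the table size.
     */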
    /* Walk the bucket chain to find an exact match for our PC */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
        /*
         * No match.  Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case).  Otherwise we're going
         * to have to find a free slot and chain it.
         */
        ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0;  /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (android_atomic_release_cas(oldValue.infoWord,
                        newValue.infoWord,
                        &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            /*
             * Initialize codeAddress and allocate the slot.  Must
             * happen in this order (since dPC is set, the entry is live).
             */
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}

/*
 * Append the class ptr of "this" and the current method ptr to the current
 * trace. That is, the trace runs will contain the following components:
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass (new)
 *  + calleeMethod (new)
 */
static void insertClassMethodInfo(InterpState* interpState,
                                  const ClassObject* thisClass,
                                  const Method* calleeMethod,
                                  const DecodedInstruction* insn)
{
    int currTraceRun = ++interpState->currTraceRun;
    interpState->trace[currTraceRun].meta = (void *) thisClass;
    currTraceRun = ++interpState->currTraceRun;
    interpState->trace[currTraceRun].meta = (void *) calleeMethod;
}

/*
 * Check if the next instruction following the invoke is a move-result and if
 * so add it to the trace. That is, this will add the trace run that includes
 * the move-result to the trace list.
 *
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass (existing entry)
 *  + calleeMethod (existing entry)
 *  + move result (new)
 *
 * lastPC, len, offset are all from the preceding invoke instruction
 */
static void insertMoveResult(const u2 *lastPC, int len, int offset,
                             InterpState *interpState)
{
    DecodedInstruction nextDecInsn;
    const u2 *moveResultPC = lastPC + len;

    dexDecodeInstruction(gDvm.instrFormat, moveResultPC, &nextDecInsn);
    if ((nextDecInsn.opCode != OP_MOVE_RESULT) &&
        (nextDecInsn.opCode != OP_MOVE_RESULT_WIDE) &&
        (nextDecInsn.opCode != OP_MOVE_RESULT_OBJECT))
        return;

    /* We need to start a new trace run */
    int currTraceRun = ++interpState->currTraceRun;
    interpState->currRunHead = moveResultPC;
    interpState->trace[currTraceRun].frag.startOffset = offset + len;
    interpState->trace[currTraceRun].frag.numInsts = 1;
    interpState->trace[currTraceRun].frag.runEnd = false;
    interpState->trace[currTraceRun].frag.hint = kJitHintNone;
    interpState->trace[currTraceRun].frag.isCode = true;
    interpState->totalTraceLen++;

    interpState->currRunLen = dexGetInstrOrTableWidthAbs(gDvm.instrWidth,
                                                         moveResultPC);
}

/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted.  This is the primary trace
 * selection function.  NOTE: return instructions are handled a little
 * differently.  In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation.  If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request.  This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected.  However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes.  This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 */
int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState,
                const ClassObject* thisClass, const Method* curMethod)
{
    int flags, len;
    int switchInterp = false;
    bool debugOrProfile = dvmDebuggerOrProfilerActive();
    /* Stay in the dbg interpreter for the next instruction */
    bool stayOneMoreInst = false;

    /*
     * Bug 2710533 - dalvik crash when disconnecting debugger
     *
     * Reset the entry point to the default value. If needed it will be set to a
     * specific value in the corresponding case statement (eg kJitSingleStepEnd)
     */
    interpState->entryPoint = kInterpEntryInstr;

    /* Prepare to handle last PC and stage the current PC */
    const u2 *lastPC = interpState->lastPC;
    interpState->lastPC = pc;

    switch (interpState->jitState) {
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(gDvm.instrFormat, lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions
             * because of the amount of space required to generate the
             * chaining cells.
             */
            if (interpState->totalTraceLen != 0 &&
                (decInsn.opCode == OP_PACKED_SWITCH ||
                 decInsn.opCode == OP_SPARSE_SWITCH)) {
                interpState->jitState = kJitTSelectEnd;
                break;
            }

#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s", dexGetOpcodeName(decInsn.opCode));
#endif
            flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
            len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, lastPC);
            offset = lastPC - interpState->method->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(interpState->method));
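
            /*
             * lastPC falls outside the current run when control arrived here
             * via a branch rather than straight-line execution, in which
             * case a fresh trace run must be opened for it.
             */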
            if (lastPC != interpState->currRunHead + interpState->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++interpState->currTraceRun;
                interpState->currRunLen = 0;
                interpState->currRunHead = (u2*)lastPC;
                interpState->trace[currTraceRun].frag.startOffset = offset;
                interpState->trace[currTraceRun].frag.numInsts = 0;
                interpState->trace[currTraceRun].frag.runEnd = false;
                interpState->trace[currTraceRun].frag.hint = kJitHintNone;
                interpState->trace[currTraceRun].frag.isCode = true;
            }
            interpState->trace[interpState->currTraceRun].frag.numInsts++;
            interpState->totalTraceLen++;
            interpState->currRunLen += len;

            /*
             * If the last instruction is an invoke, we will try to sneak in
             * the move-result* (if present) into a separate trace run.
             */
            int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;

            /* Will probably never hit this with the current trace builder */
            if (interpState->currTraceRun ==
                (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
                interpState->jitState = kJitTSelectEnd;
            }

            if (((flags & kInstrUnconditional) == 0) &&
                /* don't end trace on INVOKE_DIRECT_EMPTY */
                (decInsn.opCode != OP_INVOKE_DIRECT_EMPTY) &&
                ((flags & (kInstrCanBranch |
                           kInstrCanSwitch |
                           kInstrCanReturn |
                           kInstrInvoke)) != 0)) {
                interpState->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     dexGetOpcodeName(decInsn.opCode));
#endif

                /*
                 * If the current invoke is a {virtual,interface}, get the
                 * current class/method pair into the trace as well.
                 * If the next instruction is a variant of move-result, insert
                 * it to the trace too.
                 */
                if (flags & kInstrInvoke) {
                    insertClassMethodInfo(interpState, thisClass, curMethod,
                                          &decInsn);
                    insertMoveResult(lastPC, len, offset, interpState);
                }
            }
            /* Break on throw or self-loop */
            if ((decInsn.opCode == OP_THROW) || (lastPC == pc)) {
                interpState->jitState = kJitTSelectEnd;
            }
            if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                interpState->jitState = kJitTSelectEnd;
            }
            /* Abandon the trace request if debugger/profiler is attached */
            if (debugOrProfile) {
                interpState->jitState = kJitDone;
                break;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            else {
                /*
                 * Last instruction is a return - stay in the dbg interpreter
                 * for one more instruction if it is a non-void return, since
                 * we don't want to start a trace with move-result as the first
                 * instruction (which is already included in the trace
                 * containing the invoke).
                 */
                if (decInsn.opCode != OP_RETURN_VOID) {
                    stayOneMoreInst = true;
                }
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                /* Bad (empty) trace - abandon the request */
                if (interpState->totalTraceLen == 0) {
                    interpState->jitState = kJitDone;
                    switchInterp = true;
                    break;
                }

                int lastTraceDesc = interpState->currTraceRun;

                /* Extend a new empty desc if the last slot is meta info */
                if (!interpState->trace[lastTraceDesc].frag.isCode) {
                    lastTraceDesc = ++interpState->currTraceRun;
                    interpState->trace[lastTraceDesc].frag.startOffset = 0;
                    interpState->trace[lastTraceDesc].frag.numInsts = 0;
                    interpState->trace[lastTraceDesc].frag.hint = kJitHintNone;
                    interpState->trace[lastTraceDesc].frag.isCode = true;
                }

                /* Mark the end of the trace runs */
                interpState->trace[lastTraceDesc].frag.runEnd = true;

                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (interpState->currTraceRun+1));

                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    interpState->jitState = kJitDone;
                    switchInterp = true;
                    break;
                }

                desc->method = interpState->method;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(interpState->trace[0]),
                    sizeof(JitTraceRun) * (interpState->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
#endif
                if (dvmCompilerWorkEnqueue(
                       interpState->currTraceHead, kWorkOrderTrace, desc)) {
                    /* Work order successfully enqueued */
                    if (gDvmJit.blockingMode) {
                        dvmCompilerDrainQueue();
                    }
                } else {
                    /*
                     * Make sure the descriptor for the abandoned work order is
                     * freed.
                     */
                    free(desc);
                }
                /*
                 * Reset "trace in progress" flag whether or not we
                 * successfully entered a work order.
                 */
                JitEntry *jitEntry =
                    lookupAndAdd(interpState->currTraceHead, false);
                if (jitEntry) {
                    setTraceConstruction(jitEntry, false);
                }
                interpState->jitState = kJitDone;
                switchInterp = true;
            }
            break;
        case kJitSingleStep:
            interpState->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            /*
             * Clear the inJitCodeCache flag and abandon the resume attempt if
             * we cannot switch back to the translation due to corner-case
             * conditions. If the flag is not cleared and the code cache is full
             * we will be stuck in the debug interpreter as the code cache
             * cannot be reset.
             */
            if (dvmJitStayInPortableInterpreter()) {
                interpState->entryPoint = kInterpEntryInstr;
                self->inJitCodeCache = 0;
            } else {
                interpState->entryPoint = kInterpEntryResume;
            }
            interpState->jitState = kJitDone;
            switchInterp = true;
            break;
        case kJitDone:
            switchInterp = true;
            break;
#if defined(WITH_SELF_VERIFICATION)
        case kJitSelfVerification:
            if (selfVerificationDebugInterp(pc, self, interpState)) {
                /*
                 * If the next state is not single-step end, we can switch
                 * interpreter now.
                 */
                if (interpState->jitState != kJitSingleStepEnd) {
                    interpState->jitState = kJitDone;
                    switchInterp = true;
                }
            }
            break;
#endif
        case kJitNot:
            switchInterp = !debugOrProfile;
            break;
        default:
            LOGE("Unexpected JIT state: %d entry point: %d",
                 interpState->jitState, interpState->entryPoint);
            dvmAbort();
            break;
    }
    /*
     * Final check to see if we can really switch the interpreter. Make sure
     * the jitState is kJitDone or kJitNot when switchInterp is set to true.
     */
    assert(switchInterp == false || interpState->jitState == kJitDone ||
           interpState->jitState == kJitNot);
    return switchInterp && !debugOrProfile && !stayOneMoreInst &&
           !dvmJitStayInPortableInterpreter();
}

JitEntry *dvmFindJitEntry(const u2* pc)
{
    int idx = dvmJitHash(pc);

    /* Expect a high hit rate on 1st shot */
    if (gDvmJit.pJitEntryTable[idx].dPC == pc)
        return &gDvmJit.pJitEntryTable[idx];
    else {
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            if (gDvmJit.pJitEntryTable[idx].dPC == pc)
                return &gDvmJit.pJitEntryTable[idx];
        }
    }
    return NULL;
}

/*
 * If a translated code address exists for the Dalvik byte code
 * pointer return it.  This routine needs to be fast.
 */
void* dvmJitGetCodeAddr(const u2* dPC)
{
    int idx = dvmJitHash(dPC);
    const u2* npc = gDvmJit.pJitEntryTable[idx].dPC;
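
    /*
     * Note: this lookup walks the table without taking tableLock.  That is
     * expected to be safe here because entries only become visible once
     * their dPC is set (see lookupAndAdd) and chains are only ever extended.
     */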
    if (npc != NULL) {
        bool hideTranslation = dvmJitHideTranslation();

        if (npc == dPC) {
#if defined(WITH_JIT_TUNING)
            gDvmJit.addrLookupsFound++;
#endif
            return hideTranslation ?
                NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
        } else {
            int chainEndMarker = gDvmJit.jitTableSize;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(WITH_JIT_TUNING)
                    gDvmJit.addrLookupsFound++;
#endif
                    return hideTranslation ?
                        NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
                }
            }
        }
    }
#if defined(WITH_JIT_TUNING)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}

/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from initial state to
 * JIT'd code, it must not be altered without first halting all
 * threads.  This routine should only be called by the compiler
 * thread.
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    JitEntry *jitEntry = lookupAndAdd(dPC, false);
    assert(jitEntry);
    /* Note: order of update is important */
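    /*
     * Publish the instruction set via the CAS loop before storing
     * codeAddress: readers treat a non-null codeAddress as a live
     * translation, so its metadata must already be in place.
     */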
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.instructionSet = set;
    } while (android_atomic_release_cas(
             oldValue.infoWord, newValue.infoWord,
             &jitEntry->u.infoWord) != 0);
    jitEntry->codeAddress = nPC;
}

/*
 * Determine if a valid trace-building request is active.  Return true
 * if we need to abort and switch back to the fast interpreter, false
 * otherwise.
 */
bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
{
    bool switchInterp = false;         /* Assume success */
    int i;
    /*
     * A note on trace "hotness" filtering:
     *
     * Our first level trigger is intentionally loose - we need it to
     * fire easily not just to identify potential traces to compile, but
     * also to allow re-entry into the code cache.
     *
     * The 2nd level filter (done here) exists to be selective about
     * what we actually compile.  It works by requiring the same
     * trace head "key" (defined as filterKey below) to appear twice in
     * a relatively short period of time.  The difficulty is defining the
     * shape of the filterKey.  Unfortunately, there is no "one size fits
     * all" approach.
     *
     * For spiky execution profiles dominated by a smallish
     * number of very hot loops, we would want the second-level filter
     * to be very selective.  A good selective filter is requiring an
     * exact match of the Dalvik PC.  In other words, defining filterKey as:
     *     intptr_t filterKey = (intptr_t)interpState->pc
     *
     * However, for flat execution profiles we do best when aggressively
     * translating.  A heuristically decent proxy for this is to use
     * the value of the method pointer containing the trace as the filterKey.
     * Intuitively, this is saying that once any trace in a method appears hot,
     * immediately translate any other trace from that same method that
     * survives the first-level filter.  Here, filterKey would be defined as:
     *     intptr_t filterKey = (intptr_t)interpState->method
     *
     * The problem is that we can't easily detect whether we're dealing
     * with a spiky or flat profile.  If we go with the "pc" match approach,
     * flat profiles perform poorly.  If we go with the loose "method" match,
     * we end up generating a lot of useless translations.  Probably the
     * best approach in the future will be to retain profile information
     * across runs of each application in order to determine its profile,
     * and then choose once we have enough history.
     *
     * However, for now we've decided to choose a compromise filter scheme
     * that includes elements of both.  The high order bits of the filter key
     * are drawn from the enclosing method, and are combined with a slice
     * of the low-order bits of the Dalvik pc of the trace head.  The
     * looseness of the filter can be adjusted by changing the width of
     * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS).  The wider
     * the slice, the tighter the filter.
     *
     * Note: the fixed shifts in the code below reflect assumed word
     * alignment for method pointers and half-word alignment of the Dalvik pc.
     */
    u4 methodKey = (u4)interpState->method <<
                   (JIT_TRACE_THRESH_FILTER_PC_BITS - 2);
    u4 pcKey = ((u4)interpState->pc >> 1) &
               ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1);
    intptr_t filterKey = (intptr_t)(methodKey | pcKey);
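
    /*
     * Illustration: with JIT_TRACE_THRESH_FILTER_PC_BITS == 4, pcKey keeps
     * the low 4 bits of (pc >> 1) and methodKey is the method pointer
     * shifted left by 2.  Since method pointers are word aligned, the low
     * 4 bits of methodKey are zero, so the two fields never collide.
     */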
    bool debugOrProfile = dvmDebuggerOrProfilerActive();

    /* Check if the JIT request can be handled now */
    if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) {
        /* Bypass the filter for hot trace requests or during stress mode */
        if (interpState->jitState == kJitTSelectRequest &&
            gDvmJit.threshold > 6) {
            /* Two-level filtering scheme */
            for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
                if (filterKey == interpState->threshFilter[i]) {
                    interpState->threshFilter[i] = 0; // Reset filter entry
                    break;
                }
            }
            if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
                /*
                 * Use random replacement policy - otherwise we could miss a
                 * large loop that contains more traces than the size of our
                 * filter array.
                 */
                i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
                interpState->threshFilter[i] = filterKey;
                interpState->jitState = kJitDone;
            }
        }

        /* If the compiler is backlogged, cancel any JIT actions */
        if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
            interpState->jitState = kJitDone;
        }

        /*
         * Check for additional reasons that might force the trace select
         * request to be dropped
         */
        if (interpState->jitState == kJitTSelectRequest ||
            interpState->jitState == kJitTSelectRequestHot) {
            JitEntry *slot = lookupAndAdd(interpState->pc, false);
            if (slot == NULL) {
                /*
                 * Table is full.  This should have been
                 * detected by the compiler thread and the table
                 * resized before we run into it here.  Assume bad things
                 * are afoot and disable profiling.
                 */
                interpState->jitState = kJitDone;
                LOGD("JIT: JitTable full, disabling profiling");
                dvmJitStopTranslationRequests();
            } else if (slot->u.info.traceConstruction) {
                /*
                 * Trace request already in progress, but most likely it
                 * aborted without cleaning up.  Assume the worst and
                 * mark trace head as untranslatable.  If we're wrong,
                 * the compiler thread will correct the entry when the
                 * translation is completed.  The downside here is that
                 * some existing translation may chain to the interpret-only
                 * template instead of the real translation during this
                 * window.  Performance, but not correctness, issue.
                 */
                interpState->jitState = kJitDone;
                resetTracehead(interpState, slot);
            } else if (slot->codeAddress) {
                /* Nothing to do here - just return */
                interpState->jitState = kJitDone;
            } else {
                /*
                 * Mark request.  Note, we are not guaranteed exclusivity
                 * here.  A window exists for another thread to be
                 * attempting to build this same trace.  Rather than
                 * bear the cost of locking, we'll just allow that to
                 * happen.  The compiler thread, if it chooses, can
                 * discard redundant requests.
                 */
                setTraceConstruction(slot, true);
            }
        }

        switch (interpState->jitState) {
            case kJitTSelectRequest:
            case kJitTSelectRequestHot:
                interpState->jitState = kJitTSelect;
                interpState->currTraceHead = interpState->pc;
                interpState->currTraceRun = 0;
                interpState->totalTraceLen = 0;
                interpState->currRunHead = interpState->pc;
                interpState->currRunLen = 0;
                interpState->trace[0].frag.startOffset =
                     interpState->pc - interpState->method->insns;
                interpState->trace[0].frag.numInsts = 0;
                interpState->trace[0].frag.runEnd = false;
                interpState->trace[0].frag.hint = kJitHintNone;
                interpState->trace[0].frag.isCode = true;
                interpState->lastPC = 0;
                break;
            /*
             * From the JIT's perspective there is no need to stay in the
             * debug interpreter unless a debugger/profiler is attached.
             */
            case kJitDone:
                switchInterp = true;
                break;
            default:
                LOGE("Unexpected JIT state: %d entry point: %d",
                     interpState->jitState, interpState->entryPoint);
                dvmAbort();
        }
    } else {
        /*
         * Cannot build trace this time - ready to leave the dbg interpreter
         */
        interpState->jitState = kJitDone;
        switchInterp = true;
    }

    /*
     * Final check to see if we can really switch the interpreter. Make sure
     * the jitState is kJitDone when switchInterp is set to true.
     */
    assert(switchInterp == false || interpState->jitState == kJitDone);
    return switchInterp && !debugOrProfile &&
           !dvmJitStayInPortableInterpreter();
}

/*
 * Resizes the JitTable.  The requested size must be a power of 2, and the
 * routine returns true on failure.  Stops all threads, and thus is a
 * heavyweight operation.  May only be called by the compiler thread.
 */
bool dvmJitResizeJitTable(unsigned int size)
{
    JitEntry *pNewTable;
    JitEntry *pOldTable;
    JitEntry tempEntry;
    u4 newMask;
    unsigned int oldSize;
    unsigned int i;

    assert(gDvmJit.pJitEntryTable != NULL);
    assert(size && !(size & (size - 1)));   /* Is power of 2? */

    LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);

    newMask = size - 1;

    if (size <= gDvmJit.jitTableSize) {
        return true;
    }

    /* Make sure requested size is compatible with chain field width */
    tempEntry.u.info.chain = size;
    if (tempEntry.u.info.chain != size) {
        LOGD("Jit: JitTable request of %d too big", size);
        return true;
    }

    pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
    if (pNewTable == NULL) {
        return true;
    }
    for (i=0; i< size; i++) {
        pNewTable[i].u.info.chain = size;  /* Initialize chain termination */
    }

    /* Stop all other interpreting/jit'ng threads */
    dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);

    pOldTable = gDvmJit.pJitEntryTable;
    oldSize = gDvmJit.jitTableSize;

    dvmLockMutex(&gDvmJit.tableLock);
    gDvmJit.pJitEntryTable = pNewTable;
    gDvmJit.jitTableSize = size;
    gDvmJit.jitTableMask = size - 1;
    gDvmJit.jitTableEntriesUsed = 0;

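    /* Re-hash every live entry from the old table into the new one */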
    for (i=0; i < oldSize; i++) {
        if (pOldTable[i].dPC) {
            JitEntry *p;
            u2 chain;
            p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock */);
            p->codeAddress = pOldTable[i].codeAddress;
            /* We need to preserve the new chain field, but copy the rest */
            chain = p->u.info.chain;
            p->u = pOldTable[i].u;
            p->u.info.chain = chain;
        }
    }
    dvmUnlockMutex(&gDvmJit.tableLock);

    free(pOldTable);

    /* Restart the world */
    dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);

    return false;
}

/*
 * Reset the JitTable to the initial clean state.
 */
void dvmJitResetTable(void)
{
    JitEntry *jitEntry = gDvmJit.pJitEntryTable;
    unsigned int size = gDvmJit.jitTableSize;
    unsigned int i;

    dvmLockMutex(&gDvmJit.tableLock);
    memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
    for (i=0; i< size; i++) {
        jitEntry[i].u.info.chain = size;  /* Initialize chain termination */
    }
    gDvmJit.jitTableEntriesUsed = 0;
    dvmUnlockMutex(&gDvmJit.tableLock);
}

/*
 * Float/double conversion requires clamping to the min and max of the
 * integer form.  If the target doesn't support this natively, use these
 * helpers.
 */
s8 dvmJitd2l(double d)
{
    static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
    static const double kMinLong = (double)(s8)0x8000000000000000ULL;
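    /*
     * Note: (double)(s8)0x7fffffffffffffff rounds up to exactly 2^63, so
     * the >= comparison below also catches values that would overflow a
     * signed 64-bit result.  NaN compares false everywhere, hence the
     * explicit d != d check.
     */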
    if (d >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (d <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (d != d) // NaN case
        return 0;
    else
        return (s8)d;
}

s8 dvmJitf2l(float f)
{
    static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
    static const float kMinLong = (float)(s8)0x8000000000000000ULL;
    if (f >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (f <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (f != f) // NaN case
        return 0;
    else
        return (s8)f;
}

#endif /* WITH_JIT */
