Compiler.cpp revision d585beda3690b8b5b978e3c59af224336614ba72
/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>
#include <errno.h>
#include <cutils/ashmem.h>

#include "Dalvik.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"
#ifdef ARCH_IA32
#include "codegen/x86/Translator.h"
#include "codegen/x86/Lower.h"
#endif

extern "C" void dvmCompilerTemplateStart(void);
extern "C" void dvmCompilerTemplateEnd(void);

static inline int workQueueLength(void)
{
    return gDvmJit.compilerQueueLength;
}

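/*
 * Remove and return the oldest work order from the circular work queue.
 * Must be called with gDvmJit.compilerLock held and with the queue known
 * to be non-empty.
 */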
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}

/*
 * Enqueue a work order - retrying until successful.  If the attempt to
 * enqueue is repeatedly unsuccessful, assume the JIT is in a bad state and
 * force a code cache reset.
 */
#define ENQUEUE_MAX_RETRIES 20
void dvmCompilerForceWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    bool success;
    int retries = 0;
    do {
        success = dvmCompilerWorkEnqueue(pc, kind, info);
        if (!success) {
            retries++;
            if (retries > ENQUEUE_MAX_RETRIES) {
                ALOGE("JIT: compiler queue wedged - forcing reset");
                gDvmJit.codeCacheFull = true;  // Force reset
                success = true;  // Because we'll drop the order now anyway
            } else {
                dvmLockMutex(&gDvmJit.compilerLock);
                pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                                  &gDvmJit.compilerLock);
                dvmUnlockMutex(&gDvmJit.compilerLock);
            }
        }
    } while (!success);
}

/*
 * Attempt to enqueue a work order, returning true if successful.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    dvmLockMutex(&gDvmJit.compilerLock);

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        dvmUnlockMutex(&gDvmJit.compilerLock);
        return false;
    }

    for (numWork = gDvmJit.compilerQueueLength,
           i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc) {
            dvmUnlockMutex(&gDvmJit.compilerLock);
            return true;
        }
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.methodCompilationAborted = NULL;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.cacheVersion = gDvmJit.cacheVersion;
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}
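/*
 * Illustrative caller sketch (not part of this file): a mutator thread that
 * wants a trace compiled is expected to do roughly
 *
 *     if (!dvmCompilerWorkEnqueue(pc, kWorkOrderTrace, desc)) {
 *         free(desc);   // enqueue failed - the caller still owns 'desc'
 *     }
 *
 * where kWorkOrderTrace and desc stand in for whatever work-order kind and
 * info payload the interpreter's trace-selection code actually passes.
 */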

/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->suspendCount == 0) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied. Furthermore, the compiler thread may have been shut down
         * so the blocked thread may never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock,
                            1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}

bool dvmCompilerSetupCodeCache(void)
{
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        ALOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE, fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        ALOGE("Failed to mmap the JIT code cache: %s", strerror(errno));
        return false;
    }

    gDvmJit.pageSizeMask = getpagesize() - 1;

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // ALOGD("Code cache starts at %p", gDvmJit.codeCache);

#ifndef ARCH_IA32
    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dvmCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    /*
     * Work around a CPU bug by keeping the 32-bit ARM handler code in its own
     * page.
     */
    if (dvmCompilerInstructionSet() == DALVIK_JIT_THUMB2) {
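        /* Round the template size up to the next 4KB page boundary */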
        templateSize = (templateSize + 4095) & ~4095;
    }

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part in the code cache that is being used now */
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache + templateSize, 0);

    int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                          PROTECT_CODE_CACHE_ATTRS);

    if (result == -1) {
        ALOGE("Failed to remove the write permission for the code cache");
        dvmAbort();
    }
#else
    gDvmJit.codeCacheByteUsed = 0;
    stream = (char*)gDvmJit.codeCache + gDvmJit.codeCacheByteUsed;
    ALOGV("codeCache = %p stream = %p before initJIT", gDvmJit.codeCache, stream);
    streamStart = stream;
    initJIT(NULL, NULL);
    gDvmJit.templateSize = (stream - streamStart);
    gDvmJit.codeCacheByteUsed = (stream - streamStart);
    ALOGV("stream = %p after initJIT", stream);
#endif

    return true;
}

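/*
 * Walk a thread's Dalvik stack, optionally logging each frame, and clear
 * every saveArea->returnAddr so that chained returns cannot re-enter
 * translations that are about to be discarded.
 */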
static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->interpSave.curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        ALOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame((u4*)fp)) {
                ALOGD("  #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                ALOGD("  #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                         " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}

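/*
 * Wipe and reinitialize the JIT code cache. Expected to run while all other
 * threads are suspended; if any thread is still executing inside the code
 * cache, the reset is postponed and the delayed-reset counter is bumped.
 */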
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used, and
         * 2) any thread stuck in JIT land will soon return to the
         *    interpreter
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
        /* Cancel any ongoing trace selection */
        dvmDisableSubMode(thread, kSubModeJitTraceBuild);
    }
    dvmUnlockThreadList();

    if (inJit) {
        ALOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Update the translation cache version */
    gDvmJit.cacheVersion++;

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    dvmCompilerCacheClear((char *) gDvmJit.codeCache + gDvmJit.templateSize,
                          gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);

    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache +
                          gDvmJit.codeCacheByteUsed, 0);

    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /*
     * Reset the inflight compilation address (can only be done in safe points
     * or by the compiler thread when its thread state is RUNNING).
     */
    gDvmJit.inflightBaseAddr = NULL;

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    ALOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}

/*
 * Perform actions that are only safe when all threads are suspended. Currently
 * we do:
 * 1) Check if the code cache is full. If so, reset it and restart populating
 *    it from scratch.
 * 2) Patch predicted chaining cells by consuming recorded work orders.
 */
void dvmCompilerPerformSafePointChecks(void)
{
    if (gDvmJit.codeCacheFull) {
        resetCodeCache();
    }
    dvmCompilerPatchInlineCache();
}

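/*
 * One-time initialization run on the compiler thread: architecture setup,
 * code cache, compiler arena, JitTable, and the profiling tables. Returns
 * false if any step fails.
 */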
static bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    JitTraceProfCounters *pJitTraceProfCounters = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Set up the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    /* Cache the thread pointer */
    gDvmJit.compilerThread = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);

    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);

#if defined(WITH_JIT_TUNING)
    gDvm.verboseShutdown = true;
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        ALOGE("jit table allocation failed");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvmJit.pProfTable
     * and then restoring its original value.  However, this action
     * is not synchronized for speed so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by nulling the global pointer.  Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        ALOGE("jit prof table allocation failed");
        free(pJitTable);
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
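    /*
     * Each counter starts at the compilation threshold; the interpreter is
     * assumed to count down and request a trace when a counter reaches zero.
     */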
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    for (i = 0; i < gDvmJit.jitTableSize; i++) {
        pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is chain field wide enough for termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    /* Allocate the trace profiling structure */
    pJitTraceProfCounters = (JitTraceProfCounters*)
                             calloc(1, sizeof(*pJitTraceProfCounters));
    if (!pJitTraceProfCounters) {
        ALOGE("jit trace prof counters allocation failed");
        free(pJitTable);
        free(pJitProfTable);
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }

    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    /*
     * If the VM is launched with wait-on-the-debugger, we will need to hide
     * the profile table here
     */
    gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
    dvmJitUpdateThreadStateAll();
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);

    /* Enable signature breakpoints by customizing the following code */
#if defined(SIGNATURE_BREAKPOINT)
    /*
     * Suppose one sees the following native crash in the bugreport:
     * I/DEBUG   ( 1638): Build fingerprint: 'unknown'
     * I/DEBUG   ( 1638): pid: 2468, tid: 2507  >>> com.google.android.gallery3d
     * I/DEBUG   ( 1638): signal 11 (SIGSEGV), fault addr 00001400
     * I/DEBUG   ( 1638):  r0 44ea7190  r1 44e4f7b8  r2 44ebc710  r3 00000000
     * I/DEBUG   ( 1638):  r4 00000a00  r5 41862dec  r6 4710dc10  r7 00000280
     * I/DEBUG   ( 1638):  r8 ad010f40  r9 46a37a12  10 001116b0  fp 42a78208
     * I/DEBUG   ( 1638):  ip 00000090  sp 4710dbc8  lr ad060e67  pc 46b90682
     * cpsr 00000030
     * I/DEBUG   ( 1638):  #00  pc 46b90682 /dev/ashmem/dalvik-jit-code-cache
     * I/DEBUG   ( 1638):  #01  pc 00060e62  /system/lib/libdvm.so
     *
     * I/DEBUG   ( 1638): code around pc:
     * I/DEBUG   ( 1638): 46b90660 6888d01c 34091dcc d2174287 4a186b68
     * I/DEBUG   ( 1638): 46b90670 d0052800 68006809 28004790 6b68d00e
     * I/DEBUG   ( 1638): 46b90680 512000bc 37016eaf 6ea866af 6f696028
     * I/DEBUG   ( 1638): 46b90690 682a6069 429a686b e003da08 6df1480b
     * I/DEBUG   ( 1638): 46b906a0 1c2d4788 47806d70 46a378fa 47806d70
     *
     * Clearly it is a JIT bug. To find out which translation contains the
     * offending code, the content of the memory dump around the faulting PC
     * can be pasted into the gDvmJit.signatureBreakpoint[] array, and the next
     * time a similar compilation is created, the JIT compiler will replay the
     * trace in verbose mode so that the instruction sequence can be inspected
     * in detail.
     *
     * The length of the signature may take some experimentation to determine.
     * The rule of thumb is not to include PC-relative instructions in the
     * signature, since they are affected by the alignment of the compiled
     * code. However, a signature that's too short might increase the chance
     * of false-positive matches. Using gdbjithelper to disassemble the memory
     * content first might be a good companion approach.
     *
     * For example, if the next 4 words starting from 46b90680 are pasted into
     * the data structure:
     */

    gDvmJit.signatureBreakpointSize = 4;
    gDvmJit.signatureBreakpoint =
        (u4 *) malloc(sizeof(u4) * gDvmJit.signatureBreakpointSize);
    gDvmJit.signatureBreakpoint[0] = 0x512000bc;
    gDvmJit.signatureBreakpoint[1] = 0x37016eaf;
    gDvmJit.signatureBreakpoint[2] = 0x6ea866af;
    gDvmJit.signatureBreakpoint[3] = 0x6f696028;

    /*
     * The following log will be printed when a match is found in subsequent
     * test runs:
     *
     * D/dalvikvm( 2468): Signature match starting from offset 0x34 (4 words)
     * D/dalvikvm( 2468): --------
     * D/dalvikvm( 2468): Compiler: Building trace for computeVisibleItems,
     * offset 0x1f7
     * D/dalvikvm( 2468): 0x46a37a12: 0x0090 add-int v42, v5, v26
     * D/dalvikvm( 2468): 0x46a37a16: 0x004d aput-object v13, v14, v42
     * D/dalvikvm( 2468): 0x46a37a1a: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a3794e: 0x00d8 add-int/lit8 v26, v26, (#1)
     * D/dalvikvm( 2468): 0x46a37952: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a378ee: 0x0002 move/from16 v0, v26, (#0)
     * D/dalvikvm( 2468): 0x46a378f2: 0x0002 move/from16 v1, v29, (#0)
     * D/dalvikvm( 2468): 0x46a378f6: 0x0035 if-ge v0, v1, (#10)
     * D/dalvikvm( 2468): TRACEINFO (554): 0x46a37624
     * Lcom/cooliris/media/GridLayer;computeVisibleItems 0x1f7 14 of 934, 8
     * blocks
     *     :
     *     :
     * D/dalvikvm( 2468): 0x20 (0020): ldr     r0, [r5, #52]
     * D/dalvikvm( 2468): 0x22 (0022): ldr     r2, [pc, #96]
     * D/dalvikvm( 2468): 0x24 (0024): cmp     r0, #0
     * D/dalvikvm( 2468): 0x26 (0026): beq     0x00000034
     * D/dalvikvm( 2468): 0x28 (0028): ldr     r1, [r1, #0]
     * D/dalvikvm( 2468): 0x2a (002a): ldr     r0, [r0, #0]
     * D/dalvikvm( 2468): 0x2c (002c): blx     r2
     * D/dalvikvm( 2468): 0x2e (002e): cmp     r0, #0
     * D/dalvikvm( 2468): 0x30 (0030): beq     0x00000050
     * D/dalvikvm( 2468): 0x32 (0032): ldr     r0, [r5, #52]
     * D/dalvikvm( 2468): 0x34 (0034): lsls    r4, r7, #2
     * D/dalvikvm( 2468): 0x36 (0036): str     r0, [r4, r4]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x01fb @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0195:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0195 @ add-int/lit8 v26,
     * v26, (#1)
     * D/dalvikvm( 2468): 0x38 (0038): ldr     r7, [r5, #104]
     * D/dalvikvm( 2468): 0x3a (003a): adds    r7, r7, #1
     * D/dalvikvm( 2468): 0x3c (003c): str     r7, [r5, #104]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0197 @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0165:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0165 @ move/from16 v0, v26,
     * (#0)
     * D/dalvikvm( 2468): 0x3e (003e): ldr     r0, [r5, #104]
     * D/dalvikvm( 2468): 0x40 (0040): str     r0, [r5, #0]
     *
     * The "str r0, [r4, r4]" is indeed the culprit of the native crash.
     */
#endif

    return true;

fail:
    return false;
}

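/*
 * Entry point of the compiler thread: optionally delay startup when running
 * inside the Android framework, perform one-time initialization, then loop
 * dequeuing and compiling work orders until asked to halt.
 */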
static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before
     * receiving translation requests on the assumption that process
     * start-up code isn't worth compiling.  We'll resume when the framework
     * signals us that the first screen draw has happened, or the timer
     * below expires (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompilation and when the compiler thread reaches this
     * point. In case the callback happens earlier, we bypass the delay here
     * so as not to permanently hold the system_server (which is not using the
     * timed wait) in interpreter-only mode.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use the indefinite wait on the
         * condition variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the condition variable will never be signaled and the
         * system server will remain in the interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            ALOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * it is created, we just fake its state as VMWAIT so that it can be a
     * bit late when there is a suspend request pending.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                dvmUnlockMutex(&gDvmJit.compilerLock);
#if defined(WITH_JIT_TUNING)
                /*
                 * This is live across setjmp().  Mark it volatile to suppress
                 * a gcc warning.  We should not need this since it is assigned
                 * only once, but gcc is not smart enough.
                 */
                volatile u8 startTime = dvmGetRelativeTimeUsec();
#endif
                /*
                 * Check whether there is a suspend request on me.  This
                 * is necessary to allow a clean shutdown.
                 *
                 * However, in the blocking stress testing mode, let the
                 * compiler thread continue doing compilations to unblock
                 * other requesting threads. This may occasionally prevent
                 * shutdown from proceeding cleanly in a standalone invocation
                 * of the VM, but that should be acceptable.
                 */
                if (!gDvmJit.blockingMode)
                    dvmCheckSuspendPending(dvmThreadSelf());
                /* Is the JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, consider it time to reset
                     * the code cache too.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    ALOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    jmp_buf jmpBuf;
                    work.bailPtr = &jmpBuf;
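                    /*
                     * setjmp() returns 0 on the initial call and nonzero if
                     * the compiler bails out and longjmp()s back here.
                     */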
                    bool aborted = setjmp(jmpBuf);
                    if (!aborted) {
                        bool codeCompiled = dvmCompilerDoWork(&work);
                        /*
                         * Make sure we are still operating with the
                         * same translation cache version.  See
                         * Issue 4271784 for details.
                         */
                        dvmLockMutex(&gDvmJit.compilerLock);
                        if ((work.result.cacheVersion ==
                             gDvmJit.cacheVersion) &&
                             codeCompiled &&
                             !work.result.discardResult &&
                             work.result.codeAddress) {
                            dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                              work.result.instructionSet,
                                              false, /* not method entry */
                                              work.result.profileCodeSize);
                        }
                        dvmUnlockMutex(&gDvmJit.compilerLock);
                    }
                    dvmCompilerArenaReset();
                }
                free(work.info);
#if defined(WITH_JIT_TUNING)
                gDvmJit.jitTime += dvmGetRelativeTimeUsec() - startTime;
#endif
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        ALOGD("Compiler thread shutting down");
    return NULL;
}

bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer the rest of initialization until we're sure JIT'ing makes sense.
     * Launch the compiler thread, which will do the real initialization if
     * and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}

void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;
    dvmJitUpdateThreadStateAll();

    if (gDvm.verboseShutdown ||
            gDvmJit.profileMode == kTraceProfilingContinuous) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {
        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            ALOGW("Compiler thread join failed");
        else if (gDvm.verboseShutdown)
            ALOGD("Compiler thread has shut down");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow the compiler
     * thread to be restarted after it exits here.  We aren't freeing
     * the JitTable or the ProfTable because threads that may still be
     * running or in the process of shutting down may hold references to
     * them.
     */
}

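/*
 * Re-evaluate whether the JIT should be active. Called when the debugger or
 * a profiler attaches or detaches; the first time method tracing is enabled,
 * existing translations are flushed so they can be rebuilt with trace
 * support.
 */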
void dvmCompilerUpdateGlobalState()
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    /*
     * The tableLock might not be initialized yet by the compiler thread if
     * a debugger is attached at the very beginning of the VM launch. If
     * pProfTableCopy is NULL, the lock is not initialized yet and we don't
     * need to refresh anything either.
     */
    if (gDvmJit.pProfTableCopy == NULL) {
        return;
    }

    /*
     * On the first enabling of method tracing, switch the compiler
     * into a mode that includes trace support for invokes and returns.
     * If there are any existing translations, flush them.  NOTE:  we
     * can't blindly flush the translation cache because this code
     * may be executed before the compiler thread has finished
     * initialization.
     */
    if ((gDvm.activeProfilers != 0) &&
        !gDvmJit.methodTraceSupport) {
        bool resetRequired;
        /*
         * compilerLock will prevent new compilations from being
         * installed while we are working.
         */
        dvmLockMutex(&gDvmJit.compilerLock);
        gDvmJit.cacheVersion++; // invalidate compilations in flight
        gDvmJit.methodTraceSupport = true;
        resetRequired = (gDvmJit.numCompilations != 0);
        dvmUnlockMutex(&gDvmJit.compilerLock);
        if (resetRequired) {
            dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
            resetCodeCache();
            dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
        }
    }

    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !dvmDebuggerOrProfilerActive();

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
    // Make sure all threads have current values
    dvmJitUpdateThreadStateAll();
}