Compiler.cpp revision 0c2dc522d0e120f346cf0a40c8cf0c93346131c2
/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>
#include <errno.h>
#include <cutils/ashmem.h>

#include "Dalvik.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"
#ifdef ARCH_IA32
#include "codegen/x86/Translator.h"
#include "codegen/x86/Lower.h"
#endif

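/*
 * Boundary markers for the pre-assembled compiler templates that are copied
 * into the base of the code cache below.  The "dmv" spelling of the end
 * marker is historical and matches the symbol as exported by the template
 * assembly, so it is deliberately left as-is here.
 */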
extern "C" void dvmCompilerTemplateStart(void);
extern "C" void dmvCompilerTemplateEnd(void);

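/*
 * Return the current depth of the compiler work queue.  Callers are expected
 * to hold gDvmJit.compilerLock while reading it.
 */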
static inline int workQueueLength(void)
{
    return gDvmJit.compilerQueueLength;
}

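/*
 * Pop the oldest work order off the circular work queue.  The vacated slot is
 * marked kWorkOrderInvalid, the dequeue index wraps at
 * COMPILER_WORK_QUEUE_SIZE, and compilerQueueEmpty is signalled once the
 * queue drains.  Must be called with gDvmJit.compilerLock held.
 */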
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}

/*
 * Enqueue a work order - retrying until successful.  If the attempt to enqueue
 * is repeatedly unsuccessful, assume the JIT is in a bad state and force a
 * code cache reset.
 */
#define ENQUEUE_MAX_RETRIES 20
void dvmCompilerForceWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    bool success;
    int retries = 0;
    do {
        success = dvmCompilerWorkEnqueue(pc, kind, info);
        if (!success) {
            retries++;
            if (retries > ENQUEUE_MAX_RETRIES) {
                ALOGE("JIT: compiler queue wedged - forcing reset");
                gDvmJit.codeCacheFull = true;  // Force reset
                success = true;  // Because we'll drop the order now anyway
            } else {
                dvmLockMutex(&gDvmJit.compilerLock);
                pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                                  &gDvmJit.compilerLock);
                dvmUnlockMutex(&gDvmJit.compilerLock);
            }
        }
    } while (!success);
}

/*
 * Attempt to enqueue a work order, returning true if successful.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
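/*
 * Typical call pattern (illustrative sketch only - the work-order kind and
 * the descriptor type come from the trace-selection code, not from this
 * file):
 *
 *     JitTraceDescription *desc = ...;   // heap-allocated by the caller
 *     if (!dvmCompilerWorkEnqueue(pc, kWorkOrderTrace, desc)) {
 *         free(desc);   // queue or code cache full - caller keeps ownership
 *     }
 */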
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    dvmLockMutex(&gDvmJit.compilerLock);

    /*
     * Return if the queue or the code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        dvmUnlockMutex(&gDvmJit.compilerLock);
        return false;
    }

    for (numWork = gDvmJit.compilerQueueLength,
           i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc) {
            dvmUnlockMutex(&gDvmJit.compilerLock);
            return true;
        }
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.methodCompilationAborted = NULL;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.cacheVersion = gDvmJit.cacheVersion;
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}

/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->suspendCount == 0) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked,
         * but the compiler thread will only signal once when the queue is
         * emptied. Furthermore, the compiler thread may have been shut down,
         * so the blocked thread may never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock,
                            1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}

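/*
 * Allocate and initialize the JIT code cache: create an ashmem-backed,
 * executable mapping, copy the compiler templates to its base (on non-x86
 * targets), and then write-protect the region.  Returns false if the cache
 * could not be set up.
 */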
bool dvmCompilerSetupCodeCache(void)
{
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        ALOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE, fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        ALOGE("Failed to mmap the JIT code cache: %s", strerror(errno));
        return false;
    }

    gDvmJit.pageSizeMask = getpagesize() - 1;

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // ALOGD("Code cache starts at %p", gDvmJit.codeCache);

#ifndef ARCH_IA32
    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dmvCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    /*
     * Work around a CPU bug by keeping the 32-bit ARM handler code in its own
     * page.
     */
    if (dvmCompilerInstructionSet() == DALVIK_JIT_THUMB2) {
        templateSize = (templateSize + 4095) & ~4095;
    }

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part in the code cache that is being used now */
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache + templateSize, 0);

    int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                          PROTECT_CODE_CACHE_ATTRS);

    if (result == -1) {
        ALOGE("Failed to remove the write permission for the code cache");
        dvmAbort();
    }
#else
    gDvmJit.codeCacheByteUsed = 0;
    stream = (char*)gDvmJit.codeCache + gDvmJit.codeCacheByteUsed;
    ALOGV("codeCache = %p stream = %p before initJIT", gDvmJit.codeCache, stream);
    streamStart = stream;
    initJIT(NULL, NULL);
    gDvmJit.templateSize = (stream - streamStart);
    gDvmJit.codeCacheByteUsed = (stream - streamStart);
    ALOGV("stream = %p after initJIT", stream);
#endif

    return true;
}

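/*
 * Walk a thread's Dalvik frames and null out each saved returnAddr so the
 * thread cannot return into translations that are about to be discarded.
 * When "print" is true, each frame is also logged for diagnostics.
 */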
static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->interpSave.curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        ALOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame((u4*)fp)) {
                ALOGD("  #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                ALOGD("  #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                         " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}

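/*
 * Discard all translations and return the code cache to its post-startup
 * state.  Expected to run at a safe point with the mutator threads suspended;
 * if any thread is still executing inside the code cache the reset is
 * postponed and only the delayed-reset counter is bumped.
 */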
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used, or
         * 2) the thread stuck in the JIT land will soon return
         *    to the interpreter land
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
        /* Cancel any ongoing trace selection */
        dvmDisableSubMode(thread, kSubModeJitTraceBuild);
    }
    dvmUnlockThreadList();

    if (inJit) {
        ALOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Update the translation cache version */
    gDvmJit.cacheVersion++;

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    dvmCompilerCacheClear((char *) gDvmJit.codeCache + gDvmJit.templateSize,
                          gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);

    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache +
                          gDvmJit.codeCacheByteUsed, 0);

    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /*
     * Reset the inflight compilation address (can only be done in safe points
     * or by the compiler thread when its thread state is RUNNING).
     */
    gDvmJit.inflightBaseAddr = NULL;

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    ALOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}

/*
 * Perform actions that are only safe when all threads are suspended. Currently
 * we do:
 * 1) Check if the code cache is full. If so, reset it and restart populating
 *    it from scratch.
 * 2) Patch predicted chaining cells by consuming recorded work orders.
 */
void dvmCompilerPerformSafePointChecks(void)
{
    if (gDvmJit.codeCacheFull) {
        resetCodeCache();
    }
    dvmCompilerPatchInlineCache();
}

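/*
 * One-time initialization performed on the compiler thread: architecture
 * setup, code cache creation (unless inherited from the zygote), arena and
 * statistics tables, and allocation of the JitTable, the profiling counter
 * table, and the trace profiling counters.  Finishes with a suspend/resume
 * round trip so running threads pick up the freshly published pointers.
 */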
static bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    JitTraceProfCounters *pJitTraceProfCounters = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Set up the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    /* Cache the thread pointer */
    gDvmJit.compilerThread = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);

    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);

#if defined(WITH_JIT_TUNING)
    gDvm.verboseShutdown = true;
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        ALOGE("jit table allocation failed");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvmJit.pProfTable
     * and then restoring its original value.  However, this action
     * is not synchronized for speed so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by nulling the global pointer.  Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        ALOGE("jit prof table allocation failed");
        free(pJitTable);
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    for (i = 0; i < gDvmJit.jitTableSize; i++) {
        pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is chain field wide enough for termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    /* Allocate the trace profiling structure */
    pJitTraceProfCounters = (JitTraceProfCounters*)
                             calloc(1, sizeof(*pJitTraceProfCounters));
    if (!pJitTraceProfCounters) {
        ALOGE("jit trace prof counters allocation failed");
        free(pJitProfTable);
        free(pJitTable);
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }

    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    /*
     * If the VM is launched with wait-on-the-debugger, we will need to hide
     * the profile table here
     */
    gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
    dvmJitUpdateThreadStateAll();
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);

    /* Enable signature breakpoints by customizing the following code */
#if defined(SIGNATURE_BREAKPOINT)
    /*
     * Suppose one sees the following native crash in the bugreport:
     * I/DEBUG   ( 1638): Build fingerprint: 'unknown'
     * I/DEBUG   ( 1638): pid: 2468, tid: 2507  >>> com.google.android.gallery3d
     * I/DEBUG   ( 1638): signal 11 (SIGSEGV), fault addr 00001400
     * I/DEBUG   ( 1638):  r0 44ea7190  r1 44e4f7b8  r2 44ebc710  r3 00000000
     * I/DEBUG   ( 1638):  r4 00000a00  r5 41862dec  r6 4710dc10  r7 00000280
     * I/DEBUG   ( 1638):  r8 ad010f40  r9 46a37a12  10 001116b0  fp 42a78208
     * I/DEBUG   ( 1638):  ip 00000090  sp 4710dbc8  lr ad060e67  pc 46b90682
     * cpsr 00000030
     * I/DEBUG   ( 1638):  #00  pc 46b90682 /dev/ashmem/dalvik-jit-code-cache
     * I/DEBUG   ( 1638):  #01  pc 00060e62  /system/lib/libdvm.so
     *
     * I/DEBUG   ( 1638): code around pc:
     * I/DEBUG   ( 1638): 46b90660 6888d01c 34091dcc d2174287 4a186b68
     * I/DEBUG   ( 1638): 46b90670 d0052800 68006809 28004790 6b68d00e
     * I/DEBUG   ( 1638): 46b90680 512000bc 37016eaf 6ea866af 6f696028
     * I/DEBUG   ( 1638): 46b90690 682a6069 429a686b e003da08 6df1480b
     * I/DEBUG   ( 1638): 46b906a0 1c2d4788 47806d70 46a378fa 47806d70
     *
     * Clearly it is a JIT bug. To find out which translation contains the
     * offending code, the content of the memory dump around the faulting PC
     * can be pasted into the gDvmJit.signatureBreakpoint[] array. The next
     * time a similar compilation is created, the JIT compiler will replay the
     * trace in verbose mode so that the instruction sequence can be
     * investigated in detail.
     *
     * Some experimentation may be needed to determine the right signature
     * length. The rule of thumb is to avoid PC-relative instructions in the
     * signature since they are affected by the alignment of the compiled code.
     * However, a signature that's too short might increase the chance of false
     * positive matches. Using gdbjithelper to disassemble the memory content
     * first might be a good companion approach.
     *
     * For example, if the next 4 words starting from 46b90680 are pasted into
     * the data structure:
     */

    gDvmJit.signatureBreakpointSize = 4;
    gDvmJit.signatureBreakpoint =
        (u4 *) malloc(sizeof(u4) * gDvmJit.signatureBreakpointSize);
    gDvmJit.signatureBreakpoint[0] = 0x512000bc;
    gDvmJit.signatureBreakpoint[1] = 0x37016eaf;
    gDvmJit.signatureBreakpoint[2] = 0x6ea866af;
    gDvmJit.signatureBreakpoint[3] = 0x6f696028;

    /*
     * The following log will be printed when a match is found in subsequent
     * testing:
     *
     * D/dalvikvm( 2468): Signature match starting from offset 0x34 (4 words)
     * D/dalvikvm( 2468): --------
     * D/dalvikvm( 2468): Compiler: Building trace for computeVisibleItems,
     * offset 0x1f7
     * D/dalvikvm( 2468): 0x46a37a12: 0x0090 add-int v42, v5, v26
     * D/dalvikvm( 2468): 0x46a37a16: 0x004d aput-object v13, v14, v42
     * D/dalvikvm( 2468): 0x46a37a1a: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a3794e: 0x00d8 add-int/lit8 v26, v26, (#1)
     * D/dalvikvm( 2468): 0x46a37952: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a378ee: 0x0002 move/from16 v0, v26, (#0)
     * D/dalvikvm( 2468): 0x46a378f2: 0x0002 move/from16 v1, v29, (#0)
     * D/dalvikvm( 2468): 0x46a378f6: 0x0035 if-ge v0, v1, (#10)
     * D/dalvikvm( 2468): TRACEINFO (554): 0x46a37624
     * Lcom/cooliris/media/GridLayer;computeVisibleItems 0x1f7 14 of 934, 8
     * blocks
     *     :
     *     :
     * D/dalvikvm( 2468): 0x20 (0020): ldr     r0, [r5, #52]
     * D/dalvikvm( 2468): 0x22 (0022): ldr     r2, [pc, #96]
     * D/dalvikvm( 2468): 0x24 (0024): cmp     r0, #0
     * D/dalvikvm( 2468): 0x26 (0026): beq     0x00000034
     * D/dalvikvm( 2468): 0x28 (0028): ldr     r1, [r1, #0]
     * D/dalvikvm( 2468): 0x2a (002a): ldr     r0, [r0, #0]
     * D/dalvikvm( 2468): 0x2c (002c): blx     r2
     * D/dalvikvm( 2468): 0x2e (002e): cmp     r0, #0
     * D/dalvikvm( 2468): 0x30 (0030): beq     0x00000050
     * D/dalvikvm( 2468): 0x32 (0032): ldr     r0, [r5, #52]
     * D/dalvikvm( 2468): 0x34 (0034): lsls    r4, r7, #2
     * D/dalvikvm( 2468): 0x36 (0036): str     r0, [r4, r4]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x01fb @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0195:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0195 @ add-int/lit8 v26,
     * v26, (#1)
     * D/dalvikvm( 2468): 0x38 (0038): ldr     r7, [r5, #104]
     * D/dalvikvm( 2468): 0x3a (003a): adds    r7, r7, #1
     * D/dalvikvm( 2468): 0x3c (003c): str     r7, [r5, #104]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0197 @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0165:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0165 @ move/from16 v0, v26,
     * (#0)
     * D/dalvikvm( 2468): 0x3e (003e): ldr     r0, [r5, #104]
     * D/dalvikvm( 2468): 0x40 (0040): str     r0, [r5, #0]
     *
     * The "str r0, [r4, r4]" is indeed the culprit of the native crash.
     */
#endif

    return true;

fail:
    return false;

}

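/*
 * Entry point of the compiler thread.  When running inside the Android
 * framework the thread first waits for the framework (or a timeout) to enable
 * the JIT, then performs startup and loops on the work queue, compiling
 * orders until haltCompilerThread is set.
 */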
static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before
     * receiving translation requests on the assumption that process start
     * up code isn't worth compiling.  We'll resume when the framework
     * signals us that the first screen draw has happened, or the timer
     * below expires (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompilation and when the compiler thread reaches this
     * point. If the callback happens earlier, we bypass the delay here so
     * that the system_server (which does not use the timed wait) is not
     * permanently held in interpreter-only mode.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use the indefinite wait on the
         * condition variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the condition variable will never be signaled and the
         * system server will remain in the interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            ALOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                 &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * it is created, we just fake its state as VMWAIT so that it can be a
     * bit late when there is a suspend request pending.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                dvmUnlockMutex(&gDvmJit.compilerLock);
#if defined(WITH_JIT_TUNING)
                /*
                 * This is live across setjmp().  Mark it volatile to suppress
                 * a gcc warning.  We should not need this since it is assigned
                 * only once but gcc is not smart enough.
                 */
                volatile u8 startTime = dvmGetRelativeTimeUsec();
#endif
                /*
                 * Check whether there is a suspend request on me.  This
                 * is necessary to allow a clean shutdown.
                 *
                 * However, in the blocking stress testing mode, let the
                 * compiler thread continue doing compilations to unblock
                 * other requesting threads. This may occasionally keep
                 * shutdown from proceeding cleanly in the standalone invocation
                 * of the vm, but this should be acceptable.
                 */
                if (!gDvmJit.blockingMode)
                    dvmCheckSuspendPending(dvmThreadSelf());
                /* Is JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, consider it's time to reset
                     * the code cache too.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    ALOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    jmp_buf jmpBuf;
                    work.bailPtr = &jmpBuf;
                    bool aborted = setjmp(jmpBuf);
                    if (!aborted) {
                        bool codeCompiled = dvmCompilerDoWork(&work);
                        /*
                         * Make sure we are still operating with the
                         * same translation cache version.  See
                         * Issue 4271784 for details.
                         */
                        dvmLockMutex(&gDvmJit.compilerLock);
                        if ((work.result.cacheVersion ==
                             gDvmJit.cacheVersion) &&
                             codeCompiled &&
                             !work.result.discardResult &&
                             work.result.codeAddress) {
                            dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                              work.result.instructionSet,
                                              false, /* not method entry */
                                              work.result.profileCodeSize);
                        }
                        dvmUnlockMutex(&gDvmJit.compilerLock);
                    }
                    dvmCompilerArenaReset();
                }
                free(work.info);
#if defined(WITH_JIT_TUNING)
                gDvmJit.jitTime += dvmGetRelativeTimeUsec() - startTime;
#endif
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        ALOGD("Compiler thread shutting down");
    return NULL;
}

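/*
 * Initialize the compiler's locks, condition variables, and work queue, then
 * launch the compiler thread.  The heavyweight initialization is deferred to
 * compilerThreadStartup() on that thread.
 */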
bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer the rest of initialization until we're sure JIT'ng makes sense.
     * Launch the compiler thread, which will do the real initialization if
     * and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}

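/*
 * Shut the compiler down: disable new translation requests, wake and join the
 * compiler thread if it was started, and unchain existing translations.  See
 * the note at the end about tables that are intentionally not freed.
 */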
void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;
    dvmJitUpdateThreadStateAll();

    if (gDvm.verboseShutdown ||
            gDvmJit.profileMode == kTraceProfilingContinuous) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {
        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            ALOGW("Compiler thread join failed");
        else if (gDvm.verboseShutdown)
            ALOGD("Compiler thread has shut down");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here.  We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}

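/*
 * Re-evaluate the JIT's global state after a debugger or profiler attach or
 * detach: enable method-trace support (resetting the code cache if needed)
 * and publish or hide the profiling table so threads start or stop triggering
 * trace selection.
 */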
void dvmCompilerUpdateGlobalState()
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    /*
     * The tableLock might not be initialized yet by the compiler thread if
     * the debugger is attached from the very beginning of the VM launch. If
     * pProfTableCopy is NULL, the lock is not initialized yet and we don't
     * need to refresh anything either.
     */
    if (gDvmJit.pProfTableCopy == NULL) {
        return;
    }

    /*
     * On the first enabling of method tracing, switch the compiler
     * into a mode that includes trace support for invokes and returns.
     * If there are any existing translations, flush them.  NOTE:  we
     * can't blindly flush the translation cache because this code
     * may be executed before the compiler thread has finished
     * initialization.
     */
    if ((gDvm.activeProfilers != 0) &&
        !gDvmJit.methodTraceSupport) {
        bool resetRequired;
        /*
         * compilerLock will prevent new compilations from being
         * installed while we are working.
         */
        dvmLockMutex(&gDvmJit.compilerLock);
        gDvmJit.cacheVersion++; // invalidate compilations in flight
        gDvmJit.methodTraceSupport = true;
        resetRequired = (gDvmJit.numCompilations != 0);
        dvmUnlockMutex(&gDvmJit.compilerLock);
        if (resetRequired) {
            dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
            resetCodeCache();
            dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
        }
    }

    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !dvmDebuggerOrProfilerActive();

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
    // Make sure all threads have current values
    dvmJitUpdateThreadStateAll();
}