Jit.cpp revision 291c84f60853d30e1c0d79dd08c5e5164f588e26
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifdef WITH_JIT

/*
 * Target independent portion of Android's Jit
 */

#include "Dalvik.h"
#include "Jit.h"

#include "libdex/DexOpcodes.h"
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <signal.h>
#include "compiler/Compiler.h"
#include "compiler/CompilerUtility.h"
#include "compiler/CompilerIR.h"
#include <errno.h>

#if defined(WITH_SELF_VERIFICATION)
/* Allocate space for per-thread ShadowSpace data structures */
void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
{
    self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
    if (self->shadowSpace == NULL)
        return NULL;

    self->shadowSpace->registerSpaceSize = REG_SPACE;
    self->shadowSpace->registerSpace =
        (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));

    return self->shadowSpace->registerSpace;
}

/* Free per-thread ShadowSpace data structures */
void dvmSelfVerificationShadowSpaceFree(Thread* self)
{
    free(self->shadowSpace->registerSpace);
    free(self->shadowSpace);
}

/*
 * Save out PC, FP, thread state, and registers to shadow space.
 * Return a pointer to the shadow space for JIT to use.
 *
 * The set of saved state from the Thread structure is:
 *     pc  (Dalvik PC)
 *     fp  (Dalvik FP)
 *     retval
 *     method
 *     methodClassDex
 *     interpStackEnd
 */
void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
                                   Thread* self, int targetTrace)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    unsigned preBytes = self->interpSave.method->outsSize*4 +
        sizeof(StackSaveArea);
    unsigned postBytes = self->interpSave.method->registersSize*4;

    //LOGD("### selfVerificationSaveState(%d) pc: %#x fp: %#x",
    //    self->threadId, (int)pc, (int)fp);

    if (shadowSpace->selfVerificationState != kSVSIdle) {
        LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("PC: %#x FP: %#x", (int)pc, (int)fp);
    }
    shadowSpace->selfVerificationState = kSVSStart;

    // Dynamically grow shadow register space if necessary
    if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
        free(shadowSpace->registerSpace);
        shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
        shadowSpace->registerSpace =
            (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
    }

    // Remember original state
    shadowSpace->startPC = pc;
    shadowSpace->fp = fp;
    shadowSpace->retval = self->interpSave.retval;
    shadowSpace->interpStackEnd = self->interpStackEnd;

    /*
     * Store the original method here in case the trace ends with a
     * return/invoke, which would have replaced the current method.
     */
    shadowSpace->method = self->interpSave.method;
    shadowSpace->methodClassDex = self->interpSave.methodClassDex;

    shadowSpace->shadowFP = shadowSpace->registerSpace +
                            shadowSpace->registerSpaceSize - postBytes/4;

    self->interpSave.curFrame = (u4*)shadowSpace->shadowFP;
    self->interpStackEnd = (u1*)shadowSpace->registerSpace;

    // Create a copy of the stack
    memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
        preBytes+postBytes);

    // Setup the shadowed heap space
    shadowSpace->heapSpaceTail = shadowSpace->heapSpace;

    // Reset trace length
    shadowSpace->traceLength = 0;

    return shadowSpace;
}
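
/*
 * Illustration (hypothetical numbers, not original code): how the
 * preBytes/postBytes computed above partition the copied frame, for a
 * method with outsSize=2 and registersSize=5 on a 32-bit target.
 */
#if 0
    unsigned preBytes  = 2*4 + sizeof(StackSaveArea); /* outs + save area below fp */
    unsigned postBytes = 5*4;                         /* v0..v4 starting at fp     */
    /*
     * The memcpy above copies [fp - preBytes, fp + postBytes), and shadowFP
     * is positioned so that this span ends exactly at the top of
     * registerSpace.
     */
#endif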

/*
 * Save ending PC, FP and compiled code exit point to shadow space.
 * Return a pointer to the shadow space for JIT to restore state.
 */
void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
                                      SelfVerificationState exitState,
                                      Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    shadowSpace->endPC = pc;
    shadowSpace->endShadowFP = fp;
    shadowSpace->jitExitState = exitState;

    //LOGD("### selfVerificationRestoreState(%d) pc: %#x fp: %#x endPC: %#x",
    //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    //    (int)pc);

    if (shadowSpace->selfVerificationState != kSVSStart) {
        LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, shadowSpace->selfVerificationState);
        LOGD("********** SHADOW STATE DUMP **********");
        LOGD("Dalvik PC: %#x endPC: %#x", (int)shadowSpace->startPC,
            (int)shadowSpace->endPC);
        LOGD("Interp FP: %#x", (int)shadowSpace->fp);
        LOGD("Shadow FP: %#x endFP: %#x", (int)shadowSpace->shadowFP,
            (int)shadowSpace->endShadowFP);
    }

    // Special case when punting after a single instruction
    if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
        shadowSpace->selfVerificationState = kSVSIdle;
    } else {
        shadowSpace->selfVerificationState = exitState;
    }

    /* Restore state before returning */
    self->interpSave.pc = shadowSpace->startPC;
    self->interpSave.curFrame = shadowSpace->fp;
    self->interpSave.method = shadowSpace->method;
    self->interpSave.methodClassDex = shadowSpace->methodClassDex;
    self->interpSave.retval = shadowSpace->retval;
    self->interpStackEnd = shadowSpace->interpStackEnd;

    return shadowSpace;
}

/* Print contents of virtual registers */
static void selfVerificationPrintRegisters(int* addr, int* addrRef,
                                           int numWords)
{
    int i;
    for (i = 0; i < numWords; i++) {
        LOGD("(v%d) 0x%8x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
    }
}
" X" : ""); 181 } 182} 183 184/* Print values maintained in shadowSpace */ 185static void selfVerificationDumpState(const u2* pc, Thread* self) 186{ 187 ShadowSpace* shadowSpace = self->shadowSpace; 188 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->interpSave.curFrame); 189 int frameBytes = (int) shadowSpace->registerSpace + 190 shadowSpace->registerSpaceSize*4 - 191 (int) shadowSpace->shadowFP; 192 int localRegs = 0; 193 int frameBytes2 = 0; 194 if ((uintptr_t)self->interpSave.curFrame < (uintptr_t)shadowSpace->fp) { 195 localRegs = (stackSave->method->registersSize - 196 stackSave->method->insSize)*4; 197 frameBytes2 = (int) shadowSpace->fp - 198 (int)self->interpSave.curFrame - localRegs; 199 } 200 LOGD("********** SHADOW STATE DUMP **********"); 201 LOGD("CurrentPC: %#x, Offset: 0x%04x", (int)pc, 202 (int)(pc - stackSave->method->insns)); 203 LOGD("Class: %s", shadowSpace->method->clazz->descriptor); 204 LOGD("Method: %s", shadowSpace->method->name); 205 LOGD("Dalvik PC: %#x endPC: %#x", (int)shadowSpace->startPC, 206 (int)shadowSpace->endPC); 207 LOGD("Interp FP: %#x endFP: %#x", (int)shadowSpace->fp, 208 (int)self->interpSave.curFrame); 209 LOGD("Shadow FP: %#x endFP: %#x", (int)shadowSpace->shadowFP, 210 (int)shadowSpace->endShadowFP); 211 LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes, 212 localRegs, frameBytes2); 213 LOGD("Trace length: %d State: %d", shadowSpace->traceLength, 214 shadowSpace->selfVerificationState); 215} 216 217/* Print decoded instructions in the current trace */ 218static void selfVerificationDumpTrace(const u2* pc, Thread* self) 219{ 220 ShadowSpace* shadowSpace = self->shadowSpace; 221 StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->interpSave.curFrame); 222 int i, addr, offset; 223 DecodedInstruction *decInsn; 224 225 LOGD("********** SHADOW TRACE DUMP **********"); 226 for (i = 0; i < shadowSpace->traceLength; i++) { 227 addr = shadowSpace->trace[i].addr; 228 offset = (int)((u2*)addr - stackSave->method->insns); 229 decInsn = &(shadowSpace->trace[i].decInsn); 230 /* Not properly decoding instruction, some registers may be garbage */ 231 LOGD("%#x: (0x%04x) %s", 232 addr, offset, dexGetOpcodeName(decInsn->opcode)); 233 } 234} 235 236/* Code is forced into this spin loop when a divergence is detected */ 237static void selfVerificationSpinLoop(ShadowSpace *shadowSpace) 238{ 239 const u2 *startPC = shadowSpace->startPC; 240 JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL); 241 if (desc) { 242 dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc); 243 /* 244 * This function effectively terminates the VM right here, so not 245 * freeing the desc pointer when the enqueuing fails is acceptable. 246 */ 247 } 248 gDvmJit.selfVerificationSpin = true; 249 while(gDvmJit.selfVerificationSpin) sleep(10); 250} 251 252/* 253 * If here, we're re-interpreting an instruction that was included 254 * in a trace that was just executed. This routine is called for 255 * each instruction in the original trace, and compares state 256 * when it reaches the end point. 257 * 258 * TUNING: the interpretation mechanism now supports a counted 259 * single-step mechanism. If we were to associate an instruction 260 * count with each trace exit, we could just single-step the right 261 * number of cycles and then compare. This would improve detection 262 * of control divergences, as well as (slightly) simplify this code. 

/*
 * If here, we're re-interpreting an instruction that was included
 * in a trace that was just executed.  This routine is called for
 * each instruction in the original trace, and compares state
 * when it reaches the end point.
 *
 * TUNING: the interpretation mechanism now supports a counted
 * single-step mechanism.  If we were to associate an instruction
 * count with each trace exit, we could just single-step the right
 * number of cycles and then compare.  This would improve detection
 * of control divergences, as well as (slightly) simplify this code.
 */
void dvmCheckSelfVerification(const u2* pc, Thread* self)
{
    ShadowSpace *shadowSpace = self->shadowSpace;
    SelfVerificationState state = shadowSpace->selfVerificationState;

    DecodedInstruction decInsn;
    dexDecodeInstruction(pc, &decInsn);

    //LOGD("### DbgIntp(%d): PC: %#x endPC: %#x state: %d len: %d %s",
    //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    //    shadowSpace->traceLength, dexGetOpcodeName(decInsn.opcode));

    if (state == kSVSIdle || state == kSVSStart) {
        LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
            self->threadId, state);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
    }

    /*
     * Generalize the self verification state to kSVSDebugInterp unless the
     * entry reason is kSVSBackwardBranch or kSVSSingleStep.
     */
    if (state != kSVSBackwardBranch && state != kSVSSingleStep) {
        shadowSpace->selfVerificationState = kSVSDebugInterp;
    }

    /*
     * Check if the current pc matches the endPC.  Only check for a non-zero
     * trace length when backward branches are involved.
     */
    if (pc == shadowSpace->endPC &&
        (state == kSVSDebugInterp || state == kSVSSingleStep ||
         (state == kSVSBackwardBranch && shadowSpace->traceLength != 0))) {

        shadowSpace->selfVerificationState = kSVSIdle;

        /* Check register space */
        int frameBytes = (int) shadowSpace->registerSpace +
                         shadowSpace->registerSpaceSize*4 -
                         (int) shadowSpace->shadowFP;
        if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
            if (state == kSVSBackwardBranch) {
                /* State mismatch on backward branch - try one more iteration */
                shadowSpace->selfVerificationState = kSVSDebugInterp;
                goto log_and_continue;
            }
            LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
            selfVerificationDumpState(pc, self);
            selfVerificationDumpTrace(pc, self);
            LOGD("*** Interp Registers: addr: %#x bytes: %d",
                (int)shadowSpace->fp, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->fp,
                                           (int*)shadowSpace->shadowFP,
                                           frameBytes/4);
            LOGD("*** Shadow Registers: addr: %#x bytes: %d",
                (int)shadowSpace->shadowFP, frameBytes);
            selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
                                           (int*)shadowSpace->fp,
                                           frameBytes/4);
            selfVerificationSpinLoop(shadowSpace);
        }
        /* Check new frame if it exists (invokes only) */
        if ((uintptr_t)self->interpSave.curFrame < (uintptr_t)shadowSpace->fp) {
            StackSaveArea* stackSave =
                SAVEAREA_FROM_FP(self->interpSave.curFrame);
            int localRegs = (stackSave->method->registersSize -
                             stackSave->method->insSize)*4;
            int frameBytes2 = (int) shadowSpace->fp -
                              (int) self->interpSave.curFrame - localRegs;
            if (memcmp(((char*)self->interpSave.curFrame)+localRegs,
                ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
                if (state == kSVSBackwardBranch) {
                    /*
                     * State mismatch on backward branch - try one more
                     * iteration.
                     */
                    shadowSpace->selfVerificationState = kSVSDebugInterp;
                    goto log_and_continue;
                }
                LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
                    self->threadId);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                LOGD("*** Interp Registers: addr: %#x l: %d bytes: %d",
                    (int)self->interpSave.curFrame, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)self->interpSave.curFrame,
                                               (int*)shadowSpace->endShadowFP,
                                               (frameBytes2+localRegs)/4);
                LOGD("*** Shadow Registers: addr: %#x l: %d bytes: %d",
                    (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
                selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
                                               (int*)self->interpSave.curFrame,
                                               (frameBytes2+localRegs)/4);
                selfVerificationSpinLoop(shadowSpace);
            }
        }

        /* Check memory space */
        bool memDiff = false;
        ShadowHeap* heapSpacePtr;
        for (heapSpacePtr = shadowSpace->heapSpace;
             heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
            int memData = *((unsigned int*) heapSpacePtr->addr);
            if (heapSpacePtr->data != memData) {
                if (state == kSVSBackwardBranch) {
                    /*
                     * State mismatch on backward branch - try one more
                     * iteration.
                     */
                    shadowSpace->selfVerificationState = kSVSDebugInterp;
                    goto log_and_continue;
                }
                LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
                LOGD("Addr: %#x Intrp Data: %#x Jit Data: %#x",
                    heapSpacePtr->addr, memData, heapSpacePtr->data);
                selfVerificationDumpState(pc, self);
                selfVerificationDumpTrace(pc, self);
                memDiff = true;
            }
        }
        if (memDiff) selfVerificationSpinLoop(shadowSpace);

        /*
         * Success.  If this shadowed trace included a single-stepped
         * instruction, we need to stay in the interpreter for one
         * more interpretation before resuming.
         */
        if (state == kSVSSingleStep) {
            assert(self->jitResumeNPC != NULL);
            assert(self->singleStepCount == 0);
            self->singleStepCount = 1;
            dvmEnableSubMode(self, kSubModeCountedStep);
        }

        /*
         * Switch off shadow replay mode.  The next shadowed trace
         * execution will turn it back on.
         */
        dvmDisableSubMode(self, kSubModeJitSV);

        self->jitState = kJitDone;
        return;
    }
log_and_continue:
    /* If the end of the trace hasn't been reached, make sure the maximum
     * trace length isn't exceeded */
    if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
        LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
        LOGD("startPC: %#x endPC: %#x currPC: %#x",
            (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
        selfVerificationDumpState(pc, self);
        selfVerificationDumpTrace(pc, self);
        selfVerificationSpinLoop(shadowSpace);
        return;
    }
    /* Log the instruction address and decoded instruction for debug */
    shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    shadowSpace->traceLength++;
}
#endif

/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism.  Running threads look to the copy
     * of this value in their private thread structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table.  Because this is a
     * permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it.  Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = NULL;
    dvmJitUpdateThreadStateAll();
}
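
/*
 * Sketch of the delayed-effect mechanism described in Note 1 (an assumed
 * shape of the interpreter's check, inferred from
 * dvmJitUpdateThreadStateSingle() at the end of this file; not a verbatim
 * excerpt): each thread consults its private copy of the profile table
 * pointer, so clearing gDvmJit.pProfTable only takes effect after a refresh.
 */
#if 0
    if (self->pJitProfTable != NULL) {
        /* bump the hotness counter for this dPC, maybe request a trace */
    }
#endif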

#if defined(WITH_JIT_TUNING)
/* Convenience function to increment counter from assembly code */
void dvmBumpNoChain(int from)
{
    gDvmJit.noChainExit[from]++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
#endif

/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;
    int not_hit;
    int chains;
    int stubs;
    if (gDvmJit.pJitEntryTable) {
        for (i=0, stubs=chains=hit=not_hit=0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0) {
                hit++;
                if (gDvmJit.pJitEntryTable[i].codeAddress ==
                      dvmCompilerGetInterpretTemplate())
                    stubs++;
            } else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD("JIT: table size is %d, entries used is %d",
             gDvmJit.jitTableSize, gDvmJit.jitTableEntriesUsed);
        LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
             hit, not_hit + hit, chains, gDvmJit.threshold,
             gDvmJit.blockingMode ? "Blocking" : "Non-blocking");

#if defined(WITH_JIT_TUNING)
        LOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);

        LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
             gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
             gDvmJit.normalExit, gDvmJit.puntExit);

        LOGD("JIT: ICHits: %d", gDvmICHitCount);

        LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
             "%d switch overflow",
             gDvmJit.noChainExit[kInlineCacheMiss],
             gDvmJit.noChainExit[kCallsiteInterpreted],
             gDvmJit.noChainExit[kSwitchOverflow]);

        LOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
             "%d dropped",
             gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
             gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
             gDvmJit.icPatchDropped);

        LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
             gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
             gDvmJit.invokeNative, gDvmJit.returnOp);
        LOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter",
             gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined,
             gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
        LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
        LOGD("JIT: Avg unit compilation time: %llu us",
             gDvmJit.numCompilations == 0 ? 0 :
             gDvmJit.jitTime / gDvmJit.numCompilations);
        LOGD("JIT: Potential GC blocked by compiler: max %llu us / "
             "avg %llu us (%d)",
             gDvmJit.maxCompilerThreadBlockGCTime,
             gDvmJit.numCompilerThreadBlockGC == 0 ?
                 0 : gDvmJit.compilerThreadBlockGCTime /
                     gDvmJit.numCompilerThreadBlockGC,
             gDvmJit.numCompilerThreadBlockGC);
#endif

        LOGD("JIT: %d Translation chains, %d interp stubs",
             gDvmJit.translationChains, stubs);
        if (gDvmJit.profileMode == kTraceProfilingContinuous) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}


/* End current trace now & don't include current instruction */
void dvmJitEndTraceSelect(Thread* self, const u2* dPC)
{
    if (self->jitState == kJitTSelect) {
        self->jitState = kJitTSelectEnd;
    }
    if (self->jitState == kJitTSelectEnd) {
        // Clean up and finish now.
        dvmCheckJit(dPC, self);
    }
}

/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 */
static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
                              bool isMethodEntry)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /*
     * Walk the bucket chain to find an exact match for our PC and trace/method
     * type
     */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
            (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
             isMethodEntry))) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

    if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
        gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
        /*
         * No match.  Acquire jitTableLock and find the last
         * slot in the chain.  Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        if (!callerLocked)
            dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case).  Otherwise we're going
         * to have to find a free slot and chain it.
         */
        ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                        isMethodEntry) {
                    /* Another thread got there first for this dPC */
                    if (!callerLocked)
                        dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0;  /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (android_atomic_release_cas(oldValue.infoWord,
                         newValue.infoWord,
                         &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
            /*
             * Initialize codeAddress and allocate the slot.  Must
             * happen in this order (since dPC is set, the entry is live).
             */
            android_atomic_release_store((int32_t)dPC,
                 (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        if (!callerLocked)
            dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}

/* Dump a trace description */
void dvmJitDumpTraceDesc(JitTraceDescription *trace)
{
    int i;
    bool done = false;
    const u2* dpc;
    const u2* dpcBase;
    int curFrag = 0;
    LOGD("===========================================");
    LOGD("Trace dump %#x, Method %s off %#x",(int)trace,
         trace->method->name,trace->trace[curFrag].info.frag.startOffset);
    dpcBase = trace->method->insns;
    while (!done) {
        DecodedInstruction decInsn;
        if (trace->trace[curFrag].isCode) {
            LOGD("Frag[%d]- Insts: %d, start: %#x, hint: %#x, end: %d",
                 curFrag, trace->trace[curFrag].info.frag.numInsts,
                 trace->trace[curFrag].info.frag.startOffset,
                 trace->trace[curFrag].info.frag.hint,
                 trace->trace[curFrag].info.frag.runEnd);
            dpc = dpcBase + trace->trace[curFrag].info.frag.startOffset;
            for (i=0; i<trace->trace[curFrag].info.frag.numInsts; i++) {
                dexDecodeInstruction(dpc, &decInsn);
                LOGD("  0x%04x - %s %#x",(dpc-dpcBase),
                     dexGetOpcodeName(decInsn.opcode),(int)dpc);
                dpc += dexGetWidthFromOpcode(decInsn.opcode);
            }
            if (trace->trace[curFrag].info.frag.runEnd) {
                done = true;
            }
        } else {
            LOGD("Frag[%d]- META info: 0x%08x", curFrag,
                 (int)trace->trace[curFrag].info.meta);
        }
        curFrag++;
    }
    LOGD("-------------------------------------------");
}

/*
 * Append the descriptor and class loader of "this", plus the callee method
 * pointer, to the current trace.  That is, the trace runs will contain the
 * following components:
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass descriptor (new)
 *  + thisClass classLoader (new)
 *  + calleeMethod (new)
 */
static void insertClassMethodInfo(Thread* self,
                                  const ClassObject* thisClass,
                                  const Method* calleeMethod,
                                  const DecodedInstruction* insn)
{
    int currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = thisClass ?
                                    (void *) thisClass->descriptor : NULL;
    self->trace[currTraceRun].isCode = false;

    currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = thisClass ?
                                    (void *) thisClass->classLoader : NULL;
    self->trace[currTraceRun].isCode = false;

    currTraceRun = ++self->currTraceRun;
    self->trace[currTraceRun].info.meta = (void *) calleeMethod;
    self->trace[currTraceRun].isCode = false;
}
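
/*
 * Illustration (hypothetical index N; not original code): the trace[] layout
 * after insertClassMethodInfo() and insertMoveResult() run for an invoke
 * that ended run N.
 */
#if 0
    self->trace[N].isCode;      /* true:  run ending with the invoke        */
    self->trace[N+1].isCode;    /* false: meta = thisClass->descriptor      */
    self->trace[N+2].isCode;    /* false: meta = thisClass->classLoader     */
    self->trace[N+3].isCode;    /* false: meta = calleeMethod               */
    self->trace[N+4].isCode;    /* true:  move-result run, when one follows */
#endif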

/*
 * Check if the next instruction following the invoke is a move-result and if
 * so add it to the trace.  That is, this will add the trace run that includes
 * the move-result to the trace list.
 *
 *  + trace run that ends with an invoke (existing entry)
 *  + thisClass descriptor (existing entry)
 *  + thisClass classLoader (existing entry)
 *  + calleeMethod (existing entry)
 *  + move result (new)
 *
 * lastPC, len, offset are all from the preceding invoke instruction
 */
static void insertMoveResult(const u2 *lastPC, int len, int offset,
                             Thread *self)
{
    DecodedInstruction nextDecInsn;
    const u2 *moveResultPC = lastPC + len;

    dexDecodeInstruction(moveResultPC, &nextDecInsn);
    if ((nextDecInsn.opcode != OP_MOVE_RESULT) &&
        (nextDecInsn.opcode != OP_MOVE_RESULT_WIDE) &&
        (nextDecInsn.opcode != OP_MOVE_RESULT_OBJECT))
        return;

    /* We need to start a new trace run */
    int currTraceRun = ++self->currTraceRun;
    self->currRunHead = moveResultPC;
    self->trace[currTraceRun].info.frag.startOffset = offset + len;
    self->trace[currTraceRun].info.frag.numInsts = 1;
    self->trace[currTraceRun].info.frag.runEnd = false;
    self->trace[currTraceRun].info.frag.hint = kJitHintNone;
    self->trace[currTraceRun].isCode = true;
    self->totalTraceLen++;

    self->currRunLen = dexGetWidthFromInstruction(moveResultPC);
}

/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted.  This is the primary trace
 * selection function.  NOTE: return instructions are handled a little
 * differently.  In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation.  If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request.  This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected.  However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes.  This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 */
void dvmCheckJit(const u2* pc, Thread* self)
{
    const ClassObject *thisClass = self->callsiteClass;
    const Method* curMethod = self->methodToCall;
    int flags, len;
    int allDone = false;
    /* Stay in break/single-step mode for the next instruction */
    bool stayOneMoreInst = false;

    /* Prepare to handle last PC and stage the current PC & method */
    const u2 *lastPC = self->lastPC;

    self->lastPC = pc;

    switch (self->jitState) {
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            /* First instruction - just remember the PC and exit */
            if (lastPC == NULL) break;
            /* Grow the trace around the last PC if jitState is kJitTSelect */
            dexDecodeInstruction(lastPC, &decInsn);

            /*
             * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
             * to the amount of space it takes to generate the chaining
             * cells.
             */
            if (self->totalTraceLen != 0 &&
                (decInsn.opcode == OP_PACKED_SWITCH ||
                 decInsn.opcode == OP_SPARSE_SWITCH)) {
                self->jitState = kJitTSelectEnd;
                break;
            }

#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s. lpc:%#x, pc:%#x",
                 dexGetOpcodeName(decInsn.opcode), (int)lastPC, (int)pc);
#endif
            flags = dexGetFlagsFromOpcode(decInsn.opcode);
            len = dexGetWidthFromInstruction(lastPC);
            offset = lastPC - self->traceMethod->insns;
            assert((unsigned) offset <
                   dvmGetMethodInsnsSize(self->traceMethod));
            if (lastPC != self->currRunHead + self->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++self->currTraceRun;
                self->currRunLen = 0;
                self->currRunHead = (u2*)lastPC;
                self->trace[currTraceRun].info.frag.startOffset = offset;
                self->trace[currTraceRun].info.frag.numInsts = 0;
                self->trace[currTraceRun].info.frag.runEnd = false;
                self->trace[currTraceRun].info.frag.hint = kJitHintNone;
                self->trace[currTraceRun].isCode = true;
            }
            self->trace[self->currTraceRun].info.frag.numInsts++;
            self->totalTraceLen++;
            self->currRunLen += len;

            /*
             * If the last instruction is an invoke, we will try to sneak in
             * the move-result* (if it exists) as a separate trace run.
             */
            {
                int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;

                /* Will probably never hit this with the current trace builder */
                if (self->currTraceRun ==
                    (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
                    self->jitState = kJitTSelectEnd;
                }
            }

            if (!dexIsGoto(flags) &&
                ((flags & (kInstrCanBranch |
                           kInstrCanSwitch |
                           kInstrCanReturn |
                           kInstrInvoke)) != 0)) {
                self->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     dexGetOpcodeName(decInsn.opcode));
#endif

                /*
                 * If the current invoke is a {virtual,interface} one, get the
                 * current class/method pair into the trace as well.
                 * If the next instruction is a variant of move-result, insert
                 * it into the trace too.
                 */
                if (flags & kInstrInvoke) {
                    insertClassMethodInfo(self, thisClass, curMethod,
                                          &decInsn);
                    insertMoveResult(lastPC, len, offset, self);
                }
            }
            /* Break on throw or self-loop */
            if ((decInsn.opcode == OP_THROW) || (lastPC == pc)){
                self->jitState = kJitTSelectEnd;
            }
            if (self->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                self->jitState = kJitTSelectEnd;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            else {
                /*
                 * Last instruction is a return - stay in the dbg interpreter
                 * for one more instruction if it is a non-void return, since
                 * we don't want to start a trace with move-result as the first
                 * instruction (which is already included in the trace
                 * containing the invoke).
                 */
                if (decInsn.opcode != OP_RETURN_VOID) {
                    stayOneMoreInst = true;
                }
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                /* Empty trace - set to bail to interpreter */
                if (self->totalTraceLen == 0) {
                    dvmJitSetCodeAddr(self->currTraceHead,
                                      dvmCompilerGetInterpretTemplate(),
                                      dvmCompilerGetInterpretTemplateSet(),
                                      false /* Not method entry */, 0);
                    self->jitState = kJitDone;
                    allDone = true;
                    break;
                }

                int lastTraceDesc = self->currTraceRun;

                /* Extend a new empty desc if the last slot is meta info */
                if (!self->trace[lastTraceDesc].isCode) {
                    lastTraceDesc = ++self->currTraceRun;
                    self->trace[lastTraceDesc].info.frag.startOffset = 0;
                    self->trace[lastTraceDesc].info.frag.numInsts = 0;
                    self->trace[lastTraceDesc].info.frag.hint = kJitHintNone;
                    self->trace[lastTraceDesc].isCode = true;
                }

                /* Mark the end of the trace runs */
                self->trace[lastTraceDesc].info.frag.runEnd = true;

                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (self->currTraceRun+1));

                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    self->jitState = kJitDone;
                    allDone = true;
                    break;
                }

                desc->method = self->traceMethod;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(self->trace[0]),
                    sizeof(JitTraceRun) * (self->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
                dvmJitDumpTraceDesc(desc);
#endif
                if (dvmCompilerWorkEnqueue(
                       self->currTraceHead,kWorkOrderTrace,desc)) {
                    /* Work order successfully enqueued */
                    if (gDvmJit.blockingMode) {
                        dvmCompilerDrainQueue();
                    }
                } else {
                    /*
                     * Make sure the descriptor for the abandoned work order is
                     * freed.
                     */
                    free(desc);
                }
                self->jitState = kJitDone;
                allDone = true;
            }
            break;
        case kJitDone:
            allDone = true;
            break;
        case kJitNot:
            allDone = true;
            break;
        default:
            LOGE("Unexpected JIT state: %d", self->jitState);
            dvmAbort();
            break;
    }

    /*
     * If we're done with trace selection, switch off the control flags.
     */
    if (allDone) {
        dvmDisableSubMode(self, kSubModeJitTraceBuild);
        if (stayOneMoreInst) {
            // Keep going in single-step mode for at least one more inst
            assert(self->jitResumeNPC == NULL);
            self->singleStepCount = MIN(1, self->singleStepCount);
            dvmEnableSubMode(self, kSubModeCountedStep);
        }
    }
    return;
}

JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry)
{
    int idx = dvmJitHash(pc);

    /* Expect a high hit rate on 1st shot */
    if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
        (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == isMethodEntry))
        return &gDvmJit.pJitEntryTable[idx];
    else {
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
                (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                isMethodEntry))
                return &gDvmJit.pJitEntryTable[idx];
        }
    }
    return NULL;
}

/*
 * Walk through the JIT profile table and find the corresponding JIT code, in
 * the specified format (i.e. trace vs method).  This routine needs to be fast.
 */
void* getCodeAddrCommon(const u2* dPC, bool methodEntry)
{
    int idx = dvmJitHash(dPC);
    const u2* pc = gDvmJit.pJitEntryTable[idx].dPC;
    if (pc != NULL) {
        bool hideTranslation = dvmJitHideTranslation();
        if (pc == dPC &&
            gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) {
            int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ?
                 0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
            intptr_t codeAddress =
                (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
#if defined(WITH_JIT_TUNING)
            gDvmJit.addrLookupsFound++;
#endif
            return hideTranslation || !codeAddress ? NULL :
                   (void *)(codeAddress + offset);
        } else {
            int chainEndMarker = gDvmJit.jitTableSize;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
                    gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
                        methodEntry) {
                    int offset = (gDvmJit.profileMode >=
                        kTraceProfilingContinuous) ? 0 :
                        gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
                    intptr_t codeAddress =
                        (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
#if defined(WITH_JIT_TUNING)
                    gDvmJit.addrLookupsFound++;
#endif
                    return hideTranslation || !codeAddress ? NULL :
                           (void *)(codeAddress + offset);
                }
            }
        }
    }
#if defined(WITH_JIT_TUNING)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}

/*
 * If a translated code address, in trace format, exists for the Dalvik
 * byte code pointer, return it.
 */
void* dvmJitGetTraceAddr(const u2* dPC)
{
    return getCodeAddrCommon(dPC, false /* method entry */);
}

/*
 * If a translated code address, in whole-method format, exists for the
 * Dalvik byte code pointer, return it.
 */
void* dvmJitGetMethodAddr(const u2* dPC)
{
    return getCodeAddrCommon(dPC, true /* method entry */);
}

/*
 * Similar to dvmJitGetTraceAddr, but returns null if the calling
 * thread is in a single-step mode.
 */
void* dvmJitGetTraceAddrThread(const u2* dPC, Thread* self)
{
    return (self->interpBreak.ctl.breakFlags != 0) ? NULL :
            getCodeAddrCommon(dPC, false /* method entry */);
}

/*
 * Similar to dvmJitGetMethodAddr, but returns null if the calling
 * thread is in a single-step mode.
 */
void* dvmJitGetMethodAddrThread(const u2* dPC, Thread* self)
{
    return (self->interpBreak.ctl.breakFlags != 0) ? NULL :
            getCodeAddrCommon(dPC, true /* method entry */);
}
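
/*
 * Usage sketch (an assumption about the caller, which lives outside this
 * file): an interpreter probe for a translation at a potential trace head
 * would look roughly like this.
 */
#if 0
    void *compiled = dvmJitGetTraceAddrThread(pc, self);
    if (compiled != NULL) {
        /* transfer control to the translation's entry point */
    } else {
        /* keep interpreting; profiling may eventually request a trace */
    }
#endif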

/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from initial state to
 * JIT'd code, it must not be altered without first halting all
 * threads.  We defer the setting of the profile prefix size until
 * after the new code address is set to ensure that the prefix offset
 * is never applied to the initial interpret-only translation.  All
 * translations with non-zero profile prefixes will still be correct
 * if entered as if the profile offset is 0, but the interpret-only
 * template cannot handle a non-zero prefix.
 * NOTE: JitTable must not be in danger of reset while this
 * code is executing.  See Issue 4271784 for details.
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set,
                       bool isMethodEntry, int profilePrefixSize)
{
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    /*
     * Get the JitTable slot for this dPC (or create one if JitTable
     * has been reset between the time the trace was requested and now).
     */
    JitEntry *jitEntry = isMethodEntry ?
        lookupAndAdd(dPC, false /* caller doesn't hold tableLock */,
                     isMethodEntry) :
        dvmJitFindEntry(dPC, isMethodEntry);
    assert(jitEntry);
    /* Note: order of update is important */
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.isMethodEntry = isMethodEntry;
        newValue.info.instructionSet = set;
        newValue.info.profileOffset = profilePrefixSize;
    } while (android_atomic_release_cas(
             oldValue.infoWord, newValue.infoWord,
             &jitEntry->u.infoWord) != 0);
    jitEntry->codeAddress = nPC;
}

/*
 * Determine if a valid trace-building request is active.  If so, set
 * the proper flags in interpBreak and return.  Trace selection will
 * then begin normally via dvmCheckBefore.
 */
void dvmJitCheckTraceRequest(Thread* self)
{
    int i;
    /*
     * A note on trace "hotness" filtering:
     *
     * Our first level trigger is intentionally loose - we need it to
     * fire easily not just to identify potential traces to compile, but
     * also to allow re-entry into the code cache.
     *
     * The 2nd level filter (done here) exists to be selective about
     * what we actually compile.  It works by requiring the same
     * trace head "key" (defined as filterKey below) to appear twice in
     * a relatively short period of time.  The difficulty is defining the
     * shape of the filterKey.  Unfortunately, there is no "one size fits
     * all" approach.
     *
     * For spiky execution profiles dominated by a smallish
     * number of very hot loops, we would want the second-level filter
     * to be very selective.  A good selective filter is requiring an
     * exact match of the Dalvik PC.  In other words, defining filterKey as:
     *     intptr_t filterKey = (intptr_t)self->interpSave.pc
     *
     * However, for flat execution profiles we do best when aggressively
     * translating.  A heuristically decent proxy for this is to use
     * the value of the method pointer containing the trace as the filterKey.
     * Intuitively, this is saying that once any trace in a method appears hot,
     * immediately translate any other trace from that same method that
     * survives the first-level filter.  Here, filterKey would be defined as:
     *     intptr_t filterKey = (intptr_t)self->interpSave.method
     *
     * The problem is that we can't easily detect whether we're dealing
     * with a spiky or flat profile.  If we go with the "pc" match approach,
     * flat profiles perform poorly.  If we go with the loose "method" match,
     * we end up generating a lot of useless translations.  Probably the
     * best approach in the future will be to retain profile information
     * across runs of each application in order to determine its profile,
     * and then choose once we have enough history.
     *
     * However, for now we've decided to choose a compromise filter scheme
     * that includes elements of both.  The high order bits of the filter key
     * are drawn from the enclosing method, and are combined with a slice
     * of the low-order bits of the Dalvik pc of the trace head.
     * The looseness of the filter can be adjusted by changing the width of
     * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS).  The wider
     * the slice, the tighter the filter.
     *
     * Note: the fixed shifts in the function below reflect assumed word
     * alignment for method pointers and half-word alignment of the Dalvik pc.
     */
    u4 methodKey = (u4)self->interpSave.method <<
                   (JIT_TRACE_THRESH_FILTER_PC_BITS - 2);
    u4 pcKey = ((u4)self->interpSave.pc >> 1) &
               ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1);
    intptr_t filterKey = (intptr_t)(methodKey | pcKey);

    // Shouldn't be here if already building a trace.
    assert((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild)==0);

    /* Check if the JIT request can be handled now */
    if ((gDvmJit.pJitEntryTable != NULL) &&
        ((self->interpBreak.ctl.breakFlags & kInterpSingleStep) == 0)){
        /* Bypass the filter for hot trace requests or during stress mode */
        if (self->jitState == kJitTSelectRequest &&
            gDvmJit.threshold > 6) {
            /* Two-level filtering scheme */
            for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
                if (filterKey == self->threshFilter[i]) {
                    self->threshFilter[i] = 0; // Reset filter entry
                    break;
                }
            }
            if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
                /*
                 * Use random replacement policy - otherwise we could miss a
                 * large loop that contains more traces than the size of our
                 * filter array.
                 */
                i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
                self->threshFilter[i] = filterKey;
                self->jitState = kJitDone;
            }
        }

        /* If the compiler is backlogged, cancel any JIT actions */
        if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
            self->jitState = kJitDone;
        }

        /*
         * Check for additional reasons that might force the trace select
         * request to be dropped
         */
        if (self->jitState == kJitTSelectRequest ||
            self->jitState == kJitTSelectRequestHot) {
            if (dvmJitFindEntry(self->interpSave.pc, false)) {
                /* In progress - nothing to do */
                self->jitState = kJitDone;
            } else {
                JitEntry *slot = lookupAndAdd(self->interpSave.pc,
                                              false /* lock */,
                                              false /* method entry */);
                if (slot == NULL) {
                    /*
                     * Table is full.  This should have been
                     * detected by the compiler thread and the table
                     * resized before we run into it here.  Assume bad things
                     * are afoot and disable profiling.
                     */
                    self->jitState = kJitDone;
                    LOGD("JIT: JitTable full, disabling profiling");
                    dvmJitStopTranslationRequests();
                }
            }
        }

        switch (self->jitState) {
            case kJitTSelectRequest:
            case kJitTSelectRequestHot:
                self->jitState = kJitTSelect;
                self->traceMethod = self->interpSave.method;
                self->currTraceHead = self->interpSave.pc;
                self->currTraceRun = 0;
                self->totalTraceLen = 0;
                self->currRunHead = self->interpSave.pc;
                self->currRunLen = 0;
                self->trace[0].info.frag.startOffset =
                     self->interpSave.pc - self->interpSave.method->insns;
                self->trace[0].info.frag.numInsts = 0;
                self->trace[0].info.frag.runEnd = false;
                self->trace[0].info.frag.hint = kJitHintNone;
                self->trace[0].isCode = true;
                self->lastPC = 0;
                /* Turn on trace selection mode */
                dvmEnableSubMode(self, kSubModeJitTraceBuild);
#if defined(SHOW_TRACE)
                LOGD("Starting trace for %s at %#x",
                     self->interpSave.method->name, (int)self->interpSave.pc);
#endif
                break;
            case kJitDone:
                break;
            default:
                LOGE("Unexpected JIT state: %d", self->jitState);
                dvmAbort();
        }
    } else {
        /* Cannot build trace this time */
        self->jitState = kJitDone;
    }
}

/*
 * Resizes the JitTable.  The requested size must be a power of 2.
 * Returns true on failure.  Stops all threads, and thus is a heavyweight
 * operation.  May only be called by the compiler thread.
 */
bool dvmJitResizeJitTable(unsigned int size)
{
    JitEntry *pNewTable;
    JitEntry *pOldTable;
    JitEntry tempEntry;
    u4 newMask;
    unsigned int oldSize;
    unsigned int i;

    assert(gDvmJit.pJitEntryTable != NULL);
    assert(size && !(size & (size - 1)));   /* Is power of 2? */

    LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);

    newMask = size - 1;

    if (size <= gDvmJit.jitTableSize) {
        return true;
    }

    /* Make sure requested size is compatible with chain field width */
    tempEntry.u.info.chain = size;
    if (tempEntry.u.info.chain != size) {
        LOGD("Jit: JitTable request of %d too big", size);
        return true;
    }

    pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
    if (pNewTable == NULL) {
        return true;
    }
    for (i=0; i< size; i++) {
        pNewTable[i].u.info.chain = size;  /* Initialize chain termination */
    }

    /* Stop all other interpreting/jit'ng threads */
    dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);

    pOldTable = gDvmJit.pJitEntryTable;
    oldSize = gDvmJit.jitTableSize;

    dvmLockMutex(&gDvmJit.tableLock);
    gDvmJit.pJitEntryTable = pNewTable;
    gDvmJit.jitTableSize = size;
    gDvmJit.jitTableMask = size - 1;
    gDvmJit.jitTableEntriesUsed = 0;

    for (i=0; i < oldSize; i++) {
        if (pOldTable[i].dPC) {
            JitEntry *p;
            u2 chain;
            p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/,
                             pOldTable[i].u.info.isMethodEntry);
            p->codeAddress = pOldTable[i].codeAddress;
            /* We need to preserve the new chain field, but copy the rest */
            chain = p->u.info.chain;
            p->u = pOldTable[i].u;
            p->u.info.chain = chain;
        }
    }

    dvmUnlockMutex(&gDvmJit.tableLock);

    free(pOldTable);

    /* Restart the world */
    dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);

    return false;
}
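
/*
 * Call-site sketch (an assumption: the real trigger lives in the compiler
 * thread, outside this file).  A plausible growth policy doubles the table
 * once it becomes half full:
 */
#if 0
    if (gDvmJit.jitTableEntriesUsed > (gDvmJit.jitTableSize >> 1)) {
        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
    }
#endif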

/*
 * Reset the JitTable to the initial clean state.
 */
void dvmJitResetTable()
{
    JitEntry *jitEntry = gDvmJit.pJitEntryTable;
    unsigned int size = gDvmJit.jitTableSize;
    unsigned int i;

    dvmLockMutex(&gDvmJit.tableLock);

    /* Note: if there is a need to preserve any existing counts, do so here. */
    if (gDvmJit.pJitTraceProfCounters) {
        for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) {
            if (gDvmJit.pJitTraceProfCounters->buckets[i])
                memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i],
                       0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES);
        }
        gDvmJit.pJitTraceProfCounters->next = 0;
    }

    memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
    for (i=0; i< size; i++) {
        jitEntry[i].u.info.chain = size;  /* Initialize chain termination */
    }
    gDvmJit.jitTableEntriesUsed = 0;
    dvmUnlockMutex(&gDvmJit.tableLock);
}

/*
 * Return the address of the next trace profile counter.  This address
 * will be embedded in the generated code for the trace, and thus cannot
 * change while the trace exists.
 */
JitTraceCounter_t *dvmJitNextTraceCounter()
{
    int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES;
    int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES;
    JitTraceCounter_t *res;
    /* Lazily allocate blocks of counters */
    if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) {
        JitTraceCounter_t *p =
              (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p));
        if (!p) {
            LOGE("Failed to allocate block of trace profile counters");
            dvmAbort();
        }
        gDvmJit.pJitTraceProfCounters->buckets[idx] = p;
    }
    res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem];
    gDvmJit.pJitTraceProfCounters->next++;
    return res;
}

/*
 * Float/double conversion requires clamping to min and max of integer form.
 * If the target doesn't support this natively, use these.
 */
s8 dvmJitd2l(double d)
{
    static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
    static const double kMinLong = (double)(s8)0x8000000000000000ULL;
    if (d >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (d <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (d != d) // NaN case
        return 0;
    else
        return (s8)d;
}

s8 dvmJitf2l(float f)
{
    static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
    static const float kMinLong = (float)(s8)0x8000000000000000ULL;
    if (f >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (f <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (f != f) // NaN case
        return 0;
    else
        return (s8)f;
}
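
/*
 * Expected results of the helpers above (illustrative calls, not original
 * code; the values follow directly from the clamping logic):
 */
#if 0
    dvmJitd2l(1e300);    /* == 0x7fffffffffffffffLL, clamped to max long */
    dvmJitd2l(-1e300);   /* == 0x8000000000000000LL, clamped to min long */
    dvmJitd2l(0.0/0.0);  /* == 0, NaN converts to zero                   */
    dvmJitf2l(1.5f);     /* == 1, in-range values truncate toward zero   */
#endif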

/* Should only be called by the compiler thread */
void dvmJitChangeProfileMode(TraceProfilingModes newState)
{
    if (gDvmJit.profileMode != newState) {
        gDvmJit.profileMode = newState;
        dvmJitUnchainAll();
    }
}

void dvmJitTraceProfilingOn()
{
    if (gDvmJit.profileMode == kTraceProfilingPeriodicOff)
        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
                                    (void*) kTraceProfilingPeriodicOn);
    else if (gDvmJit.profileMode == kTraceProfilingDisabled)
        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
                                    (void*) kTraceProfilingContinuous);
}

void dvmJitTraceProfilingOff()
{
    if (gDvmJit.profileMode == kTraceProfilingPeriodicOn)
        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
                                    (void*) kTraceProfilingPeriodicOff);
    else if (gDvmJit.profileMode == kTraceProfilingContinuous)
        dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
                                    (void*) kTraceProfilingDisabled);
}

/*
 * Update JIT-specific info in Thread structure for a single thread
 */
void dvmJitUpdateThreadStateSingle(Thread* thread)
{
    thread->pJitProfTable = gDvmJit.pProfTable;
    thread->jitThreshold = gDvmJit.threshold;
}

/*
 * Walk through the thread list and refresh all local copies of
 * JIT global state (which was placed there for fast access).
 */
void dvmJitUpdateThreadStateAll()
{
    Thread* self = dvmThreadSelf();
    Thread* thread;

    dvmLockThreadList(self);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        dvmJitUpdateThreadStateSingle(thread);
    }
    dvmUnlockThreadList();
}
#endif /* WITH_JIT */