JITEmitter.cpp revision 6098e4be947e0761c6997d98d12535fce85045e9
//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a MachineCodeEmitter object that is used by the JIT to
// write machine code to memory and remember where relocatable values are.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "JIT.h"
#include "JITDwarfEmitter.h"
#include "llvm/Constant.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/System/Disassembler.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumBytes, "Number of bytes of machine code compiled");
STATISTIC(NumRelos, "Number of relocations applied");

// TheJIT - The JIT instance this translation unit services.  Set by the
// JITResolver constructor; read by the free functions in this file.
static JIT *TheJIT = 0;


//===----------------------------------------------------------------------===//
// JIT lazy compilation code.
//
namespace {
  /// JITResolverState - Bookkeeping maps used by the lazy-compilation
  /// resolver.  Every accessor requires proof (a MutexGuard) that the JIT
  /// lock is held, since these maps are touched from the compile callback.
  class JITResolverState {
  private:
    /// FunctionToStubMap - Keep track of the stub created for a particular
    /// function so that we can reuse them if necessary.
    std::map<Function*, void*> FunctionToStubMap;

    /// StubToFunctionMap - Keep track of the function that each stub
    /// corresponds to.
    std::map<void*, Function*> StubToFunctionMap;

    /// GlobalToLazyPtrMap - Keep track of the lazy pointer created for a
    /// particular GlobalVariable so that we can reuse them if necessary.
    std::map<GlobalValue*, void*> GlobalToLazyPtrMap;

  public:
    std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return FunctionToStubMap;
    }

    std::map<void*, Function*>& getStubToFunctionMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return StubToFunctionMap;
    }

    std::map<GlobalValue*, void*>&
    getGlobalToLazyPtrMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return GlobalToLazyPtrMap;
    }
  };

  /// JITResolver - Keep track of, and resolve, call sites for functions that
  /// have not yet been compiled.
  class JITResolver {
    /// LazyResolverFn - The target lazy resolver function that we actually
    /// rewrite instructions to use.
    TargetJITInfo::LazyResolverFn LazyResolverFn;

    JITResolverState state;

    /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
    /// external functions.
    std::map<void*, void*> ExternalFnToStubMap;

    // revGOTMap - map addresses to indexes in the GOT.
    std::map<void*, unsigned> revGOTMap;
    unsigned nextGOTIndex;

    // TheJITResolver - The single live resolver (asserted in the ctor).
    static JITResolver *TheJITResolver;
  public:
    explicit JITResolver(JIT &jit) : nextGOTIndex(0) {
      TheJIT = &jit;

      LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
      assert(TheJITResolver == 0 && "Multiple JIT resolvers?");
      TheJITResolver = this;
    }

    ~JITResolver() {
      TheJITResolver = 0;
    }

    /// getFunctionStub - This returns a pointer to a function stub, creating
    /// one on demand as needed.
    void *getFunctionStub(Function *F);

    /// getExternalFunctionStub - Return a stub for the function at the
    /// specified address, created lazily on demand.
    void *getExternalFunctionStub(void *FnAddr);

    /// getGlobalValueLazyPtr - Return a lazy pointer containing the specified
    /// GV address.
    void *getGlobalValueLazyPtr(GlobalValue *V, void *GVAddress);

    /// AddCallbackAtLocation - If the target is capable of rewriting an
    /// instruction without the use of a stub, record the location of the use so
    /// we know which function is being used at the location.
    void *AddCallbackAtLocation(Function *F, void *Location) {
      MutexGuard locked(TheJIT->lock);
      /// Get the target-specific JIT resolver function.
      state.getStubToFunctionMap(locked)[Location] = F;
      return (void*)(intptr_t)LazyResolverFn;
    }

    /// getGOTIndexForAddress - Return a new or existing index in the GOT for
    /// an address. This function only manages slots, it does not manage the
    /// contents of the slots or the memory associated with the GOT.
    unsigned getGOTIndexForAddr(void *addr);

    /// JITCompilerFn - This function is called to resolve a stub to a compiled
    /// address. If the LLVM Function corresponding to the stub has not yet
    /// been compiled, this function compiles it first.
    static void *JITCompilerFn(void *Stub);
  };
}

JITResolver *JITResolver::TheJITResolver = 0;

#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
    defined(__APPLE__)
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#endif

/// synchronizeICache - On some targets, the JIT emitted code must be
/// explicitly refetched to ensure correct execution.
static void synchronizeICache(const void *Addr, size_t len) {
#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
    defined(__APPLE__)
  sys_icache_invalidate(Addr, len);
#endif
}

/// getFunctionStub - This returns a pointer to a function stub, creating
/// one on demand as needed.
void *JITResolver::getFunctionStub(Function *F) {
  MutexGuard locked(TheJIT->lock);

  // If we already have a stub for this function, recycle it.
  void *&Stub = state.getFunctionToStubMap(locked)[F];
  if (Stub) return Stub;

  // Call the lazy resolver function unless we already KNOW it is an external
  // function, in which case we just skip the lazy resolution step.
  void *Actual = (void*)(intptr_t)LazyResolverFn;
  if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode())
    Actual = TheJIT->getPointerToFunction(F);

  // Otherwise, codegen a new stub.  For now, the stub will call the lazy
  // resolver function.
  Stub = TheJIT->getJITInfo().emitFunctionStub(Actual,
                                               *TheJIT->getCodeEmitter());

  if (Actual != (void*)(intptr_t)LazyResolverFn) {
    // If we are getting the stub for an external function, we really want the
    // address of the stub in the GlobalAddressMap for the JIT, not the address
    // of the external function.
    TheJIT->updateGlobalMapping(F, Stub);
  }

  DOUT << "JIT: Stub emitted at [" << Stub << "] for function '"
       << F->getName() << "'\n";

  // Finally, keep track of the stub-to-Function mapping so that the
  // JITCompilerFn knows which function to compile!
  state.getStubToFunctionMap(locked)[Stub] = F;
  return Stub;
}

/// getGlobalValueLazyPtr - Return a lazy pointer containing the specified
/// GV address.
void *JITResolver::getGlobalValueLazyPtr(GlobalValue *GV, void *GVAddress) {
  MutexGuard locked(TheJIT->lock);

  // If we already have a stub for this global variable, recycle it.
  void *&LazyPtr = state.getGlobalToLazyPtrMap(locked)[GV];
  if (LazyPtr) return LazyPtr;

  // Otherwise, codegen a new lazy pointer.
  LazyPtr = TheJIT->getJITInfo().emitGlobalValueLazyPtr(GVAddress,
                                                    *TheJIT->getCodeEmitter());

  DOUT << "JIT: Stub emitted at [" << LazyPtr << "] for GV '"
       << GV->getName() << "'\n";

  return LazyPtr;
}

/// getExternalFunctionStub - Return a stub for the function at the
/// specified address, created lazily on demand.
void *JITResolver::getExternalFunctionStub(void *FnAddr) {
  // If we already have a stub for this function, recycle it.
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub) return Stub;

  Stub = TheJIT->getJITInfo().emitFunctionStub(FnAddr,
                                               *TheJIT->getCodeEmitter());

  DOUT << "JIT: Stub emitted at [" << Stub
       << "] for external function at '" << FnAddr << "'\n";
  return Stub;
}

/// getGOTIndexForAddr - Return the GOT slot index for the given address,
/// allocating a fresh slot the first time an address is seen.  Slot 0 is
/// never handed out, so a zero lookup result means "no slot yet".
unsigned JITResolver::getGOTIndexForAddr(void* addr) {
  unsigned idx = revGOTMap[addr];
  if (!idx) {
    idx = ++nextGOTIndex;
    revGOTMap[addr] = idx;
    DOUT << "Adding GOT entry " << idx
         << " for addr " << addr << "\n";
  }
  return idx;
}

/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered.  It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
void *JITResolver::JITCompilerFn(void *Stub) {
  JITResolver &JR = *TheJITResolver;

  MutexGuard locked(TheJIT->lock);

  // The address given to us for the stub may not be exactly right, it might be
  // a little bit after the stub.  As such, use upper_bound to find it.
  std::map<void*, Function*>::iterator I =
    JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
  assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
         "This is not a known stub!");
  Function *F = (--I)->second;

  // If we have already code generated the function, just return the address.
  void *Result = TheJIT->getPointerToGlobalIfAvailable(F);

  if (!Result) {
    // Otherwise we don't have it, do lazy compilation now.

    // If lazy compilation is disabled, emit a useful error message and abort.
    if (TheJIT->isLazyCompilationDisabled()) {
      cerr << "LLVM JIT requested to do lazy compilation of function '"
           << F->getName() << "' when lazy compiles are disabled!\n";
      abort();
    }

    // We might like to remove the stub from the StubToFunction map.
    // We can't do that! Multiple threads could be stuck, waiting to acquire the
    // lock above. As soon as the 1st function finishes compiling the function,
    // the next one will be released, and needs to be able to find the function
    // it needs to call.
    //JR.state.getStubToFunctionMap(locked).erase(I);

    DOUT << "JIT: Lazily resolving function '" << F->getName()
         << "' In stub ptr = " << Stub << " actual ptr = "
         << I->first << "\n";

    Result = TheJIT->getPointerToFunction(F);
  }

  // We don't need to reuse this stub in the future, as F is now compiled.
  JR.state.getFunctionToStubMap(locked).erase(F);

  // FIXME: We could rewrite all references to this stub if we knew them.

  // What we will do is set the compiled function address to map to the
  // same GOT entry as the stub so that later clients may update the GOT
  // if they see it still using the stub address.
  // Note: this is done so the Resolver doesn't have to manage GOT memory
  // Do this without allocating map space if the target isn't using a GOT
  if(JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
    JR.revGOTMap[Result] = JR.revGOTMap[Stub];

  return Result;
}

//===----------------------------------------------------------------------===//
// Function Index Support

// On MacOS we generate an index of currently JIT'd functions so that
// performance tools can determine a symbol name and accurate code range for a
// PC value.  Because performance tools are generally asynchronous, the code
// below is written with the hope that it could be interrupted at any time and
// have useful answers.  However, we don't go crazy with atomic operations, we
// just do a "reasonable effort".
#ifdef __APPLE__
#define ENABLE_JIT_SYMBOL_TABLE 1
#endif

/// JitSymbolEntry - Each function that is JIT compiled results in one of these
/// being added to an array of symbols.  This indicates the name of the function
/// as well as the address range it occupies.  This allows the client to map
/// from a PC value to the name of the function.
struct JitSymbolEntry {
  const char *FnName;   // FnName - a strdup'd string.
  void *FnStart;
  intptr_t FnSize;
};


struct JitSymbolTable {
  /// NextPtr - This forms a linked list of JitSymbolTable entries.  This
  /// pointer is not used right now, but might be used in the future.  Consider
  /// it reserved for future use.
  JitSymbolTable *NextPtr;

  /// Symbols - This is an array of JitSymbolEntry entries.  Only the first
  /// 'NumSymbols' symbols are valid.
  JitSymbolEntry *Symbols;

  /// NumSymbols - This indicates the number entries in the Symbols array that
  /// are valid.
  unsigned NumSymbols;

  /// NumAllocated - This indicates the amount of space we have in the Symbols
This is a private field that should not be read by external tools. 341 unsigned NumAllocated; 342}; 343 344#if ENABLE_JIT_SYMBOL_TABLE 345JitSymbolTable *__jitSymbolTable; 346#endif 347 348static void AddFunctionToSymbolTable(const char *FnName, 349 void *FnStart, intptr_t FnSize) { 350 assert(FnName != 0 && FnStart != 0 && "Bad symbol to add"); 351 JitSymbolTable **SymTabPtrPtr = 0; 352#if !ENABLE_JIT_SYMBOL_TABLE 353 return; 354#else 355 SymTabPtrPtr = &__jitSymbolTable; 356#endif 357 358 // If this is the first entry in the symbol table, add the JitSymbolTable 359 // index. 360 if (*SymTabPtrPtr == 0) { 361 JitSymbolTable *New = new JitSymbolTable(); 362 New->NextPtr = 0; 363 New->Symbols = 0; 364 New->NumSymbols = 0; 365 New->NumAllocated = 0; 366 *SymTabPtrPtr = New; 367 } 368 369 JitSymbolTable *SymTabPtr = *SymTabPtrPtr; 370 371 // If we have space in the table, reallocate the table. 372 if (SymTabPtr->NumSymbols >= SymTabPtr->NumAllocated) { 373 // If we don't have space, reallocate the table. 374 unsigned NewSize = std::min(64U, SymTabPtr->NumAllocated*2); 375 JitSymbolEntry *NewSymbols = new JitSymbolEntry[NewSize]; 376 JitSymbolEntry *OldSymbols = SymTabPtr->Symbols; 377 378 // Copy the old entries over. 379 memcpy(NewSymbols, OldSymbols, 380 SymTabPtr->NumAllocated*sizeof(JitSymbolEntry)); 381 382 // Swap the new symbols in, delete the old ones. 383 SymTabPtr->Symbols = NewSymbols; 384 SymTabPtr->NumSymbols = NewSize; 385 delete [] OldSymbols; 386 } 387 388 // Otherwise, we have enough space, just tack it onto the end of the array. 
389 JitSymbolEntry &Entry = SymTabPtr->Symbols[SymTabPtr->NumSymbols]; 390 Entry.FnName = strdup(FnName); 391 Entry.FnStart = FnStart; 392 Entry.FnSize = FnSize; 393 ++SymTabPtr->NumSymbols; 394} 395 396static void RemoveFunctionFromSymbolTable(void *FnStart) { 397 assert(FnStart && "Invalid function pointer"); 398 JitSymbolTable **SymTabPtrPtr = 0; 399#if !ENABLE_JIT_SYMBOL_TABLE 400 return; 401#else 402 SymTabPtrPtr = &__jitSymbolTable; 403#endif 404 405 JitSymbolTable *SymTabPtr = *SymTabPtrPtr; 406 JitSymbolEntry *Symbols = SymTabPtr->Symbols; 407 408 // Scan the table to find its index. The table is not sorted, so do a linear 409 // scan. 410 unsigned Index; 411 for (Index = 0; Symbols[Index].FnStart != FnStart; ++Index) 412 assert(Index != SymTabPtr->NumSymbols && "Didn't find function!"); 413 414 // Once we have an index, we know to nuke this entry, overwrite it with the 415 // entry at the end of the array, making the last entry redundant. 416 const char *OldName = Symbols[Index].FnName; 417 Symbols[Index] = Symbols[SymTabPtr->NumSymbols-1]; 418 free((void*)OldName); 419 420 // Drop the number of symbols in the table. 421 --SymTabPtr->NumSymbols; 422 423 // Finally, if we deleted the final symbol, deallocate the table itself. 424 if (SymTabPtr->NumSymbols == 0) 425 return; 426 427 *SymTabPtrPtr = 0; 428 delete [] Symbols; 429 delete SymTabPtr; 430} 431 432//===----------------------------------------------------------------------===// 433// JITEmitter code. 434// 435namespace { 436 /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is 437 /// used to output functions to memory for execution. 438 class JITEmitter : public MachineCodeEmitter { 439 JITMemoryManager *MemMgr; 440 441 // When outputting a function stub in the context of some other function, we 442 // save BufferBegin/BufferEnd/CurBufferPtr here. 
443 unsigned char *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr; 444 445 /// Relocations - These are the relocations that the function needs, as 446 /// emitted. 447 std::vector<MachineRelocation> Relocations; 448 449 /// MBBLocations - This vector is a mapping from MBB ID's to their address. 450 /// It is filled in by the StartMachineBasicBlock callback and queried by 451 /// the getMachineBasicBlockAddress callback. 452 std::vector<intptr_t> MBBLocations; 453 454 /// ConstantPool - The constant pool for the current function. 455 /// 456 MachineConstantPool *ConstantPool; 457 458 /// ConstantPoolBase - A pointer to the first entry in the constant pool. 459 /// 460 void *ConstantPoolBase; 461 462 /// JumpTable - The jump tables for the current function. 463 /// 464 MachineJumpTableInfo *JumpTable; 465 466 /// JumpTableBase - A pointer to the first entry in the jump table. 467 /// 468 void *JumpTableBase; 469 470 /// Resolver - This contains info about the currently resolved functions. 471 JITResolver Resolver; 472 473 /// DE - The dwarf emitter for the jit. 474 JITDwarfEmitter *DE; 475 476 /// LabelLocations - This vector is a mapping from Label ID's to their 477 /// address. 478 std::vector<intptr_t> LabelLocations; 479 480 /// MMI - Machine module info for exception informations 481 MachineModuleInfo* MMI; 482 483 public: 484 JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit) { 485 MemMgr = JMM ? 
JMM : JITMemoryManager::CreateDefaultMemManager(); 486 if (jit.getJITInfo().needsGOT()) { 487 MemMgr->AllocateGOT(); 488 DOUT << "JIT is managing a GOT\n"; 489 } 490 491 if (ExceptionHandling) DE = new JITDwarfEmitter(jit); 492 } 493 ~JITEmitter() { 494 delete MemMgr; 495 if (ExceptionHandling) delete DE; 496 } 497 498 JITResolver &getJITResolver() { return Resolver; } 499 500 virtual void startFunction(MachineFunction &F); 501 virtual bool finishFunction(MachineFunction &F); 502 503 void emitConstantPool(MachineConstantPool *MCP); 504 void initJumpTableInfo(MachineJumpTableInfo *MJTI); 505 void emitJumpTableInfo(MachineJumpTableInfo *MJTI); 506 507 virtual void startFunctionStub(unsigned StubSize, unsigned Alignment = 1); 508 virtual void* finishFunctionStub(const Function *F); 509 510 virtual void addRelocation(const MachineRelocation &MR) { 511 Relocations.push_back(MR); 512 } 513 514 virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) { 515 if (MBBLocations.size() <= (unsigned)MBB->getNumber()) 516 MBBLocations.resize((MBB->getNumber()+1)*2); 517 MBBLocations[MBB->getNumber()] = getCurrentPCValue(); 518 } 519 520 virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const; 521 virtual intptr_t getJumpTableEntryAddress(unsigned Entry) const; 522 523 virtual intptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const { 524 assert(MBBLocations.size() > (unsigned)MBB->getNumber() && 525 MBBLocations[MBB->getNumber()] && "MBB not emitted!"); 526 return MBBLocations[MBB->getNumber()]; 527 } 528 529 /// deallocateMemForFunction - Deallocate all memory for the specified 530 /// function body. 
531 void deallocateMemForFunction(Function *F) { 532 MemMgr->deallocateMemForFunction(F); 533 } 534 535 virtual void emitLabel(uint64_t LabelID) { 536 if (LabelLocations.size() <= LabelID) 537 LabelLocations.resize((LabelID+1)*2); 538 LabelLocations[LabelID] = getCurrentPCValue(); 539 } 540 541 virtual intptr_t getLabelAddress(uint64_t LabelID) const { 542 assert(LabelLocations.size() > (unsigned)LabelID && 543 LabelLocations[LabelID] && "Label not emitted!"); 544 return LabelLocations[LabelID]; 545 } 546 547 virtual void setModuleInfo(MachineModuleInfo* Info) { 548 MMI = Info; 549 if (ExceptionHandling) DE->setModuleInfo(Info); 550 } 551 552 private: 553 void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub); 554 void *getPointerToGVLazyPtr(GlobalValue *V, void *Reference, 555 bool NoNeedStub); 556 }; 557} 558 559void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference, 560 bool DoesntNeedStub) { 561 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { 562 /// FIXME: If we straightened things out, this could actually emit the 563 /// global immediately instead of queuing it for codegen later! 564 return TheJIT->getOrEmitGlobalVariable(GV); 565 } 566 567 // If we have already compiled the function, return a pointer to its body. 568 Function *F = cast<Function>(V); 569 void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F); 570 if (ResultPtr) return ResultPtr; 571 572 if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) { 573 // If this is an external function pointer, we can force the JIT to 574 // 'compile' it, which really just adds it to the map. 575 if (DoesntNeedStub) 576 return TheJIT->getPointerToFunction(F); 577 578 return Resolver.getFunctionStub(F); 579 } 580 581 // Okay, the function has not been compiled yet, if the target callback 582 // mechanism is capable of rewriting the instruction directly, prefer to do 583 // that instead of emitting a stub. 
584 if (DoesntNeedStub) 585 return Resolver.AddCallbackAtLocation(F, Reference); 586 587 // Otherwise, we have to emit a lazy resolving stub. 588 return Resolver.getFunctionStub(F); 589} 590 591void *JITEmitter::getPointerToGVLazyPtr(GlobalValue *V, void *Reference, 592 bool DoesntNeedStub) { 593 // Make sure GV is emitted first. 594 // FIXME: For now, if the GV is an external function we force the JIT to 595 // compile it so the lazy pointer will contain the fully resolved address. 596 void *GVAddress = getPointerToGlobal(V, Reference, true); 597 return Resolver.getGlobalValueLazyPtr(V, GVAddress); 598} 599 600 601void JITEmitter::startFunction(MachineFunction &F) { 602 uintptr_t ActualSize; 603 BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(), 604 ActualSize); 605 BufferEnd = BufferBegin+ActualSize; 606 607 // Ensure the constant pool/jump table info is at least 4-byte aligned. 608 emitAlignment(16); 609 610 emitConstantPool(F.getConstantPool()); 611 initJumpTableInfo(F.getJumpTableInfo()); 612 613 // About to start emitting the machine code for the function. 614 emitAlignment(std::max(F.getFunction()->getAlignment(), 8U)); 615 TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr); 616 617 MBBLocations.clear(); 618} 619 620bool JITEmitter::finishFunction(MachineFunction &F) { 621 if (CurBufferPtr == BufferEnd) { 622 // FIXME: Allocate more space, then try again. 623 cerr << "JIT: Ran out of space for generated machine code!\n"; 624 abort(); 625 } 626 627 emitJumpTableInfo(F.getJumpTableInfo()); 628 629 // FnStart is the start of the text, not the start of the constant pool and 630 // other per-function data. 
631 unsigned char *FnStart = 632 (unsigned char *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction()); 633 unsigned char *FnEnd = CurBufferPtr; 634 635 MemMgr->endFunctionBody(F.getFunction(), BufferBegin, FnEnd); 636 NumBytes += FnEnd-FnStart; 637 638 if (!Relocations.empty()) { 639 NumRelos += Relocations.size(); 640 641 // Resolve the relocations to concrete pointers. 642 for (unsigned i = 0, e = Relocations.size(); i != e; ++i) { 643 MachineRelocation &MR = Relocations[i]; 644 void *ResultPtr; 645 if (MR.isString()) { 646 ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString()); 647 648 // If the target REALLY wants a stub for this function, emit it now. 649 if (!MR.doesntNeedStub()) 650 ResultPtr = Resolver.getExternalFunctionStub(ResultPtr); 651 } else if (MR.isGlobalValue()) { 652 ResultPtr = getPointerToGlobal(MR.getGlobalValue(), 653 BufferBegin+MR.getMachineCodeOffset(), 654 MR.doesntNeedStub()); 655 } else if (MR.isGlobalValueLazyPtr()) { 656 ResultPtr = getPointerToGVLazyPtr(MR.getGlobalValue(), 657 BufferBegin+MR.getMachineCodeOffset(), 658 MR.doesntNeedStub()); 659 } else if (MR.isBasicBlock()) { 660 ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock()); 661 } else if (MR.isConstantPoolIndex()) { 662 ResultPtr=(void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex()); 663 } else { 664 assert(MR.isJumpTableIndex()); 665 ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex()); 666 } 667 668 MR.setResultPointer(ResultPtr); 669 670 // if we are managing the GOT and the relocation wants an index, 671 // give it one 672 if (MR.isGOTRelative() && MemMgr->isManagingGOT()) { 673 unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr); 674 MR.setGOTIndex(idx); 675 if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) { 676 DOUT << "GOT was out of date for " << ResultPtr 677 << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] 678 << "\n"; 679 ((void**)MemMgr->getGOTBase())[idx] = ResultPtr; 680 } 681 } 682 } 683 684 
TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0], 685 Relocations.size(), MemMgr->getGOTBase()); 686 } 687 688 // Update the GOT entry for F to point to the new code. 689 if (MemMgr->isManagingGOT()) { 690 unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin); 691 if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) { 692 DOUT << "GOT was out of date for " << (void*)BufferBegin 693 << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] << "\n"; 694 ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin; 695 } 696 } 697 698 // Invalidate the icache if necessary. 699 synchronizeICache(FnStart, FnEnd-FnStart); 700 701 // Add it to the JIT symbol table if the host wants it. 702 AddFunctionToSymbolTable(F.getFunction()->getNameStart(), 703 FnStart, FnEnd-FnStart); 704 705 DOUT << "JIT: Finished CodeGen of [" << (void*)FnStart 706 << "] Function: " << F.getFunction()->getName() 707 << ": " << (FnEnd-FnStart) << " bytes of text, " 708 << Relocations.size() << " relocations\n"; 709 Relocations.clear(); 710 711#ifndef NDEBUG 712 if (sys::hasDisassembler()) 713 DOUT << "Disassembled code:\n" 714 << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart); 715#endif 716 if (ExceptionHandling) { 717 uintptr_t ActualSize; 718 SavedBufferBegin = BufferBegin; 719 SavedBufferEnd = BufferEnd; 720 SavedCurBufferPtr = CurBufferPtr; 721 722 BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(), 723 ActualSize); 724 BufferEnd = BufferBegin+ActualSize; 725 unsigned char* FrameRegister = DE->EmitDwarfTable(F, *this, FnStart, FnEnd); 726 MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr, 727 FrameRegister); 728 BufferBegin = SavedBufferBegin; 729 BufferEnd = SavedBufferEnd; 730 CurBufferPtr = SavedCurBufferPtr; 731 732 TheJIT->RegisterTable(FrameRegister); 733 } 734 MMI->EndFunction(); 735 736 return false; 737} 738 739void JITEmitter::emitConstantPool(MachineConstantPool *MCP) { 740 const 
    std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
  if (Constants.empty()) return;

  // The last entry has the largest offset; its offset plus its own size is
  // the total number of bytes the pool occupies.
  MachineConstantPoolEntry CPE = Constants.back();
  unsigned Size = CPE.Offset;
  const Type *Ty = CPE.isMachineConstantPoolEntry()
    ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
  Size += TheJIT->getTargetData()->getABITypeSize(Ty);

  ConstantPoolBase = allocateSpace(Size, 1 << MCP->getConstantPoolAlignment());
  ConstantPool = MCP;

  if (ConstantPoolBase == 0) return;  // Buffer overflow.

  // Initialize the memory for all of the constant pool entries.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    void *CAddr = (char*)ConstantPoolBase+Constants[i].Offset;
    if (Constants[i].isMachineConstantPoolEntry()) {
      // FIXME: add support to lower machine constant pool values into bytes!
      cerr << "Initialize memory with machine specific constant pool entry"
           << " has not been implemented!\n";
      abort();
    }
    TheJIT->InitializeMemory(Constants[i].Val.ConstVal, CAddr);
  }
}

void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return;

  // Total number of slots across all of the function's jump tables.
  unsigned NumEntries = 0;
  for (unsigned i = 0, e = JT.size(); i != e; ++i)
    NumEntries += JT[i].MBBs.size();

  unsigned EntrySize = MJTI->getEntrySize();

  // Just allocate space for all the jump tables now.  We will fix up the actual
  // MBB entries in the tables after we emit the code for each block, since then
  // we will know the final locations of the MBBs in memory.
  JumpTable = MJTI;
  JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment());
}

void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty() || JumpTableBase == 0) return;

  if (TargetMachine::getRelocationModel() == Reloc::PIC_) {
    assert(MJTI->getEntrySize() == 4 && "Cross JIT'ing?");
    // For each jump table, place the offset from the beginning of the table
    // to the target address.
    int *SlotPtr = (int*)JumpTableBase;

    for (unsigned i = 0, e = JT.size(); i != e; ++i) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
      // Store the offset of the basic block for this jump table slot in the
      // memory we allocated for the jump table in 'initJumpTableInfo'
      intptr_t Base = (intptr_t)SlotPtr;
      for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) {
        intptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[mi]);
        *SlotPtr++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base);
      }
    }
  } else {
    assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?");

    // For each jump table, map each target in the jump table to the address of
    // an emitted MachineBasicBlock.
    intptr_t *SlotPtr = (intptr_t*)JumpTableBase;

    for (unsigned i = 0, e = JT.size(); i != e; ++i) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
      // Store the address of the basic block for this jump table slot in the
      // memory we allocated for the jump table in 'initJumpTableInfo'
      for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
        *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
    }
  }
}

// startFunctionStub - Save the current emission buffer and redirect emission
// into freshly allocated stub memory; finishFunctionStub restores it.
void JITEmitter::startFunctionStub(unsigned StubSize, unsigned Alignment) {
  SavedBufferBegin = BufferBegin;
  SavedBufferEnd = BufferEnd;
  SavedCurBufferPtr = CurBufferPtr;

  BufferBegin = CurBufferPtr = MemMgr->allocateStub(StubSize, Alignment);
  BufferEnd = BufferBegin+StubSize+1;
}

// finishFunctionStub - Restore the buffer state saved by startFunctionStub
// and return the address of the stub that was just emitted.
void *JITEmitter::finishFunctionStub(const Function *F) {
  NumBytes += getCurrentPCOffset();
  std::swap(SavedBufferBegin, BufferBegin);
  BufferEnd = SavedBufferEnd;
  CurBufferPtr = SavedCurBufferPtr;
  return SavedBufferBegin;
}

// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
// in the constant pool that was last emitted with the 'emitConstantPool'
// method.
841// 842intptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const { 843 assert(ConstantNum < ConstantPool->getConstants().size() && 844 "Invalid ConstantPoolIndex!"); 845 return (intptr_t)ConstantPoolBase + 846 ConstantPool->getConstants()[ConstantNum].Offset; 847} 848 849// getJumpTableEntryAddress - Return the address of the JumpTable with index 850// 'Index' in the jumpp table that was last initialized with 'initJumpTableInfo' 851// 852intptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const { 853 const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables(); 854 assert(Index < JT.size() && "Invalid jump table index!"); 855 856 unsigned Offset = 0; 857 unsigned EntrySize = JumpTable->getEntrySize(); 858 859 for (unsigned i = 0; i < Index; ++i) 860 Offset += JT[i].MBBs.size(); 861 862 Offset *= EntrySize; 863 864 return (intptr_t)((char *)JumpTableBase + Offset); 865} 866 867//===----------------------------------------------------------------------===// 868// Public interface to this file 869//===----------------------------------------------------------------------===// 870 871MachineCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM) { 872 return new JITEmitter(jit, JMM); 873} 874 875// getPointerToNamedFunction - This function is used as a global wrapper to 876// JIT::getPointerToNamedFunction for the purpose of resolving symbols when 877// bugpoint is debugging the JIT. In that scenario, we are loading an .so and 878// need to resolve function(s) that are being mis-codegenerated, so we need to 879// resolve their addresses at runtime, and this is the way to do it. 
880extern "C" { 881 void *getPointerToNamedFunction(const char *Name) { 882 if (Function *F = TheJIT->FindFunctionNamed(Name)) 883 return TheJIT->getPointerToFunction(F); 884 return TheJIT->getPointerToNamedFunction(Name); 885 } 886} 887 888// getPointerToFunctionOrStub - If the specified function has been 889// code-gen'd, return a pointer to the function. If not, compile it, or use 890// a stub to implement lazy compilation if available. 891// 892void *JIT::getPointerToFunctionOrStub(Function *F) { 893 // If we have already code generated the function, just return the address. 894 if (void *Addr = getPointerToGlobalIfAvailable(F)) 895 return Addr; 896 897 // Get a stub if the target supports it. 898 assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?"); 899 JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter()); 900 return JE->getJITResolver().getFunctionStub(F); 901} 902 903/// freeMachineCodeForFunction - release machine code memory for given Function. 904/// 905void JIT::freeMachineCodeForFunction(Function *F) { 906 907 // Delete translation for this from the ExecutionEngine, so it will get 908 // retranslated next time it is used. 909 void *OldPtr = updateGlobalMapping(F, 0); 910 911 if (OldPtr) 912 RemoveFunctionFromSymbolTable(OldPtr); 913 914 // Free the actual memory for the function body and related stuff. 915 assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?"); 916 static_cast<JITEmitter*>(MCE)->deallocateMemForFunction(F); 917} 918 919