JITEmitter.cpp revision 9200605cd5f6db50be20efb7df926dc5a0d19a4d
1//===-- JITEmitter.cpp - Write machine code to executable memory ----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines a MachineCodeEmitter object that is used by the JIT to 11// write machine code to memory and remember where relocatable values are. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "jit" 16#include "JIT.h" 17#include "JITDwarfEmitter.h" 18#include "llvm/Constants.h" 19#include "llvm/Module.h" 20#include "llvm/DerivedTypes.h" 21#include "llvm/CodeGen/MachineCodeEmitter.h" 22#include "llvm/CodeGen/MachineFunction.h" 23#include "llvm/CodeGen/MachineConstantPool.h" 24#include "llvm/CodeGen/MachineJumpTableInfo.h" 25#include "llvm/CodeGen/MachineModuleInfo.h" 26#include "llvm/CodeGen/MachineRelocation.h" 27#include "llvm/ExecutionEngine/JITMemoryManager.h" 28#include "llvm/ExecutionEngine/GenericValue.h" 29#include "llvm/Target/TargetData.h" 30#include "llvm/Target/TargetJITInfo.h" 31#include "llvm/Target/TargetMachine.h" 32#include "llvm/Target/TargetOptions.h" 33#include "llvm/Support/Debug.h" 34#include "llvm/Support/MutexGuard.h" 35#include "llvm/System/Disassembler.h" 36#include "llvm/System/Memory.h" 37#include "llvm/Target/TargetInstrInfo.h" 38#include "llvm/ADT/Statistic.h" 39#include <algorithm> 40#include <set> 41using namespace llvm; 42 43STATISTIC(NumBytes, "Number of bytes of machine code compiled"); 44STATISTIC(NumRelos, "Number of relocations applied"); 45static JIT *TheJIT = 0; 46 47 48//===----------------------------------------------------------------------===// 49// JIT lazy compilation code. 
//
namespace {
  /// JITResolverState - The mutable maps behind JITResolver.  Every accessor
  /// takes the MutexGuard that proves the caller holds TheJIT->lock, so the
  /// maps are never handed out without the JIT-wide lock held.
  class JITResolverState {
  private:
    /// FunctionToStubMap - Keep track of the stub created for a particular
    /// function so that we can reuse them if necessary.
    std::map<Function*, void*> FunctionToStubMap;

    /// StubToFunctionMap - Keep track of the function that each stub
    /// corresponds to.
    std::map<void*, Function*> StubToFunctionMap;

    /// GlobalToLazyPtrMap - Keep track of the lazy pointer created for a
    /// particular GlobalVariable so that we can reuse them if necessary.
    std::map<GlobalValue*, void*> GlobalToLazyPtrMap;

  public:
    // Each getter asserts (in debug builds) that the caller really holds the
    // JIT lock before returning a reference to the underlying map.
    std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return FunctionToStubMap;
    }

    std::map<void*, Function*>& getStubToFunctionMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return StubToFunctionMap;
    }

    std::map<GlobalValue*, void*>&
    getGlobalToLazyPtrMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return GlobalToLazyPtrMap;
    }
  };

  /// JITResolver - Keep track of, and resolve, call sites for functions that
  /// have not yet been compiled.
  class JITResolver {
    /// LazyResolverFn - The target lazy resolver function that we actually
    /// rewrite instructions to use.
    TargetJITInfo::LazyResolverFn LazyResolverFn;

    JITResolverState state;

    /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
    /// external functions.
    // NOTE(review): unlike the maps in 'state', this map is accessed without
    // an assertion that TheJIT->lock is held -- confirm all callers hold it.
    std::map<void*, void*> ExternalFnToStubMap;

    // map addresses to indexes in the GOT.  Index 0 is the "no entry"
    // sentinel; real GOT indices start at 1 (see getGOTIndexForAddr).
    std::map<void*, unsigned> revGOTMap;
    unsigned nextGOTIndex;

    /// TheJITResolver - The single live resolver instance; only one JIT (and
    /// hence one resolver) may exist at a time.
    static JITResolver *TheJITResolver;
  public:
    explicit JITResolver(JIT &jit) : nextGOTIndex(0) {
      TheJIT = &jit;

      LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
      assert(TheJITResolver == 0 && "Multiple JIT resolvers?");
      TheJITResolver = this;
    }

    ~JITResolver() {
      TheJITResolver = 0;
    }

    /// getFunctionStub - This returns a pointer to a function stub, creating
    /// one on demand as needed.
    void *getFunctionStub(Function *F);

    /// getExternalFunctionStub - Return a stub for the function at the
    /// specified address, created lazily on demand.
    void *getExternalFunctionStub(void *FnAddr);

    /// getGlobalValueLazyPtr - Return a lazy pointer containing the specified
    /// GV address.
    void *getGlobalValueLazyPtr(GlobalValue *V, void *GVAddress);

    /// AddCallbackAtLocation - If the target is capable of rewriting an
    /// instruction without the use of a stub, record the location of the use so
    /// we know which function is being used at the location.
    void *AddCallbackAtLocation(Function *F, void *Location) {
      MutexGuard locked(TheJIT->lock);
      /// Get the target-specific JIT resolver function.
      state.getStubToFunctionMap(locked)[Location] = F;
      return (void*)(intptr_t)LazyResolverFn;
    }

    /// getGOTIndexForAddr - Return a new or existing index in the GOT for
    /// an address.  This function only manages slots, it does not manage the
    /// contents of the slots or the memory associated with the GOT.
    unsigned getGOTIndexForAddr(void *addr);

    /// JITCompilerFn - This function is called to resolve a stub to a compiled
    /// address.  If the LLVM Function corresponding to the stub has not yet
    /// been compiled, this function compiles it first.
    static void *JITCompilerFn(void *Stub);
  };
}
145 static void *JITCompilerFn(void *Stub); 146 }; 147} 148 149JITResolver *JITResolver::TheJITResolver = 0; 150 151/// getFunctionStub - This returns a pointer to a function stub, creating 152/// one on demand as needed. 153void *JITResolver::getFunctionStub(Function *F) { 154 MutexGuard locked(TheJIT->lock); 155 156 // If we already have a stub for this function, recycle it. 157 void *&Stub = state.getFunctionToStubMap(locked)[F]; 158 if (Stub) return Stub; 159 160 // Call the lazy resolver function unless we already KNOW it is an external 161 // function, in which case we just skip the lazy resolution step. 162 void *Actual = (void*)(intptr_t)LazyResolverFn; 163 if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) 164 Actual = TheJIT->getPointerToFunction(F); 165 166 // Otherwise, codegen a new stub. For now, the stub will call the lazy 167 // resolver function. 168 Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual, 169 *TheJIT->getCodeEmitter()); 170 171 if (Actual != (void*)(intptr_t)LazyResolverFn) { 172 // If we are getting the stub for an external function, we really want the 173 // address of the stub in the GlobalAddressMap for the JIT, not the address 174 // of the external function. 175 TheJIT->updateGlobalMapping(F, Stub); 176 } 177 178 DOUT << "JIT: Stub emitted at [" << Stub << "] for function '" 179 << F->getName() << "'\n"; 180 181 // Finally, keep track of the stub-to-Function mapping so that the 182 // JITCompilerFn knows which function to compile! 183 state.getStubToFunctionMap(locked)[Stub] = F; 184 return Stub; 185} 186 187/// getGlobalValueLazyPtr - Return a lazy pointer containing the specified 188/// GV address. 189void *JITResolver::getGlobalValueLazyPtr(GlobalValue *GV, void *GVAddress) { 190 MutexGuard locked(TheJIT->lock); 191 192 // If we already have a stub for this global variable, recycle it. 
/// getExternalFunctionStub - Return a stub for the function at the
/// specified address, created lazily on demand.
void *JITResolver::getExternalFunctionStub(void *FnAddr) {
  // If we already have a stub for this function, recycle it.
  // NOTE(review): this map is read/written without taking TheJIT->lock --
  // confirm the callers hold it.
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub) return Stub;

  // Emit a stub with no associated Function (first argument 0) that jumps
  // straight to the known external address.
  Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr,
                                               *TheJIT->getCodeEmitter());

  DOUT << "JIT: Stub emitted at [" << Stub
       << "] for external function at '" << FnAddr << "'\n";
  return Stub;
}

/// getGOTIndexForAddr - Return the GOT slot for 'addr', allocating a fresh
/// one on first sight.  Slot numbers start at 1; operator[]'s
/// value-initialized 0 serves as the "not yet assigned" sentinel.
unsigned JITResolver::getGOTIndexForAddr(void* addr) {
  unsigned idx = revGOTMap[addr];
  if (!idx) {
    idx = ++nextGOTIndex;
    revGOTMap[addr] = idx;
    DOUT << "Adding GOT entry " << idx << " for addr " << addr << "\n";
  }
  return idx;
}

/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered.  It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
void *JITResolver::JITCompilerFn(void *Stub) {
  JITResolver &JR = *TheJITResolver;

  Function* F = 0;
  void* ActualPtr = 0;

  {
    // Only lock for getting the Function. The call getPointerToFunction made
    // in this function might trigger function materializing, which requires
    // JIT lock to be unlocked.
    MutexGuard locked(TheJIT->lock);

    // The address given to us for the stub may not be exactly right, it might
    // be a little bit after the stub. As such, use upper_bound to find it
    // (upper_bound returns the first entry strictly greater than Stub, so the
    // previous entry is the stub containing the return address).
    std::map<void*, Function*>::iterator I =
      JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
    assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
           "This is not a known stub!");
    F = (--I)->second;
    ActualPtr = I->first;
  }

  // If we have already code generated the function, just return the address.
  void *Result = TheJIT->getPointerToGlobalIfAvailable(F);

  if (!Result) {
    // Otherwise we don't have it, do lazy compilation now.

    // If lazy compilation is disabled, emit a useful error message and abort.
    if (TheJIT->isLazyCompilationDisabled()) {
      cerr << "LLVM JIT requested to do lazy compilation of function '"
           << F->getName() << "' when lazy compiles are disabled!\n";
      abort();
    }

    // We might like to remove the stub from the StubToFunction map.
    // We can't do that! Multiple threads could be stuck, waiting to acquire the
    // lock above. As soon as the 1st function finishes compiling the function,
    // the next one will be released, and needs to be able to find the function
    // it needs to call.
    //JR.state.getStubToFunctionMap(locked).erase(I);

    DOUT << "JIT: Lazily resolving function '" << F->getName()
         << "' In stub ptr = " << Stub << " actual ptr = "
         << ActualPtr << "\n";

    Result = TheJIT->getPointerToFunction(F);
  }

  // Reacquire the lock to update the maps (the compile above ran unlocked).
  MutexGuard locked(TheJIT->lock);

  // We don't need to reuse this stub in the future, as F is now compiled.
  // Note this erases only the Function->stub direction; the stub->Function
  // entry is deliberately kept (see the comment above).
  JR.state.getFunctionToStubMap(locked).erase(F);

  // FIXME: We could rewrite all references to this stub if we knew them.

  // What we will do is set the compiled function address to map to the
  // same GOT entry as the stub so that later clients may update the GOT
  // if they see it still using the stub address.
  // Note: this is done so the Resolver doesn't have to manage GOT memory
  // Do this without allocating map space if the target isn't using a GOT
  if(JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
    JR.revGOTMap[Result] = JR.revGOTMap[Stub];

  return Result;
}
294 // Note: this is done so the Resolver doesn't have to manage GOT memory 295 // Do this without allocating map space if the target isn't using a GOT 296 if(JR.revGOTMap.find(Stub) != JR.revGOTMap.end()) 297 JR.revGOTMap[Result] = JR.revGOTMap[Stub]; 298 299 return Result; 300} 301 302//===----------------------------------------------------------------------===// 303// Function Index Support 304 305// On MacOS we generate an index of currently JIT'd functions so that 306// performance tools can determine a symbol name and accurate code range for a 307// PC value. Because performance tools are generally asynchronous, the code 308// below is written with the hope that it could be interrupted at any time and 309// have useful answers. However, we don't go crazy with atomic operations, we 310// just do a "reasonable effort". 311#ifdef __APPLE__ 312#define ENABLE_JIT_SYMBOL_TABLE 0 313#endif 314 315/// JitSymbolEntry - Each function that is JIT compiled results in one of these 316/// being added to an array of symbols. This indicates the name of the function 317/// as well as the address range it occupies. This allows the client to map 318/// from a PC value to the name of the function. 319struct JitSymbolEntry { 320 const char *FnName; // FnName - a strdup'd string. 321 void *FnStart; 322 intptr_t FnSize; 323}; 324 325 326struct JitSymbolTable { 327 /// NextPtr - This forms a linked list of JitSymbolTable entries. This 328 /// pointer is not used right now, but might be used in the future. Consider 329 /// it reserved for future use. 330 JitSymbolTable *NextPtr; 331 332 /// Symbols - This is an array of JitSymbolEntry entries. Only the first 333 /// 'NumSymbols' symbols are valid. 334 JitSymbolEntry *Symbols; 335 336 /// NumSymbols - This indicates the number entries in the Symbols array that 337 /// are valid. 338 unsigned NumSymbols; 339 340 /// NumAllocated - This indicates the amount of space we have in the Symbols 341 /// array. 
This is a private field that should not be read by external tools. 342 unsigned NumAllocated; 343}; 344 345#if ENABLE_JIT_SYMBOL_TABLE 346JitSymbolTable *__jitSymbolTable; 347#endif 348 349static void AddFunctionToSymbolTable(const char *FnName, 350 void *FnStart, intptr_t FnSize) { 351 assert(FnName != 0 && FnStart != 0 && "Bad symbol to add"); 352 JitSymbolTable **SymTabPtrPtr = 0; 353#if !ENABLE_JIT_SYMBOL_TABLE 354 return; 355#else 356 SymTabPtrPtr = &__jitSymbolTable; 357#endif 358 359 // If this is the first entry in the symbol table, add the JitSymbolTable 360 // index. 361 if (*SymTabPtrPtr == 0) { 362 JitSymbolTable *New = new JitSymbolTable(); 363 New->NextPtr = 0; 364 New->Symbols = 0; 365 New->NumSymbols = 0; 366 New->NumAllocated = 0; 367 *SymTabPtrPtr = New; 368 } 369 370 JitSymbolTable *SymTabPtr = *SymTabPtrPtr; 371 372 // If we have space in the table, reallocate the table. 373 if (SymTabPtr->NumSymbols >= SymTabPtr->NumAllocated) { 374 // If we don't have space, reallocate the table. 375 unsigned NewSize = std::max(64U, SymTabPtr->NumAllocated*2); 376 JitSymbolEntry *NewSymbols = new JitSymbolEntry[NewSize]; 377 JitSymbolEntry *OldSymbols = SymTabPtr->Symbols; 378 379 // Copy the old entries over. 380 memcpy(NewSymbols, OldSymbols, 381 SymTabPtr->NumSymbols*sizeof(OldSymbols[0])); 382 383 // Swap the new symbols in, delete the old ones. 384 SymTabPtr->Symbols = NewSymbols; 385 SymTabPtr->NumAllocated = NewSize; 386 delete [] OldSymbols; 387 } 388 389 // Otherwise, we have enough space, just tack it onto the end of the array. 
390 JitSymbolEntry &Entry = SymTabPtr->Symbols[SymTabPtr->NumSymbols]; 391 Entry.FnName = strdup(FnName); 392 Entry.FnStart = FnStart; 393 Entry.FnSize = FnSize; 394 ++SymTabPtr->NumSymbols; 395} 396 397static void RemoveFunctionFromSymbolTable(void *FnStart) { 398 assert(FnStart && "Invalid function pointer"); 399 JitSymbolTable **SymTabPtrPtr = 0; 400#if !ENABLE_JIT_SYMBOL_TABLE 401 return; 402#else 403 SymTabPtrPtr = &__jitSymbolTable; 404#endif 405 406 JitSymbolTable *SymTabPtr = *SymTabPtrPtr; 407 JitSymbolEntry *Symbols = SymTabPtr->Symbols; 408 409 // Scan the table to find its index. The table is not sorted, so do a linear 410 // scan. 411 unsigned Index; 412 for (Index = 0; Symbols[Index].FnStart != FnStart; ++Index) 413 assert(Index != SymTabPtr->NumSymbols && "Didn't find function!"); 414 415 // Once we have an index, we know to nuke this entry, overwrite it with the 416 // entry at the end of the array, making the last entry redundant. 417 const char *OldName = Symbols[Index].FnName; 418 Symbols[Index] = Symbols[SymTabPtr->NumSymbols-1]; 419 free((void*)OldName); 420 421 // Drop the number of symbols in the table. 422 --SymTabPtr->NumSymbols; 423 424 // Finally, if we deleted the final symbol, deallocate the table itself. 425 if (SymTabPtr->NumSymbols != 0) 426 return; 427 428 *SymTabPtrPtr = 0; 429 delete [] Symbols; 430 delete SymTabPtr; 431} 432 433//===----------------------------------------------------------------------===// 434// JITEmitter code. 435// 436namespace { 437 /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is 438 /// used to output functions to memory for execution. 439 class JITEmitter : public MachineCodeEmitter { 440 JITMemoryManager *MemMgr; 441 442 // When outputting a function stub in the context of some other function, we 443 // save BufferBegin/BufferEnd/CurBufferPtr here. 
444 unsigned char *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr; 445 446 /// Relocations - These are the relocations that the function needs, as 447 /// emitted. 448 std::vector<MachineRelocation> Relocations; 449 450 /// MBBLocations - This vector is a mapping from MBB ID's to their address. 451 /// It is filled in by the StartMachineBasicBlock callback and queried by 452 /// the getMachineBasicBlockAddress callback. 453 std::vector<intptr_t> MBBLocations; 454 455 /// ConstantPool - The constant pool for the current function. 456 /// 457 MachineConstantPool *ConstantPool; 458 459 /// ConstantPoolBase - A pointer to the first entry in the constant pool. 460 /// 461 void *ConstantPoolBase; 462 463 /// JumpTable - The jump tables for the current function. 464 /// 465 MachineJumpTableInfo *JumpTable; 466 467 /// JumpTableBase - A pointer to the first entry in the jump table. 468 /// 469 void *JumpTableBase; 470 471 /// Resolver - This contains info about the currently resolved functions. 472 JITResolver Resolver; 473 474 /// DE - The dwarf emitter for the jit. 475 JITDwarfEmitter *DE; 476 477 /// LabelLocations - This vector is a mapping from Label ID's to their 478 /// address. 479 std::vector<intptr_t> LabelLocations; 480 481 /// MMI - Machine module info for exception informations 482 MachineModuleInfo* MMI; 483 484 // GVSet - a set to keep track of which globals have been seen 485 std::set<const GlobalVariable*> GVSet; 486 487 public: 488 JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit) { 489 MemMgr = JMM ? 
JMM : JITMemoryManager::CreateDefaultMemManager(); 490 if (jit.getJITInfo().needsGOT()) { 491 MemMgr->AllocateGOT(); 492 DOUT << "JIT is managing a GOT\n"; 493 } 494 495 if (ExceptionHandling) DE = new JITDwarfEmitter(jit); 496 } 497 ~JITEmitter() { 498 delete MemMgr; 499 if (ExceptionHandling) delete DE; 500 } 501 502 /// classof - Methods for support type inquiry through isa, cast, and 503 /// dyn_cast: 504 /// 505 static inline bool classof(const JITEmitter*) { return true; } 506 static inline bool classof(const MachineCodeEmitter*) { return true; } 507 508 JITResolver &getJITResolver() { return Resolver; } 509 510 virtual void startFunction(MachineFunction &F); 511 virtual bool finishFunction(MachineFunction &F); 512 513 void emitConstantPool(MachineConstantPool *MCP); 514 void initJumpTableInfo(MachineJumpTableInfo *MJTI); 515 void emitJumpTableInfo(MachineJumpTableInfo *MJTI); 516 517 virtual void startFunctionStub(const GlobalValue* F, unsigned StubSize, 518 unsigned Alignment = 1); 519 virtual void* finishFunctionStub(const GlobalValue *F); 520 521 /// allocateSpace - Reserves space in the current block if any, or 522 /// allocate a new one of the given size. 
523 virtual void *allocateSpace(intptr_t Size, unsigned Alignment); 524 525 virtual void addRelocation(const MachineRelocation &MR) { 526 Relocations.push_back(MR); 527 } 528 529 virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) { 530 if (MBBLocations.size() <= (unsigned)MBB->getNumber()) 531 MBBLocations.resize((MBB->getNumber()+1)*2); 532 MBBLocations[MBB->getNumber()] = getCurrentPCValue(); 533 } 534 535 virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const; 536 virtual intptr_t getJumpTableEntryAddress(unsigned Entry) const; 537 538 virtual intptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const { 539 assert(MBBLocations.size() > (unsigned)MBB->getNumber() && 540 MBBLocations[MBB->getNumber()] && "MBB not emitted!"); 541 return MBBLocations[MBB->getNumber()]; 542 } 543 544 /// deallocateMemForFunction - Deallocate all memory for the specified 545 /// function body. 546 void deallocateMemForFunction(Function *F) { 547 MemMgr->deallocateMemForFunction(F); 548 } 549 550 virtual void emitLabel(uint64_t LabelID) { 551 if (LabelLocations.size() <= LabelID) 552 LabelLocations.resize((LabelID+1)*2); 553 LabelLocations[LabelID] = getCurrentPCValue(); 554 } 555 556 virtual intptr_t getLabelAddress(uint64_t LabelID) const { 557 assert(LabelLocations.size() > (unsigned)LabelID && 558 LabelLocations[LabelID] && "Label not emitted!"); 559 return LabelLocations[LabelID]; 560 } 561 562 virtual void setModuleInfo(MachineModuleInfo* Info) { 563 MMI = Info; 564 if (ExceptionHandling) DE->setModuleInfo(Info); 565 } 566 567 void setMemoryExecutable(void) { 568 MemMgr->setMemoryExecutable(); 569 } 570 571 private: 572 void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub); 573 void *getPointerToGVLazyPtr(GlobalValue *V, void *Reference, 574 bool NoNeedStub); 575 unsigned addSizeOfGlobal(const GlobalVariable *GV, unsigned Size); 576 unsigned addSizeOfGlobalsInConstantVal(const Constant *C, unsigned Size); 577 unsigned 
addSizeOfGlobalsInInitializer(const Constant *Init, unsigned Size); 578 unsigned GetSizeOfGlobalsInBytes(MachineFunction &MF); 579 }; 580} 581 582void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference, 583 bool DoesntNeedStub) { 584 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { 585 /// FIXME: If we straightened things out, this could actually emit the 586 /// global immediately instead of queuing it for codegen later! 587 return TheJIT->getOrEmitGlobalVariable(GV); 588 } 589 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 590 return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false)); 591 592 // If we have already compiled the function, return a pointer to its body. 593 Function *F = cast<Function>(V); 594 void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F); 595 if (ResultPtr) return ResultPtr; 596 597 if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) { 598 // If this is an external function pointer, we can force the JIT to 599 // 'compile' it, which really just adds it to the map. 600 if (DoesntNeedStub) 601 return TheJIT->getPointerToFunction(F); 602 603 return Resolver.getFunctionStub(F); 604 } 605 606 // Okay, the function has not been compiled yet, if the target callback 607 // mechanism is capable of rewriting the instruction directly, prefer to do 608 // that instead of emitting a stub. 609 if (DoesntNeedStub) 610 return Resolver.AddCallbackAtLocation(F, Reference); 611 612 // Otherwise, we have to emit a lazy resolving stub. 613 return Resolver.getFunctionStub(F); 614} 615 616void *JITEmitter::getPointerToGVLazyPtr(GlobalValue *V, void *Reference, 617 bool DoesntNeedStub) { 618 // Make sure GV is emitted first. 619 // FIXME: For now, if the GV is an external function we force the JIT to 620 // compile it so the lazy pointer will contain the fully resolved address. 
621 void *GVAddress = getPointerToGlobal(V, Reference, true); 622 return Resolver.getGlobalValueLazyPtr(V, GVAddress); 623} 624 625static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP) { 626 const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants(); 627 if (Constants.empty()) return 0; 628 629 MachineConstantPoolEntry CPE = Constants.back(); 630 unsigned Size = CPE.Offset; 631 const Type *Ty = CPE.isMachineConstantPoolEntry() 632 ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType(); 633 Size += TheJIT->getTargetData()->getABITypeSize(Ty); 634 return Size; 635} 636 637static unsigned GetJumpTableSizeInBytes(MachineJumpTableInfo *MJTI) { 638 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 639 if (JT.empty()) return 0; 640 641 unsigned NumEntries = 0; 642 for (unsigned i = 0, e = JT.size(); i != e; ++i) 643 NumEntries += JT[i].MBBs.size(); 644 645 unsigned EntrySize = MJTI->getEntrySize(); 646 647 return NumEntries * EntrySize; 648} 649 650static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) { 651 if (Alignment == 0) Alignment = 1; 652 // Since we do not know where the buffer will be allocated, be pessimistic. 653 return Size + Alignment; 654} 655 656/// addSizeOfGlobal - add the size of the global (plus any alignment padding) 657/// into the running total Size. 658 659unsigned JITEmitter::addSizeOfGlobal(const GlobalVariable *GV, unsigned Size) { 660 const Type *ElTy = GV->getType()->getElementType(); 661 size_t GVSize = (size_t)TheJIT->getTargetData()->getABITypeSize(ElTy); 662 size_t GVAlign = 663 (size_t)TheJIT->getTargetData()->getPreferredAlignment(GV); 664 DOUT << "Adding in size " << GVSize << " alignment " << GVAlign; 665 DEBUG(GV->dump()); 666 // Assume code section ends with worst possible alignment, so first 667 // variable needs maximal padding. 
668 if (Size==0) 669 Size = 1; 670 Size = ((Size+GVAlign-1)/GVAlign)*GVAlign; 671 Size += GVSize; 672 return Size; 673} 674 675/// addSizeOfGlobalsInConstantVal - find any globals that we haven't seen yet 676/// but are referenced from the constant; put them in GVSet and add their 677/// size into the running total Size. 678 679unsigned JITEmitter::addSizeOfGlobalsInConstantVal(const Constant *C, 680 unsigned Size) { 681 // If its undefined, return the garbage. 682 if (isa<UndefValue>(C)) 683 return Size; 684 685 // If the value is a ConstantExpr 686 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 687 Constant *Op0 = CE->getOperand(0); 688 switch (CE->getOpcode()) { 689 case Instruction::GetElementPtr: 690 case Instruction::Trunc: 691 case Instruction::ZExt: 692 case Instruction::SExt: 693 case Instruction::FPTrunc: 694 case Instruction::FPExt: 695 case Instruction::UIToFP: 696 case Instruction::SIToFP: 697 case Instruction::FPToUI: 698 case Instruction::FPToSI: 699 case Instruction::PtrToInt: 700 case Instruction::IntToPtr: 701 case Instruction::BitCast: { 702 Size = addSizeOfGlobalsInConstantVal(Op0, Size); 703 break; 704 } 705 case Instruction::Add: 706 case Instruction::Sub: 707 case Instruction::Mul: 708 case Instruction::UDiv: 709 case Instruction::SDiv: 710 case Instruction::URem: 711 case Instruction::SRem: 712 case Instruction::And: 713 case Instruction::Or: 714 case Instruction::Xor: { 715 Size = addSizeOfGlobalsInConstantVal(Op0, Size); 716 Size = addSizeOfGlobalsInConstantVal(CE->getOperand(1), Size); 717 break; 718 } 719 default: { 720 cerr << "ConstantExpr not handled: " << *CE << "\n"; 721 abort(); 722 } 723 } 724 } 725 726 if (C->getType()->getTypeID() == Type::PointerTyID) 727 if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(C)) 728 if (GVSet.insert(GV).second) 729 Size = addSizeOfGlobal(GV, Size); 730 731 return Size; 732} 733 734/// addSizeOfGLobalsInInitializer - handle any globals that we haven't seen yet 735/// but are 
referenced from the given initializer. 736 737unsigned JITEmitter::addSizeOfGlobalsInInitializer(const Constant *Init, 738 unsigned Size) { 739 if (!isa<UndefValue>(Init) && 740 !isa<ConstantVector>(Init) && 741 !isa<ConstantAggregateZero>(Init) && 742 !isa<ConstantArray>(Init) && 743 !isa<ConstantStruct>(Init) && 744 Init->getType()->isFirstClassType()) 745 Size = addSizeOfGlobalsInConstantVal(Init, Size); 746 return Size; 747} 748 749/// GetSizeOfGlobalsInBytes - walk the code for the function, looking for 750/// globals; then walk the initializers of those globals looking for more. 751/// If their size has not been considered yet, add it into the running total 752/// Size. 753 754unsigned JITEmitter::GetSizeOfGlobalsInBytes(MachineFunction &MF) { 755 unsigned Size = 0; 756 GVSet.clear(); 757 758 for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); 759 MBB != E; ++MBB) { 760 for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end(); 761 I != E; ++I) { 762 const TargetInstrDesc &Desc = I->getDesc(); 763 const MachineInstr &MI = *I; 764 unsigned NumOps = Desc.getNumOperands(); 765 for (unsigned CurOp = 0; CurOp < NumOps; CurOp++) { 766 const MachineOperand &MO = MI.getOperand(CurOp); 767 if (MO.isGlobal()) { 768 GlobalValue* V = MO.getGlobal(); 769 const GlobalVariable *GV = dyn_cast<const GlobalVariable>(V); 770 if (!GV) 771 continue; 772 // If seen in previous function, it will have an entry here. 773 if (TheJIT->getPointerToGlobalIfAvailable(GV)) 774 continue; 775 // If seen earlier in this function, it will have an entry here. 776 // FIXME: it should be possible to combine these tables, by 777 // assuming the addresses of the new globals in this module 778 // start at 0 (or something) and adjusting them after codegen 779 // complete. Another possibility is to grab a marker bit in GV. 780 if (GVSet.insert(GV).second) 781 // A variable as yet unseen. Add in its size. 
782 Size = addSizeOfGlobal(GV, Size); 783 } 784 } 785 } 786 } 787 DOUT << "About to look through initializers\n"; 788 // Look for more globals that are referenced only from initializers. 789 // GVSet.end is computed each time because the set can grow as we go. 790 for (std::set<const GlobalVariable *>::iterator I = GVSet.begin(); 791 I != GVSet.end(); I++) { 792 const GlobalVariable* GV = *I; 793 if (GV->hasInitializer()) 794 Size = addSizeOfGlobalsInInitializer(GV->getInitializer(), Size); 795 } 796 797 return Size; 798} 799 800void JITEmitter::startFunction(MachineFunction &F) { 801 uintptr_t ActualSize = 0; 802 // Set the memory writable, if it's not already 803 MemMgr->setMemoryWritable(); 804 if (MemMgr->NeedsExactSize()) { 805 DOUT << "ExactSize\n"; 806 const TargetInstrInfo* TII = F.getTarget().getInstrInfo(); 807 MachineJumpTableInfo *MJTI = F.getJumpTableInfo(); 808 MachineConstantPool *MCP = F.getConstantPool(); 809 810 // Ensure the constant pool/jump table info is at least 4-byte aligned. 811 ActualSize = RoundUpToAlign(ActualSize, 16); 812 813 // Add the alignment of the constant pool 814 ActualSize = RoundUpToAlign(ActualSize, 815 1 << MCP->getConstantPoolAlignment()); 816 817 // Add the constant pool size 818 ActualSize += GetConstantPoolSizeInBytes(MCP); 819 820 // Add the aligment of the jump table info 821 ActualSize = RoundUpToAlign(ActualSize, MJTI->getAlignment()); 822 823 // Add the jump table size 824 ActualSize += GetJumpTableSizeInBytes(MJTI); 825 826 // Add the alignment for the function 827 ActualSize = RoundUpToAlign(ActualSize, 828 std::max(F.getFunction()->getAlignment(), 8U)); 829 830 // Add the function size 831 ActualSize += TII->GetFunctionSizeInBytes(F); 832 833 DOUT << "ActualSize before globals " << ActualSize << "\n"; 834 // Add the size of the globals that will be allocated after this function. 835 // These are all the ones referenced from this function that were not 836 // previously allocated. 
837 ActualSize += GetSizeOfGlobalsInBytes(F); 838 DOUT << "ActualSize after globals " << ActualSize << "\n"; 839 } 840 841 BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(), 842 ActualSize); 843 BufferEnd = BufferBegin+ActualSize; 844 845 // Ensure the constant pool/jump table info is at least 4-byte aligned. 846 emitAlignment(16); 847 848 emitConstantPool(F.getConstantPool()); 849 initJumpTableInfo(F.getJumpTableInfo()); 850 851 // About to start emitting the machine code for the function. 852 emitAlignment(std::max(F.getFunction()->getAlignment(), 8U)); 853 TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr); 854 855 MBBLocations.clear(); 856} 857 858bool JITEmitter::finishFunction(MachineFunction &F) { 859 if (CurBufferPtr == BufferEnd) { 860 // FIXME: Allocate more space, then try again. 861 cerr << "JIT: Ran out of space for generated machine code!\n"; 862 abort(); 863 } 864 865 emitJumpTableInfo(F.getJumpTableInfo()); 866 867 // FnStart is the start of the text, not the start of the constant pool and 868 // other per-function data. 869 unsigned char *FnStart = 870 (unsigned char *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction()); 871 872 if (!Relocations.empty()) { 873 NumRelos += Relocations.size(); 874 875 // Resolve the relocations to concrete pointers. 876 for (unsigned i = 0, e = Relocations.size(); i != e; ++i) { 877 MachineRelocation &MR = Relocations[i]; 878 void *ResultPtr = 0; 879 if (!MR.letTargetResolve()) { 880 if (MR.isString()) { 881 ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString()); 882 883 // If the target REALLY wants a stub for this function, emit it now. 
884 if (!MR.doesntNeedStub()) 885 ResultPtr = Resolver.getExternalFunctionStub(ResultPtr); 886 } else if (MR.isGlobalValue()) { 887 ResultPtr = getPointerToGlobal(MR.getGlobalValue(), 888 BufferBegin+MR.getMachineCodeOffset(), 889 MR.doesntNeedStub()); 890 } else if (MR.isGlobalValueLazyPtr()) { 891 ResultPtr = getPointerToGVLazyPtr(MR.getGlobalValue(), 892 BufferBegin+MR.getMachineCodeOffset(), 893 MR.doesntNeedStub()); 894 } else if (MR.isBasicBlock()) { 895 ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock()); 896 } else if (MR.isConstantPoolIndex()) { 897 ResultPtr = (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex()); 898 } else { 899 assert(MR.isJumpTableIndex()); 900 ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex()); 901 } 902 903 MR.setResultPointer(ResultPtr); 904 } 905 906 // if we are managing the GOT and the relocation wants an index, 907 // give it one 908 if (MR.isGOTRelative() && MemMgr->isManagingGOT()) { 909 unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr); 910 MR.setGOTIndex(idx); 911 if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) { 912 DOUT << "GOT was out of date for " << ResultPtr 913 << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] 914 << "\n"; 915 ((void**)MemMgr->getGOTBase())[idx] = ResultPtr; 916 } 917 } 918 } 919 920 TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0], 921 Relocations.size(), MemMgr->getGOTBase()); 922 } 923 924 // Update the GOT entry for F to point to the new code. 
925 if (MemMgr->isManagingGOT()) { 926 unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin); 927 if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) { 928 DOUT << "GOT was out of date for " << (void*)BufferBegin 929 << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] << "\n"; 930 ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin; 931 } 932 } 933 934 unsigned char *FnEnd = CurBufferPtr; 935 936 MemMgr->endFunctionBody(F.getFunction(), BufferBegin, FnEnd); 937 BufferBegin = CurBufferPtr = 0; 938 NumBytes += FnEnd-FnStart; 939 940 // Invalidate the icache if necessary. 941 sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart); 942 943 // Add it to the JIT symbol table if the host wants it. 944 AddFunctionToSymbolTable(F.getFunction()->getNameStart(), 945 FnStart, FnEnd-FnStart); 946 947 DOUT << "JIT: Finished CodeGen of [" << (void*)FnStart 948 << "] Function: " << F.getFunction()->getName() 949 << ": " << (FnEnd-FnStart) << " bytes of text, " 950 << Relocations.size() << " relocations\n"; 951 Relocations.clear(); 952 953 // Mark code region readable and executable if it's not so already. 
954 MemMgr->setMemoryExecutable(); 955 956#ifndef NDEBUG 957 { 958 DOUT << std::hex; 959 int i; 960 unsigned char* q = FnStart; 961 for (i=1; q!=FnEnd; q++, i++) { 962 if (i%8==1) 963 DOUT << "0x" << (long)q << ": "; 964 DOUT<< (unsigned short)*q << " "; 965 if (i%8==0) 966 DOUT<<"\n"; 967 } 968 DOUT << std::dec; 969 if (sys::hasDisassembler()) 970 DOUT << "Disassembled code:\n" 971 << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart); 972 } 973#endif 974 if (ExceptionHandling) { 975 uintptr_t ActualSize = 0; 976 SavedBufferBegin = BufferBegin; 977 SavedBufferEnd = BufferEnd; 978 SavedCurBufferPtr = CurBufferPtr; 979 980 if (MemMgr->NeedsExactSize()) { 981 ActualSize = DE->GetDwarfTableSizeInBytes(F, *this, FnStart, FnEnd); 982 } 983 984 BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(), 985 ActualSize); 986 BufferEnd = BufferBegin+ActualSize; 987 unsigned char* FrameRegister = DE->EmitDwarfTable(F, *this, FnStart, FnEnd); 988 MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr, 989 FrameRegister); 990 BufferBegin = SavedBufferBegin; 991 BufferEnd = SavedBufferEnd; 992 CurBufferPtr = SavedCurBufferPtr; 993 994 TheJIT->RegisterTable(FrameRegister); 995 } 996 997 if (MMI) 998 MMI->EndFunction(); 999 1000 return false; 1001} 1002 1003void* JITEmitter::allocateSpace(intptr_t Size, unsigned Alignment) { 1004 if (BufferBegin) 1005 return MachineCodeEmitter::allocateSpace(Size, Alignment); 1006 1007 // create a new memory block if there is no active one. 1008 // care must be taken so that BufferBegin is invalidated when a 1009 // block is trimmed 1010 BufferBegin = CurBufferPtr = MemMgr->allocateSpace(Size, Alignment); 1011 BufferEnd = BufferBegin+Size; 1012 return CurBufferPtr; 1013} 1014 1015void JITEmitter::emitConstantPool(MachineConstantPool *MCP) { 1016 if (TheJIT->getJITInfo().hasCustomConstantPool()) { 1017 DOUT << "JIT: Target has custom constant pool handling. 
Omitting standard " 1018 "constant pool\n"; 1019 return; 1020 } 1021 const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants(); 1022 if (Constants.empty()) return; 1023 1024 MachineConstantPoolEntry CPE = Constants.back(); 1025 unsigned Size = CPE.Offset; 1026 const Type *Ty = CPE.isMachineConstantPoolEntry() 1027 ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType(); 1028 Size += TheJIT->getTargetData()->getABITypeSize(Ty); 1029 1030 unsigned Align = 1 << MCP->getConstantPoolAlignment(); 1031 ConstantPoolBase = allocateSpace(Size, Align); 1032 ConstantPool = MCP; 1033 1034 if (ConstantPoolBase == 0) return; // Buffer overflow. 1035 1036 DOUT << "JIT: Emitted constant pool at [" << ConstantPoolBase 1037 << "] (size: " << Size << ", alignment: " << Align << ")\n"; 1038 1039 // Initialize the memory for all of the constant pool entries. 1040 for (unsigned i = 0, e = Constants.size(); i != e; ++i) { 1041 void *CAddr = (char*)ConstantPoolBase+Constants[i].Offset; 1042 if (Constants[i].isMachineConstantPoolEntry()) { 1043 // FIXME: add support to lower machine constant pool values into bytes! 1044 cerr << "Initialize memory with machine specific constant pool entry" 1045 << " has not been implemented!\n"; 1046 abort(); 1047 } 1048 TheJIT->InitializeMemory(Constants[i].Val.ConstVal, CAddr); 1049 DOUT << "JIT: CP" << i << " at [" << CAddr << "]\n"; 1050 } 1051} 1052 1053void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) { 1054 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 1055 if (JT.empty()) return; 1056 1057 unsigned NumEntries = 0; 1058 for (unsigned i = 0, e = JT.size(); i != e; ++i) 1059 NumEntries += JT[i].MBBs.size(); 1060 1061 unsigned EntrySize = MJTI->getEntrySize(); 1062 1063 // Just allocate space for all the jump tables now. We will fix up the actual 1064 // MBB entries in the tables after we emit the code for each block, since then 1065 // we will know the final locations of the MBBs in memory. 
1066 JumpTable = MJTI; 1067 JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment()); 1068} 1069 1070void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { 1071 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 1072 if (JT.empty() || JumpTableBase == 0) return; 1073 1074 if (TargetMachine::getRelocationModel() == Reloc::PIC_) { 1075 assert(MJTI->getEntrySize() == 4 && "Cross JIT'ing?"); 1076 // For each jump table, place the offset from the beginning of the table 1077 // to the target address. 1078 int *SlotPtr = (int*)JumpTableBase; 1079 1080 for (unsigned i = 0, e = JT.size(); i != e; ++i) { 1081 const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs; 1082 // Store the offset of the basic block for this jump table slot in the 1083 // memory we allocated for the jump table in 'initJumpTableInfo' 1084 intptr_t Base = (intptr_t)SlotPtr; 1085 for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) { 1086 intptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[mi]); 1087 *SlotPtr++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base); 1088 } 1089 } 1090 } else { 1091 assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?"); 1092 1093 // For each jump table, map each target in the jump table to the address of 1094 // an emitted MachineBasicBlock. 
1095 intptr_t *SlotPtr = (intptr_t*)JumpTableBase; 1096 1097 for (unsigned i = 0, e = JT.size(); i != e; ++i) { 1098 const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs; 1099 // Store the address of the basic block for this jump table slot in the 1100 // memory we allocated for the jump table in 'initJumpTableInfo' 1101 for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) 1102 *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]); 1103 } 1104 } 1105} 1106 1107void JITEmitter::startFunctionStub(const GlobalValue* F, unsigned StubSize, 1108 unsigned Alignment) { 1109 SavedBufferBegin = BufferBegin; 1110 SavedBufferEnd = BufferEnd; 1111 SavedCurBufferPtr = CurBufferPtr; 1112 1113 BufferBegin = CurBufferPtr = MemMgr->allocateStub(F, StubSize, Alignment); 1114 BufferEnd = BufferBegin+StubSize+1; 1115} 1116 1117void *JITEmitter::finishFunctionStub(const GlobalValue* F) { 1118 NumBytes += getCurrentPCOffset(); 1119 1120 // Invalidate the icache if necessary. 1121 sys::Memory::InvalidateInstructionCache(BufferBegin, NumBytes); 1122 1123 std::swap(SavedBufferBegin, BufferBegin); 1124 BufferEnd = SavedBufferEnd; 1125 CurBufferPtr = SavedCurBufferPtr; 1126 return SavedBufferBegin; 1127} 1128 1129// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry 1130// in the constant pool that was last emitted with the 'emitConstantPool' 1131// method. 
1132// 1133intptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const { 1134 assert(ConstantNum < ConstantPool->getConstants().size() && 1135 "Invalid ConstantPoolIndex!"); 1136 return (intptr_t)ConstantPoolBase + 1137 ConstantPool->getConstants()[ConstantNum].Offset; 1138} 1139 1140// getJumpTableEntryAddress - Return the address of the JumpTable with index 1141// 'Index' in the jumpp table that was last initialized with 'initJumpTableInfo' 1142// 1143intptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const { 1144 const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables(); 1145 assert(Index < JT.size() && "Invalid jump table index!"); 1146 1147 unsigned Offset = 0; 1148 unsigned EntrySize = JumpTable->getEntrySize(); 1149 1150 for (unsigned i = 0; i < Index; ++i) 1151 Offset += JT[i].MBBs.size(); 1152 1153 Offset *= EntrySize; 1154 1155 return (intptr_t)((char *)JumpTableBase + Offset); 1156} 1157 1158//===----------------------------------------------------------------------===// 1159// Public interface to this file 1160//===----------------------------------------------------------------------===// 1161 1162MachineCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM) { 1163 return new JITEmitter(jit, JMM); 1164} 1165 1166// getPointerToNamedFunction - This function is used as a global wrapper to 1167// JIT::getPointerToNamedFunction for the purpose of resolving symbols when 1168// bugpoint is debugging the JIT. In that scenario, we are loading an .so and 1169// need to resolve function(s) that are being mis-codegenerated, so we need to 1170// resolve their addresses at runtime, and this is the way to do it. 
1171extern "C" { 1172 void *getPointerToNamedFunction(const char *Name) { 1173 if (Function *F = TheJIT->FindFunctionNamed(Name)) 1174 return TheJIT->getPointerToFunction(F); 1175 return TheJIT->getPointerToNamedFunction(Name); 1176 } 1177} 1178 1179// getPointerToFunctionOrStub - If the specified function has been 1180// code-gen'd, return a pointer to the function. If not, compile it, or use 1181// a stub to implement lazy compilation if available. 1182// 1183void *JIT::getPointerToFunctionOrStub(Function *F) { 1184 // If we have already code generated the function, just return the address. 1185 if (void *Addr = getPointerToGlobalIfAvailable(F)) 1186 return Addr; 1187 1188 // Get a stub if the target supports it. 1189 assert(isa<JITEmitter>(MCE) && "Unexpected MCE?"); 1190 JITEmitter *JE = cast<JITEmitter>(getCodeEmitter()); 1191 return JE->getJITResolver().getFunctionStub(F); 1192} 1193 1194/// freeMachineCodeForFunction - release machine code memory for given Function. 1195/// 1196void JIT::freeMachineCodeForFunction(Function *F) { 1197 1198 // Delete translation for this from the ExecutionEngine, so it will get 1199 // retranslated next time it is used. 1200 void *OldPtr = updateGlobalMapping(F, 0); 1201 1202 if (OldPtr) 1203 RemoveFunctionFromSymbolTable(OldPtr); 1204 1205 // Free the actual memory for the function body and related stuff. 1206 assert(isa<JITEmitter>(MCE) && "Unexpected MCE?"); 1207 cast<JITEmitter>(MCE)->deallocateMemForFunction(F); 1208} 1209 1210