JITEmitter.cpp revision ce4a70bd7608861e104b04265a0c71e5df8ecefe
//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a MachineCodeEmitter object that is used by the JIT to
// write machine code to memory and remember where relocatable values are.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "JIT.h"
#include "JITDwarfEmitter.h"
#include "llvm/Constants.h"
#include "llvm/Module.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/System/Disassembler.h"
#include "llvm/System/Memory.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
#ifndef NDEBUG
#include <iomanip>
#endif
using namespace llvm;

STATISTIC(NumBytes, "Number of bytes of machine code compiled");
STATISTIC(NumRelos, "Number of relocations applied");
// TheJIT - The active JIT instance.  Assigned in the JITResolver constructor
// below; file-local so the free helpers in this file can reach it.
static JIT *TheJIT = 0;


//===----------------------------------------------------------------------===//
// JIT lazy compilation code.
//
namespace {
  /// JITResolverState - Bookkeeping shared by the resolver.  All accessors
  /// take a MutexGuard and assert that it holds the JIT lock, so every map
  /// access is provably serialized.
  class JITResolverState {
  private:
    /// FunctionToStubMap - Keep track of the stub created for a particular
    /// function so that we can reuse them if necessary.
    std::map<Function*, void*> FunctionToStubMap;

    /// StubToFunctionMap - Keep track of the function that each stub
    /// corresponds to.
    std::map<void*, Function*> StubToFunctionMap;

    /// GlobalToNonLazyPtrMap - Keep track of the non-lazy pointer created for
    /// a particular GlobalVariable so that we can reuse them if necessary.
    std::map<GlobalValue*, void*> GlobalToNonLazyPtrMap;

  public:
    std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return FunctionToStubMap;
    }

    std::map<void*, Function*>& getStubToFunctionMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return StubToFunctionMap;
    }

    std::map<GlobalValue*, void*>&
    getGlobalToNonLazyPtrMap(const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return GlobalToNonLazyPtrMap;
    }
  };

  /// JITResolver - Keep track of, and resolve, call sites for functions that
  /// have not yet been compiled.
  class JITResolver {
    /// LazyResolverFn - The target lazy resolver function that we actually
    /// rewrite instructions to use.
    TargetJITInfo::LazyResolverFn LazyResolverFn;

    JITResolverState state;

    /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
    /// external functions.
    std::map<void*, void*> ExternalFnToStubMap;

    // revGOTMap - map addresses to indexes in the GOT.  Index 0 is reserved
    // as "not present" (see getGOTIndexForAddr), so valid indices start at 1.
    std::map<void*, unsigned> revGOTMap;
    unsigned nextGOTIndex;

    // TheJITResolver - The singleton resolver; JITCompilerFn is a plain
    // function pointer handed to the target, so it reaches the resolver
    // through this static.
    static JITResolver *TheJITResolver;
  public:
    explicit JITResolver(JIT &jit) : nextGOTIndex(0) {
      TheJIT = &jit;

      LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
      assert(TheJITResolver == 0 && "Multiple JIT resolvers?");
      TheJITResolver = this;
    }

    ~JITResolver() {
      TheJITResolver = 0;
    }

    /// getFunctionStub - This returns a pointer to a function stub, creating
    /// one on demand as needed.
    void *getFunctionStub(Function *F);

    /// getExternalFunctionStub - Return a stub for the function at the
    /// specified address, created lazily on demand.
    void *getExternalFunctionStub(void *FnAddr);

    /// getGlobalValueNonLazyPtr - Return a non-lazy pointer containing the
    /// specified GV address.
    void *getGlobalValueNonLazyPtr(GlobalValue *V, void *GVAddress);

    /// AddCallbackAtLocation - If the target is capable of rewriting an
    /// instruction without the use of a stub, record the location of the use so
    /// we know which function is being used at the location.
    void *AddCallbackAtLocation(Function *F, void *Location) {
      MutexGuard locked(TheJIT->lock);
      // Record the place of use so JITCompilerFn can later look up which
      // function is being called from this location.
      state.getStubToFunctionMap(locked)[Location] = F;
      return (void*)(intptr_t)LazyResolverFn;
    }

    /// getGOTIndexForAddr - Return a new or existing index in the GOT for
    /// an address.  This function only manages slots, it does not manage the
    /// contents of the slots or the memory associated with the GOT.
    unsigned getGOTIndexForAddr(void *addr);

    /// JITCompilerFn - This function is called to resolve a stub to a compiled
    /// address.  If the LLVM Function corresponding to the stub has not yet
    /// been compiled, this function compiles it first.
    static void *JITCompilerFn(void *Stub);
  };
}

JITResolver *JITResolver::TheJITResolver = 0;

/// getFunctionStub - This returns a pointer to a function stub, creating
/// one on demand as needed.
void *JITResolver::getFunctionStub(Function *F) {
  MutexGuard locked(TheJIT->lock);

  // If we already have a stub for this function, recycle it.
  // Note: operator[] creates a null slot on first lookup, so the reference
  // can be assigned below without a second map traversal.
  void *&Stub = state.getFunctionToStubMap(locked)[F];
  if (Stub) return Stub;

  // Call the lazy resolver function unless we already KNOW it is an external
  // function, in which case we just skip the lazy resolution step.
  void *Actual = (void*)(intptr_t)LazyResolverFn;
  if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode())
    Actual = TheJIT->getPointerToFunction(F);

  // Otherwise, codegen a new stub.  For now, the stub will call the lazy
  // resolver function.
  Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual,
                                               *TheJIT->getCodeEmitter());

  if (Actual != (void*)(intptr_t)LazyResolverFn) {
    // If we are getting the stub for an external function, we really want the
    // address of the stub in the GlobalAddressMap for the JIT, not the address
    // of the external function.
    TheJIT->updateGlobalMapping(F, Stub);
  }

  DOUT << "JIT: Stub emitted at [" << Stub << "] for function '"
       << F->getName() << "'\n";

  // Finally, keep track of the stub-to-Function mapping so that the
  // JITCompilerFn knows which function to compile!
  state.getStubToFunctionMap(locked)[Stub] = F;
  return Stub;
}

/// getGlobalValueNonLazyPtr - Return a non-lazy pointer containing the
/// specified GV address.
void *JITResolver::getGlobalValueNonLazyPtr(GlobalValue *GV, void *GVAddress) {
  MutexGuard locked(TheJIT->lock);

  // If we already have a stub for this global variable, recycle it.
  void *&NonLazyPtr = state.getGlobalToNonLazyPtrMap(locked)[GV];
  if (NonLazyPtr) return NonLazyPtr;

  // Otherwise, codegen a new non-lazy pointer.
  NonLazyPtr = TheJIT->getJITInfo().emitGlobalValueNonLazyPtr(GV, GVAddress,
                                                    *TheJIT->getCodeEmitter());

  DOUT << "JIT: Stub emitted at [" << NonLazyPtr << "] for GV '"
       << GV->getName() << "'\n";

  return NonLazyPtr;
}

/// getExternalFunctionStub - Return a stub for the function at the
/// specified address, created lazily on demand.
/// NOTE(review): unlike getFunctionStub, no MutexGuard is taken here before
/// touching ExternalFnToStubMap — presumably all callers already hold the
/// JIT lock; confirm at the call sites.
void *JITResolver::getExternalFunctionStub(void *FnAddr) {
  // If we already have a stub for this function, recycle it.
  void *&Stub = ExternalFnToStubMap[FnAddr];
  if (Stub) return Stub;

  // A null Function* tells the target this stub has no IR-level identity.
  Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr,
                                               *TheJIT->getCodeEmitter());

  DOUT << "JIT: Stub emitted at [" << Stub
       << "] for external function at '" << FnAddr << "'\n";
  return Stub;
}

/// getGOTIndexForAddr - Return the GOT slot for an address, allocating the
/// next free slot on first use.  Slot numbers start at 1 so that a zero
/// from the map's default-constructed entry means "not yet assigned".
unsigned JITResolver::getGOTIndexForAddr(void* addr) {
  unsigned idx = revGOTMap[addr];
  if (!idx) {
    idx = ++nextGOTIndex;
    revGOTMap[addr] = idx;
    DOUT << "JIT: Adding GOT entry " << idx << " for addr [" << addr << "]\n";
  }
  return idx;
}

/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered.  It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
void *JITResolver::JITCompilerFn(void *Stub) {
  JITResolver &JR = *TheJITResolver;

  Function* F = 0;
  void* ActualPtr = 0;

  {
    // Only lock for getting the Function. The call getPointerToFunction made
    // in this function might trigger function materializing, which requires
    // JIT lock to be unlocked.
    MutexGuard locked(TheJIT->lock);

    // The address given to us for the stub may not be exactly right, it might
    // be a little bit after the stub.  As such, use upper_bound to find it.
    std::map<void*, Function*>::iterator I =
      JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
    assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
           "This is not a known stub!");
    // After the decrement, I->first is the recorded stub start address.
    F = (--I)->second;
    ActualPtr = I->first;
  }

  // If we have already code generated the function, just return the address.
  void *Result = TheJIT->getPointerToGlobalIfAvailable(F);

  if (!Result) {
    // Otherwise we don't have it, do lazy compilation now.

    // If lazy compilation is disabled, emit a useful error message and abort.
    if (TheJIT->isLazyCompilationDisabled()) {
      cerr << "LLVM JIT requested to do lazy compilation of function '"
           << F->getName() << "' when lazy compiles are disabled!\n";
      abort();
    }

    // We might like to remove the stub from the StubToFunction map.
    // We can't do that! Multiple threads could be stuck, waiting to acquire the
    // lock above. As soon as the 1st function finishes compiling the function,
    // the next one will be released, and needs to be able to find the function
    // it needs to call.
    //JR.state.getStubToFunctionMap(locked).erase(I);

    DOUT << "JIT: Lazily resolving function '" << F->getName()
         << "' In stub ptr = " << Stub << " actual ptr = "
         << ActualPtr << "\n";

    Result = TheJIT->getPointerToFunction(F);
  }

  // Reacquire the lock to erase the stub in the map.
  MutexGuard locked(TheJIT->lock);

  // We don't need to reuse this stub in the future, as F is now compiled.
  JR.state.getFunctionToStubMap(locked).erase(F);

  // FIXME: We could rewrite all references to this stub if we knew them.

  // What we will do is set the compiled function address to map to the
  // same GOT entry as the stub so that later clients may update the GOT
  // if they see it still using the stub address.
  // Note: this is done so the Resolver doesn't have to manage GOT memory
  // Do this without allocating map space if the target isn't using a GOT
  if(JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
    JR.revGOTMap[Result] = JR.revGOTMap[Stub];

  return Result;
}

//===----------------------------------------------------------------------===//
// Function Index Support

// On MacOS we generate an index of currently JIT'd functions so that
// performance tools can determine a symbol name and accurate code range for a
// PC value.  Because performance tools are generally asynchronous, the code
// below is written with the hope that it could be interrupted at any time and
// have useful answers.  However, we don't go crazy with atomic operations, we
// just do a "reasonable effort".
// NOTE(review): the macro is defined to 0 below, so the symbol table is
// currently compiled out even on Darwin; flip to 1 to enable it.
#ifdef __APPLE__
#define ENABLE_JIT_SYMBOL_TABLE 0
#endif

/// JitSymbolEntry - Each function that is JIT compiled results in one of these
/// being added to an array of symbols.  This indicates the name of the function
/// as well as the address range it occupies.  This allows the client to map
/// from a PC value to the name of the function.
struct JitSymbolEntry {
  const char *FnName;   // FnName - a strdup'd string.
  void *FnStart;
  intptr_t FnSize;
};


struct JitSymbolTable {
  /// NextPtr - This forms a linked list of JitSymbolTable entries.  This
  /// pointer is not used right now, but might be used in the future.  Consider
  /// it reserved for future use.
  JitSymbolTable *NextPtr;

  /// Symbols - This is an array of JitSymbolEntry entries.  Only the first
  /// 'NumSymbols' symbols are valid.
  JitSymbolEntry *Symbols;

  /// NumSymbols - This indicates the number entries in the Symbols array that
  /// are valid.
  unsigned NumSymbols;

  /// NumAllocated - This indicates the amount of space we have in the Symbols
  /// array.
  /// This is a private field that should not be read by external tools.
  unsigned NumAllocated;
};

#if ENABLE_JIT_SYMBOL_TABLE
JitSymbolTable *__jitSymbolTable;
#endif

/// AddFunctionToSymbolTable - Append a (name, start, size) record for a newly
/// JIT'd function to the global symbol table, growing the table as needed.
/// Compiles to an immediate no-op when ENABLE_JIT_SYMBOL_TABLE is off.
static void AddFunctionToSymbolTable(const char *FnName,
                                     void *FnStart, intptr_t FnSize) {
  assert(FnName != 0 && FnStart != 0 && "Bad symbol to add");
  JitSymbolTable **SymTabPtrPtr = 0;
#if !ENABLE_JIT_SYMBOL_TABLE
  return;
#else
  SymTabPtrPtr = &__jitSymbolTable;
#endif

  // If this is the first entry in the symbol table, add the JitSymbolTable
  // index.
  if (*SymTabPtrPtr == 0) {
    JitSymbolTable *New = new JitSymbolTable();
    New->NextPtr = 0;
    New->Symbols = 0;
    New->NumSymbols = 0;
    New->NumAllocated = 0;
    *SymTabPtrPtr = New;
  }

  JitSymbolTable *SymTabPtr = *SymTabPtrPtr;

  // If we don't have space in the table, reallocate it.
  if (SymTabPtr->NumSymbols >= SymTabPtr->NumAllocated) {
    // Double the capacity (starting at 64) so growth is amortized O(1).
    unsigned NewSize = std::max(64U, SymTabPtr->NumAllocated*2);
    JitSymbolEntry *NewSymbols = new JitSymbolEntry[NewSize];
    JitSymbolEntry *OldSymbols = SymTabPtr->Symbols;

    // Copy the old entries over.
    memcpy(NewSymbols, OldSymbols,
           SymTabPtr->NumSymbols*sizeof(OldSymbols[0]));

    // Swap the new symbols in, delete the old ones.
    SymTabPtr->Symbols = NewSymbols;
    SymTabPtr->NumAllocated = NewSize;
    delete [] OldSymbols;
  }

  // Otherwise, we have enough space, just tack it onto the end of the array.
  JitSymbolEntry &Entry = SymTabPtr->Symbols[SymTabPtr->NumSymbols];
  Entry.FnName = strdup(FnName);
  Entry.FnStart = FnStart;
  Entry.FnSize = FnSize;
  ++SymTabPtr->NumSymbols;
}

/// RemoveFunctionFromSymbolTable - Remove the record for the function starting
/// at FnStart, compacting the array by moving the last entry into its slot.
/// Frees the whole table when the last symbol is removed.
static void RemoveFunctionFromSymbolTable(void *FnStart) {
  assert(FnStart && "Invalid function pointer");
  JitSymbolTable **SymTabPtrPtr = 0;
#if !ENABLE_JIT_SYMBOL_TABLE
  return;
#else
  SymTabPtrPtr = &__jitSymbolTable;
#endif

  JitSymbolTable *SymTabPtr = *SymTabPtrPtr;
  JitSymbolEntry *Symbols = SymTabPtr->Symbols;

  // Scan the table to find its index.  The table is not sorted, so do a linear
  // scan.
  // NOTE(review): if FnStart is absent, the loop condition dereferences
  // Symbols[NumSymbols] (one past the end) *before* the assert in the body
  // fires — a debug-only out-of-bounds read worth tightening.
  unsigned Index;
  for (Index = 0; Symbols[Index].FnStart != FnStart; ++Index)
    assert(Index != SymTabPtr->NumSymbols && "Didn't find function!");

  // Once we have an index, we know to nuke this entry, overwrite it with the
  // entry at the end of the array, making the last entry redundant.
  const char *OldName = Symbols[Index].FnName;
  Symbols[Index] = Symbols[SymTabPtr->NumSymbols-1];
  free((void*)OldName);

  // Drop the number of symbols in the table.
  --SymTabPtr->NumSymbols;

  // Finally, if we deleted the final symbol, deallocate the table itself.
  if (SymTabPtr->NumSymbols != 0)
    return;

  *SymTabPtrPtr = 0;
  delete [] Symbols;
  delete SymTabPtr;
}

//===----------------------------------------------------------------------===//
// JITEmitter code.
//
namespace {
  /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
  /// used to output functions to memory for execution.
  class JITEmitter : public MachineCodeEmitter {
    JITMemoryManager *MemMgr;

    // When outputting a function stub in the context of some other function, we
    // save BufferBegin/BufferEnd/CurBufferPtr here.
    unsigned char *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;

    /// Relocations - These are the relocations that the function needs, as
    /// emitted.
    std::vector<MachineRelocation> Relocations;

    /// MBBLocations - This vector is a mapping from MBB ID's to their address.
    /// It is filled in by the StartMachineBasicBlock callback and queried by
    /// the getMachineBasicBlockAddress callback.
    std::vector<intptr_t> MBBLocations;

    /// ConstantPool - The constant pool for the current function.
    ///
    MachineConstantPool *ConstantPool;

    /// ConstantPoolBase - A pointer to the first entry in the constant pool.
    ///
    void *ConstantPoolBase;

    /// JumpTable - The jump tables for the current function.
    ///
    MachineJumpTableInfo *JumpTable;

    /// JumpTableBase - A pointer to the first entry in the jump table.
    ///
    void *JumpTableBase;

    /// Resolver - This contains info about the currently resolved functions.
    JITResolver Resolver;

    /// DE - The dwarf emitter for the jit.
    /// NOTE(review): only initialized when ExceptionHandling is set (see the
    /// constructor); every use in this class is guarded by the same flag.
    JITDwarfEmitter *DE;

    /// LabelLocations - This vector is a mapping from Label ID's to their
    /// address.
    std::vector<intptr_t> LabelLocations;

    /// MMI - Machine module info for exception informations
    /// NOTE(review): not set in the constructor; assigned via setModuleInfo().
    MachineModuleInfo* MMI;

    // GVSet - a set to keep track of which globals have been seen
    SmallPtrSet<const GlobalVariable*, 8> GVSet;

  public:
    JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit) {
      // Fall back to the default memory manager when the caller supplies none.
      MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
      if (jit.getJITInfo().needsGOT()) {
        MemMgr->AllocateGOT();
        DOUT << "JIT is managing a GOT\n";
      }

      if (ExceptionHandling) DE = new JITDwarfEmitter(jit);
    }
    ~JITEmitter() {
      delete MemMgr;
      if (ExceptionHandling) delete DE;
    }

    /// classof - Methods for support type inquiry through isa, cast, and
    /// dyn_cast:
    ///
    static inline bool classof(const JITEmitter*) { return true; }
    static inline bool classof(const MachineCodeEmitter*) { return true; }

    JITResolver &getJITResolver() { return Resolver; }

    virtual void startFunction(MachineFunction &F);
    virtual bool finishFunction(MachineFunction &F);

    void emitConstantPool(MachineConstantPool *MCP);
    void initJumpTableInfo(MachineJumpTableInfo *MJTI);
    void emitJumpTableInfo(MachineJumpTableInfo *MJTI);

    virtual void startGVStub(const GlobalValue* GV, unsigned StubSize,
                             unsigned Alignment = 1);
    virtual void* finishGVStub(const GlobalValue *GV);

    /// allocateSpace - Reserves space in the current block if any, or
    /// allocate a new one of the given size.
    virtual void *allocateSpace(intptr_t Size, unsigned Alignment);

    virtual void addRelocation(const MachineRelocation &MR) {
      Relocations.push_back(MR);
    }

    virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
      // Grow with 2x headroom so repeated resizes stay amortized-cheap.
      if (MBBLocations.size() <= (unsigned)MBB->getNumber())
        MBBLocations.resize((MBB->getNumber()+1)*2);
      MBBLocations[MBB->getNumber()] = getCurrentPCValue();
      DOUT << "JIT: Emitting BB" << MBB->getNumber() << " at ["
           << (void*) getCurrentPCValue() << "]\n";
    }

    virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const;
    virtual intptr_t getJumpTableEntryAddress(unsigned Entry) const;

    virtual intptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
      assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
             MBBLocations[MBB->getNumber()] && "MBB not emitted!");
      return MBBLocations[MBB->getNumber()];
    }

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body.
    void deallocateMemForFunction(Function *F) {
      MemMgr->deallocateMemForFunction(F);
    }

    virtual void emitLabel(uint64_t LabelID) {
      if (LabelLocations.size() <= LabelID)
        LabelLocations.resize((LabelID+1)*2);
      LabelLocations[LabelID] = getCurrentPCValue();
    }

    virtual intptr_t getLabelAddress(uint64_t LabelID) const {
      assert(LabelLocations.size() > (unsigned)LabelID &&
             LabelLocations[LabelID] && "Label not emitted!");
      return LabelLocations[LabelID];
    }

    virtual void setModuleInfo(MachineModuleInfo* Info) {
      MMI = Info;
      if (ExceptionHandling) DE->setModuleInfo(Info);
    }

    void setMemoryExecutable(void) {
      MemMgr->setMemoryExecutable();
    }

  private:
    void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
    void *getPointerToGVNonLazyPtr(GlobalValue *V, void *Reference,
                                   bool NoNeedStub);
    unsigned addSizeOfGlobal(const GlobalVariable *GV, unsigned Size);
    unsigned addSizeOfGlobalsInConstantVal(const Constant *C, unsigned Size);
    unsigned addSizeOfGlobalsInInitializer(const Constant *Init, unsigned Size);
    unsigned GetSizeOfGlobalsInBytes(MachineFunction &MF);
  };
}

/// getPointerToGlobal - Resolve a GlobalValue referenced by emitted code to a
/// concrete address: globals are emitted (or fetched), aliases are chased,
/// and functions yield either their compiled body, a lazy stub, or a
/// rewrite-callback registration depending on DoesntNeedStub.
void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
                                     bool DoesntNeedStub) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    /// FIXME: If we straightened things out, this could actually emit the
    /// global immediately instead of queuing it for codegen later!
    return TheJIT->getOrEmitGlobalVariable(GV);
  }
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false));

  // If we have already compiled the function, return a pointer to its body.
  Function *F = cast<Function>(V);
  void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
  if (ResultPtr) return ResultPtr;

  if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) {
    // If this is an external function pointer, we can force the JIT to
    // 'compile' it, which really just adds it to the map.
    if (DoesntNeedStub)
      return TheJIT->getPointerToFunction(F);

    return Resolver.getFunctionStub(F);
  }

  // Okay, the function has not been compiled yet, if the target callback
  // mechanism is capable of rewriting the instruction directly, prefer to do
  // that instead of emitting a stub.
  if (DoesntNeedStub)
    return Resolver.AddCallbackAtLocation(F, Reference);

  // Otherwise, we have to emit a lazy resolving stub.
  return Resolver.getFunctionStub(F);
}

/// getPointerToGVNonLazyPtr - Return the address of a non-lazy pointer slot
/// holding the (fully resolved) address of V.
void *JITEmitter::getPointerToGVNonLazyPtr(GlobalValue *V, void *Reference,
                                           bool DoesntNeedStub) {
  // Make sure GV is emitted first.
  // FIXME: For now, if the GV is an external function we force the JIT to
  // compile it so the non-lazy pointer will contain the fully resolved address.
  void *GVAddress = getPointerToGlobal(V, Reference, true);
  return Resolver.getGlobalValueNonLazyPtr(V, GVAddress);
}

/// GetConstantPoolSizeInBytes - Compute the total pool size as the offset of
/// the final entry plus that entry's own type size (entries are recorded with
/// cumulative offsets).
static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP) {
  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
  if (Constants.empty()) return 0;

  MachineConstantPoolEntry CPE = Constants.back();
  unsigned Size = CPE.Offset;
  const Type *Ty = CPE.isMachineConstantPoolEntry()
    ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
  Size += TheJIT->getTargetData()->getABITypeSize(Ty);
  return Size;
}

/// GetJumpTableSizeInBytes - Total bytes needed for all jump tables: the sum
/// of every table's entry count times the per-entry size.
static unsigned GetJumpTableSizeInBytes(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return 0;

  unsigned NumEntries = 0;
  for (unsigned i = 0, e = JT.size(); i != e; ++i)
    NumEntries += JT[i].MBBs.size();

  unsigned EntrySize = MJTI->getEntrySize();

  return NumEntries * EntrySize;
}

/// RoundUpToAlign - Conservatively account for alignment in a size estimate.
static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) {
  if (Alignment == 0) Alignment = 1;
  // Since we do not know where the buffer will be allocated, be pessimistic:
  // reserve a full extra Alignment bytes rather than rounding.
  return Size + Alignment;
}

/// addSizeOfGlobal - add the size of the global (plus any alignment padding)
/// into the running total Size.

unsigned JITEmitter::addSizeOfGlobal(const GlobalVariable *GV, unsigned Size) {
  const Type *ElTy = GV->getType()->getElementType();
  size_t GVSize = (size_t)TheJIT->getTargetData()->getABITypeSize(ElTy);
  size_t GVAlign =
      (size_t)TheJIT->getTargetData()->getPreferredAlignment(GV);
  DOUT << "JIT: Adding in size " << GVSize << " alignment " << GVAlign;
  DEBUG(GV->dump());
  // Assume code section ends with worst possible alignment, so first
  // variable needs maximal padding.
  if (Size==0)
    Size = 1;
  Size = ((Size+GVAlign-1)/GVAlign)*GVAlign;
  Size += GVSize;
  return Size;
}

/// addSizeOfGlobalsInConstantVal - find any globals that we haven't seen yet
/// but are referenced from the constant; put them in GVSet and add their
/// size into the running total Size.

unsigned JITEmitter::addSizeOfGlobalsInConstantVal(const Constant *C,
                                                   unsigned Size) {
  // If it's undefined, return the garbage.
  if (isa<UndefValue>(C))
    return Size;

  // If the value is a ConstantExpr, recurse into its operands: unary casts
  // and GEP follow operand 0; binary arithmetic/logical ops follow both.
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    Constant *Op0 = CE->getOperand(0);
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast: {
      Size = addSizeOfGlobalsInConstantVal(Op0, Size);
      break;
    }
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      Size = addSizeOfGlobalsInConstantVal(Op0, Size);
      Size = addSizeOfGlobalsInConstantVal(CE->getOperand(1), Size);
      break;
    }
    default: {
      // Unhandled expression kinds would silently under-estimate the size,
      // so fail loudly instead.
      cerr << "ConstantExpr not handled: " << *CE << "\n";
      abort();
    }
    }
  }

  // A direct reference to a not-yet-seen global contributes its own size.
  if (C->getType()->getTypeID() == Type::PointerTyID)
    if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(C))
      if (GVSet.insert(GV))
        Size = addSizeOfGlobal(GV, Size);

  return Size;
}

/// addSizeOfGlobalsInInitializer - handle any globals that we haven't seen yet
/// but are referenced from the given initializer.

unsigned JITEmitter::addSizeOfGlobalsInInitializer(const Constant *Init,
                                                   unsigned Size) {
  // Only scalar (first-class, non-aggregate) initializers are scanned.
  // NOTE(review): ConstantVector/Array/Struct initializers are skipped here,
  // so globals referenced only through aggregate initializers are not
  // counted — confirm this is intentional.
  if (!isa<UndefValue>(Init) &&
      !isa<ConstantVector>(Init) &&
      !isa<ConstantAggregateZero>(Init) &&
      !isa<ConstantArray>(Init) &&
      !isa<ConstantStruct>(Init) &&
      Init->getType()->isFirstClassType())
    Size = addSizeOfGlobalsInConstantVal(Init, Size);
  return Size;
}

/// GetSizeOfGlobalsInBytes - walk the code for the function, looking for
/// globals; then walk the initializers of those globals looking for more.
/// If their size has not been considered yet, add it into the running total
/// Size.

unsigned JITEmitter::GetSizeOfGlobalsInBytes(MachineFunction &MF) {
  unsigned Size = 0;
  GVSet.clear();

  for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
       MBB != E; ++MBB) {
    for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      const TargetInstrDesc &Desc = I->getDesc();
      const MachineInstr &MI = *I;
      unsigned NumOps = Desc.getNumOperands();
      // NOTE(review): only the Desc-declared operands are scanned; operands
      // beyond NumOps (if any) would be missed — verify against targets used.
      for (unsigned CurOp = 0; CurOp < NumOps; CurOp++) {
        const MachineOperand &MO = MI.getOperand(CurOp);
        if (MO.isGlobal()) {
          GlobalValue* V = MO.getGlobal();
          const GlobalVariable *GV = dyn_cast<const GlobalVariable>(V);
          if (!GV)
            continue;
          // If seen in previous function, it will have an entry here.
          if (TheJIT->getPointerToGlobalIfAvailable(GV))
            continue;
          // If seen earlier in this function, it will have an entry here.
          // FIXME: it should be possible to combine these tables, by
          // assuming the addresses of the new globals in this module
          // start at 0 (or something) and adjusting them after codegen
          // complete. Another possibility is to grab a marker bit in GV.
          if (GVSet.insert(GV))
            // A variable as yet unseen. Add in its size.
            Size = addSizeOfGlobal(GV, Size);
        }
      }
    }
  }
  DOUT << "JIT: About to look through initializers\n";
  // Look for more globals that are referenced only from initializers.
  // GVSet.end is computed each time because the set can grow as we go.
  for (SmallPtrSet<const GlobalVariable *, 8>::iterator I = GVSet.begin();
       I != GVSet.end(); I++) {
    const GlobalVariable* GV = *I;
    if (GV->hasInitializer())
      Size = addSizeOfGlobalsInInitializer(GV->getInitializer(), Size);
  }

  return Size;
}

/// startFunction - Begin emitting a machine function: when the memory manager
/// demands an exact size, pre-compute a conservative upper bound (constant
/// pool + jump tables + code + referenced globals, each with alignment
/// padding), then open the output buffer and emit the per-function data that
/// precedes the code.
void JITEmitter::startFunction(MachineFunction &F) {
  DOUT << "JIT: Starting CodeGen of Function "
       << F.getFunction()->getName() << "\n";

  uintptr_t ActualSize = 0;
  // Set the memory writable, if it's not already
  MemMgr->setMemoryWritable();
  if (MemMgr->NeedsExactSize()) {
    DOUT << "JIT: ExactSize\n";
    const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
    MachineJumpTableInfo *MJTI = F.getJumpTableInfo();
    MachineConstantPool *MCP = F.getConstantPool();

    // Ensure the constant pool/jump table info is at least 16-byte aligned
    // (mirrors the emitAlignment(16) done after the buffer is opened).
    ActualSize = RoundUpToAlign(ActualSize, 16);

    // Add the alignment of the constant pool
    ActualSize = RoundUpToAlign(ActualSize,
                                1 << MCP->getConstantPoolAlignment());

    // Add the constant pool size
    ActualSize += GetConstantPoolSizeInBytes(MCP);

    // Add the alignment of the jump table info
    ActualSize = RoundUpToAlign(ActualSize, MJTI->getAlignment());

    // Add the jump table size
    ActualSize += GetJumpTableSizeInBytes(MJTI);

    // Add the alignment for the function
    ActualSize = RoundUpToAlign(ActualSize,
                                std::max(F.getFunction()->getAlignment(), 8U));

    // Add the function size
    ActualSize += TII->GetFunctionSizeInBytes(F);

    DOUT << "JIT: ActualSize before globals " << ActualSize << "\n";
    // Add the size of the globals that will be allocated after this function.
    // These are all the ones referenced from this function that were not
    // previously allocated.
    ActualSize += GetSizeOfGlobalsInBytes(F);
    DOUT << "JIT: ActualSize after globals " << ActualSize << "\n";
  }

  BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
                                                         ActualSize);
  BufferEnd = BufferBegin+ActualSize;

  // Ensure the constant pool/jump table info is 16-byte aligned.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  initJumpTableInfo(F.getJumpTableInfo());

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
  // Map the IR function to the address where its code will begin, so later
  // lookups (and finishFunction's FnStart below) can find it.
  TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);

  MBBLocations.clear();
}

/// finishFunction - Called when code emission for F is complete: emits the
/// jump tables, resolves and applies all pending relocations (handing out
/// stubs/GOT slots as required), then seals the buffer and publishes the
/// finished code.
bool JITEmitter::finishFunction(MachineFunction &F) {
  if (CurBufferPtr == BufferEnd) {
    // The buffer filled up exactly — emission may have been truncated.
    // FIXME: Allocate more space, then try again.
    cerr << "JIT: Ran out of space for generated machine code!\n";
    abort();
  }

  emitJumpTableInfo(F.getJumpTableInfo());

  // FnStart is the start of the text, not the start of the constant pool and
  // other per-function data (startFunction recorded it in the global map).
  unsigned char *FnStart =
    (unsigned char *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());

  if (!Relocations.empty()) {
    NumRelos += Relocations.size();

    // Resolve the relocations to concrete pointers.
    for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
      MachineRelocation &MR = Relocations[i];
      void *ResultPtr = 0;
      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          ResultPtr = TheJIT->getPointerToNamedFunction(MR.getExternalSymbol());
          DOUT << "JIT: Map \'" << MR.getExternalSymbol() << "\' to ["
               << ResultPtr << "]\n";

          // If the target REALLY wants a stub for this function, emit it now.
          if (!MR.doesntNeedStub())
            ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
        } else if (MR.isGlobalValue()) {
          ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin+MR.getMachineCodeOffset(),
                                         MR.doesntNeedStub());
        } else if (MR.isGlobalValueNonLazyPtr()) {
          ResultPtr = getPointerToGVNonLazyPtr(MR.getGlobalValue(),
                                         BufferBegin+MR.getMachineCodeOffset(),
                                         MR.doesntNeedStub());
        } else if (MR.isBasicBlock()) {
          ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr = (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex());
          ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        MR.setResultPointer(ResultPtr);
      }

      // if we are managing the GOT and the relocation wants an index,
      // give it one
      if (MR.isGOTRelative() && MemMgr->isManagingGOT()) {
        unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr);
        MR.setGOTIndex(idx);
        if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) {
          DOUT << "JIT: GOT was out of date for " << ResultPtr
               << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
               << "\n";
          ((void**)MemMgr->getGOTBase())[idx] = ResultPtr;
        }
      }
    }

    // Let the target patch the instruction bytes with the resolved addresses.
    TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
                                  Relocations.size(), MemMgr->getGOTBase());
  }

  // Update the GOT entry for F to point to the new code.
  if (MemMgr->isManagingGOT()) {
    unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin);
    if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) {
      DOUT << "JIT: GOT was out of date for " << (void*)BufferBegin
           << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] << "\n";
      ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin;
    }
  }

  unsigned char *FnEnd = CurBufferPtr;

  MemMgr->endFunctionBody(F.getFunction(), BufferBegin, FnEnd);
  // Null the buffer pointers so stray emissions between functions trap.
  BufferBegin = CurBufferPtr = 0;
  NumBytes += FnEnd-FnStart;

  // Invalidate the icache if necessary.
  sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart);

  // Add it to the JIT symbol table if the host wants it.
  AddFunctionToSymbolTable(F.getFunction()->getNameStart(),
                           FnStart, FnEnd-FnStart);

  DOUT << "JIT: Finished CodeGen of [" << (void*)FnStart
       << "] Function: " << F.getFunction()->getName()
       << ": " << (FnEnd-FnStart) << " bytes of text, "
       << Relocations.size() << " relocations\n";
  Relocations.clear();

  // Mark code region readable and executable if it's not so already.
964 MemMgr->setMemoryExecutable(); 965 966#ifndef NDEBUG 967 { 968 DOUT << "JIT: Disassembled code:\n"; 969 if (sys::hasDisassembler()) 970 DOUT << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart); 971 else { 972 DOUT << std::hex; 973 int i; 974 unsigned char* q = FnStart; 975 for (i=1; q!=FnEnd; q++, i++) { 976 if (i%8==1) 977 DOUT << "JIT: 0x" << (long)q << ": "; 978 DOUT<< std::setw(2) << std::setfill('0') << (unsigned short)*q << " "; 979 if (i%8==0) 980 DOUT << '\n'; 981 } 982 DOUT << std::dec; 983 DOUT<< '\n'; 984 } 985 } 986#endif 987 if (ExceptionHandling) { 988 uintptr_t ActualSize = 0; 989 SavedBufferBegin = BufferBegin; 990 SavedBufferEnd = BufferEnd; 991 SavedCurBufferPtr = CurBufferPtr; 992 993 if (MemMgr->NeedsExactSize()) { 994 ActualSize = DE->GetDwarfTableSizeInBytes(F, *this, FnStart, FnEnd); 995 } 996 997 BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(), 998 ActualSize); 999 BufferEnd = BufferBegin+ActualSize; 1000 unsigned char* FrameRegister = DE->EmitDwarfTable(F, *this, FnStart, FnEnd); 1001 MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr, 1002 FrameRegister); 1003 BufferBegin = SavedBufferBegin; 1004 BufferEnd = SavedBufferEnd; 1005 CurBufferPtr = SavedCurBufferPtr; 1006 1007 TheJIT->RegisterTable(FrameRegister); 1008 } 1009 1010 if (MMI) 1011 MMI->EndFunction(); 1012 1013 return false; 1014} 1015 1016void* JITEmitter::allocateSpace(intptr_t Size, unsigned Alignment) { 1017 if (BufferBegin) 1018 return MachineCodeEmitter::allocateSpace(Size, Alignment); 1019 1020 // create a new memory block if there is no active one. 
1021 // care must be taken so that BufferBegin is invalidated when a 1022 // block is trimmed 1023 BufferBegin = CurBufferPtr = MemMgr->allocateSpace(Size, Alignment); 1024 BufferEnd = BufferBegin+Size; 1025 return CurBufferPtr; 1026} 1027 1028void JITEmitter::emitConstantPool(MachineConstantPool *MCP) { 1029 if (TheJIT->getJITInfo().hasCustomConstantPool()) 1030 return; 1031 1032 const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants(); 1033 if (Constants.empty()) return; 1034 1035 MachineConstantPoolEntry CPE = Constants.back(); 1036 unsigned Size = CPE.Offset; 1037 const Type *Ty = CPE.isMachineConstantPoolEntry() 1038 ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType(); 1039 Size += TheJIT->getTargetData()->getABITypeSize(Ty); 1040 1041 unsigned Align = 1 << MCP->getConstantPoolAlignment(); 1042 ConstantPoolBase = allocateSpace(Size, Align); 1043 ConstantPool = MCP; 1044 1045 if (ConstantPoolBase == 0) return; // Buffer overflow. 1046 1047 DOUT << "JIT: Emitted constant pool at [" << ConstantPoolBase 1048 << "] (size: " << Size << ", alignment: " << Align << ")\n"; 1049 1050 // Initialize the memory for all of the constant pool entries. 1051 for (unsigned i = 0, e = Constants.size(); i != e; ++i) { 1052 void *CAddr = (char*)ConstantPoolBase+Constants[i].Offset; 1053 if (Constants[i].isMachineConstantPoolEntry()) { 1054 // FIXME: add support to lower machine constant pool values into bytes! 
1055 cerr << "Initialize memory with machine specific constant pool entry" 1056 << " has not been implemented!\n"; 1057 abort(); 1058 } 1059 TheJIT->InitializeMemory(Constants[i].Val.ConstVal, CAddr); 1060 DOUT << "JIT: CP" << i << " at [" << CAddr << "]\n"; 1061 } 1062} 1063 1064void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) { 1065 if (TheJIT->getJITInfo().hasCustomJumpTables()) 1066 return; 1067 1068 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 1069 if (JT.empty()) return; 1070 1071 unsigned NumEntries = 0; 1072 for (unsigned i = 0, e = JT.size(); i != e; ++i) 1073 NumEntries += JT[i].MBBs.size(); 1074 1075 unsigned EntrySize = MJTI->getEntrySize(); 1076 1077 // Just allocate space for all the jump tables now. We will fix up the actual 1078 // MBB entries in the tables after we emit the code for each block, since then 1079 // we will know the final locations of the MBBs in memory. 1080 JumpTable = MJTI; 1081 JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment()); 1082} 1083 1084void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { 1085 if (TheJIT->getJITInfo().hasCustomJumpTables()) 1086 return; 1087 1088 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables(); 1089 if (JT.empty() || JumpTableBase == 0) return; 1090 1091 if (TargetMachine::getRelocationModel() == Reloc::PIC_) { 1092 assert(MJTI->getEntrySize() == 4 && "Cross JIT'ing?"); 1093 // For each jump table, place the offset from the beginning of the table 1094 // to the target address. 
1095 int *SlotPtr = (int*)JumpTableBase; 1096 1097 for (unsigned i = 0, e = JT.size(); i != e; ++i) { 1098 const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs; 1099 // Store the offset of the basic block for this jump table slot in the 1100 // memory we allocated for the jump table in 'initJumpTableInfo' 1101 intptr_t Base = (intptr_t)SlotPtr; 1102 for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) { 1103 intptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[mi]); 1104 *SlotPtr++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base); 1105 } 1106 } 1107 } else { 1108 assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?"); 1109 1110 // For each jump table, map each target in the jump table to the address of 1111 // an emitted MachineBasicBlock. 1112 intptr_t *SlotPtr = (intptr_t*)JumpTableBase; 1113 1114 for (unsigned i = 0, e = JT.size(); i != e; ++i) { 1115 const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs; 1116 // Store the address of the basic block for this jump table slot in the 1117 // memory we allocated for the jump table in 'initJumpTableInfo' 1118 for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) 1119 *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]); 1120 } 1121 } 1122} 1123 1124void JITEmitter::startGVStub(const GlobalValue* GV, unsigned StubSize, 1125 unsigned Alignment) { 1126 SavedBufferBegin = BufferBegin; 1127 SavedBufferEnd = BufferEnd; 1128 SavedCurBufferPtr = CurBufferPtr; 1129 1130 BufferBegin = CurBufferPtr = MemMgr->allocateStub(GV, StubSize, Alignment); 1131 BufferEnd = BufferBegin+StubSize+1; 1132} 1133 1134void *JITEmitter::finishGVStub(const GlobalValue* GV) { 1135 NumBytes += getCurrentPCOffset(); 1136 1137 // Invalidate the icache if necessary. 
1138 sys::Memory::InvalidateInstructionCache(BufferBegin, NumBytes); 1139 1140 std::swap(SavedBufferBegin, BufferBegin); 1141 BufferEnd = SavedBufferEnd; 1142 CurBufferPtr = SavedCurBufferPtr; 1143 return SavedBufferBegin; 1144} 1145 1146// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry 1147// in the constant pool that was last emitted with the 'emitConstantPool' 1148// method. 1149// 1150intptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const { 1151 assert(ConstantNum < ConstantPool->getConstants().size() && 1152 "Invalid ConstantPoolIndex!"); 1153 return (intptr_t)ConstantPoolBase + 1154 ConstantPool->getConstants()[ConstantNum].Offset; 1155} 1156 1157// getJumpTableEntryAddress - Return the address of the JumpTable with index 1158// 'Index' in the jumpp table that was last initialized with 'initJumpTableInfo' 1159// 1160intptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const { 1161 const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables(); 1162 assert(Index < JT.size() && "Invalid jump table index!"); 1163 1164 unsigned Offset = 0; 1165 unsigned EntrySize = JumpTable->getEntrySize(); 1166 1167 for (unsigned i = 0; i < Index; ++i) 1168 Offset += JT[i].MBBs.size(); 1169 1170 Offset *= EntrySize; 1171 1172 return (intptr_t)((char *)JumpTableBase + Offset); 1173} 1174 1175//===----------------------------------------------------------------------===// 1176// Public interface to this file 1177//===----------------------------------------------------------------------===// 1178 1179MachineCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM) { 1180 return new JITEmitter(jit, JMM); 1181} 1182 1183// getPointerToNamedFunction - This function is used as a global wrapper to 1184// JIT::getPointerToNamedFunction for the purpose of resolving symbols when 1185// bugpoint is debugging the JIT. 
In that scenario, we are loading an .so and 1186// need to resolve function(s) that are being mis-codegenerated, so we need to 1187// resolve their addresses at runtime, and this is the way to do it. 1188extern "C" { 1189 void *getPointerToNamedFunction(const char *Name) { 1190 if (Function *F = TheJIT->FindFunctionNamed(Name)) 1191 return TheJIT->getPointerToFunction(F); 1192 return TheJIT->getPointerToNamedFunction(Name); 1193 } 1194} 1195 1196// getPointerToFunctionOrStub - If the specified function has been 1197// code-gen'd, return a pointer to the function. If not, compile it, or use 1198// a stub to implement lazy compilation if available. 1199// 1200void *JIT::getPointerToFunctionOrStub(Function *F) { 1201 // If we have already code generated the function, just return the address. 1202 if (void *Addr = getPointerToGlobalIfAvailable(F)) 1203 return Addr; 1204 1205 // Get a stub if the target supports it. 1206 assert(isa<JITEmitter>(MCE) && "Unexpected MCE?"); 1207 JITEmitter *JE = cast<JITEmitter>(getCodeEmitter()); 1208 return JE->getJITResolver().getFunctionStub(F); 1209} 1210 1211/// freeMachineCodeForFunction - release machine code memory for given Function. 1212/// 1213void JIT::freeMachineCodeForFunction(Function *F) { 1214 1215 // Delete translation for this from the ExecutionEngine, so it will get 1216 // retranslated next time it is used. 1217 void *OldPtr = updateGlobalMapping(F, 0); 1218 1219 if (OldPtr) 1220 RemoveFunctionFromSymbolTable(OldPtr); 1221 1222 // Free the actual memory for the function body and related stuff. 1223 assert(isa<JITEmitter>(MCE) && "Unexpected MCE?"); 1224 cast<JITEmitter>(MCE)->deallocateMemForFunction(F); 1225} 1226 1227