JITMemoryManager.cpp revision a9ad04191cb56c42944b17980b8b2bb2afe11ab2
//===-- JITMemoryManager.cpp - Memory Allocator for JIT'd code ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DefaultJITMemoryManager class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/GlobalValue.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Memory.h"
#include <map>
#include <vector>
#include <cassert>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
using namespace llvm;

STATISTIC(NumSlabs, "Number of slabs of memory allocated by the JIT");

// Out-of-line virtual destructor anchors the vtable in this translation unit.
JITMemoryManager::~JITMemoryManager() {}

//===----------------------------------------------------------------------===//
// Memory Block Implementation.
//===----------------------------------------------------------------------===//

namespace {
  /// MemoryRangeHeader - For a range of memory, this is the header that we put
  /// on the block of memory.  It is carefully crafted to be one word of memory.
  /// Allocated blocks have just this header, free'd blocks have FreeRangeHeader
  /// which starts with this.
  ///
  /// Together with the size word that free blocks write at their end (see
  /// FreeRangeHeader::SetEndOfBlockSizeMarker), these headers let us walk to
  /// both the following block and a free preceding block in constant time.
  struct FreeRangeHeader;
  struct MemoryRangeHeader {
    /// ThisAllocated - This is true if this block is currently allocated.  If
    /// not, this can be converted to a FreeRangeHeader.
    unsigned ThisAllocated : 1;

    /// PrevAllocated - Keep track of whether the block immediately before us is
    /// allocated.  If not, the word immediately before this header is the size
    /// of the previous block.
    unsigned PrevAllocated : 1;

    /// BlockSize - This is the size in bytes of this memory block,
    /// including this header.
    uintptr_t BlockSize : (sizeof(intptr_t)*CHAR_BIT - 2);


    /// getBlockAfter - Return the memory block immediately after this one.
    ///
    MemoryRangeHeader &getBlockAfter() const {
      return *(MemoryRangeHeader*)((char*)this+BlockSize);
    }

    /// getFreeBlockBefore - If the block before this one is free, return it,
    /// otherwise return null.
    FreeRangeHeader *getFreeBlockBefore() const {
      if (PrevAllocated) return 0;
      // A free predecessor stored its size in the word just before our header,
      // so we can hop backwards over it without any other bookkeeping.
      intptr_t PrevSize = ((intptr_t *)this)[-1];
      return (FreeRangeHeader*)((char*)this-PrevSize);
    }

    /// FreeBlock - Turn an allocated block into a free block, adjusting
    /// bits in the object headers, and adding an end of region memory block.
    FreeRangeHeader *FreeBlock(FreeRangeHeader *FreeList);

    /// TrimAllocationToSize - If this allocated block is significantly larger
    /// than NewSize, split it into two pieces (where the former is NewSize
    /// bytes, including the header), and add the new block to the free list.
    FreeRangeHeader *TrimAllocationToSize(FreeRangeHeader *FreeList,
                                          uint64_t NewSize);
  };

  /// FreeRangeHeader - For a memory block that isn't already allocated, this
  /// keeps track of the current block and has a pointer to the next free block.
  /// Free blocks are kept on a circularly linked list.
  struct FreeRangeHeader : public MemoryRangeHeader {
    FreeRangeHeader *Prev;
    FreeRangeHeader *Next;

    /// getMinBlockSize - Get the minimum size for a memory block.  Blocks
    /// smaller than this size cannot be created.
    static unsigned getMinBlockSize() {
      // Room for the free-list links plus the trailing size marker word.
      return sizeof(FreeRangeHeader)+sizeof(intptr_t);
    }

    /// SetEndOfBlockSizeMarker - The word at the end of every free block is
    /// known to be the size of the free block.  Set it for this block.
    void SetEndOfBlockSizeMarker() {
      void *EndOfBlock = (char*)this + BlockSize;
      ((intptr_t *)EndOfBlock)[-1] = BlockSize;
    }

    /// RemoveFromFreeList - Unlink this block from the circular free list,
    /// returning the block that follows it on the list.
    FreeRangeHeader *RemoveFromFreeList() {
      assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
      Next->Prev = Prev;
      return Prev->Next = Next;
    }

    /// AddToFreeList - Splice this block into the circular free list
    /// immediately before FreeList.
    void AddToFreeList(FreeRangeHeader *FreeList) {
      Next = FreeList;
      Prev = FreeList->Prev;
      Prev->Next = this;
      Next->Prev = this;
    }

    /// GrowBlock - The block after this block just got deallocated.  Merge it
    /// into the current block.
    void GrowBlock(uintptr_t NewSize);

    /// AllocateBlock - Mark this entire block allocated, updating freelists
    /// etc.  This returns a pointer to the circular free-list.
    FreeRangeHeader *AllocateBlock();
  };
}


/// AllocateBlock - Mark this entire block allocated, updating freelists
/// etc.  This returns a pointer to the circular free-list.
FreeRangeHeader *FreeRangeHeader::AllocateBlock() {
  assert(!ThisAllocated && !getBlockAfter().PrevAllocated &&
         "Cannot allocate an allocated block!");
  // Mark this block allocated, both in our own header and in the successor's
  // PrevAllocated flag (the two must always stay in sync).
  ThisAllocated = 1;
  getBlockAfter().PrevAllocated = 1;

  // Remove it from the free list.
  return RemoveFromFreeList();
}

/// FreeBlock - Turn an allocated block into a free block, adjusting
/// bits in the object headers, and adding an end of region memory block.
/// If possible, coalesce this block with neighboring blocks.  Return the
/// FreeRangeHeader to allocate from.
FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
  MemoryRangeHeader *FollowingBlock = &getBlockAfter();
  assert(ThisAllocated && "This block is already free!");
  assert(FollowingBlock->PrevAllocated && "Flags out of sync!");

  FreeRangeHeader *FreeListToReturn = FreeList;

  // If the block after this one is free, merge it into this block.
  if (!FollowingBlock->ThisAllocated) {
    FreeRangeHeader &FollowingFreeBlock = *(FreeRangeHeader *)FollowingBlock;
    // "FreeList" always needs to be a valid free block.  If we're about to
    // coalesce with it, update our notion of what the free list is.
    if (&FollowingFreeBlock == FreeList) {
      FreeList = FollowingFreeBlock.Next;
      FreeListToReturn = 0;
      assert(&FollowingFreeBlock != FreeList && "No tombstone block?");
    }
    FollowingFreeBlock.RemoveFromFreeList();

    // Include the following block into this one.
    BlockSize += FollowingFreeBlock.BlockSize;
    FollowingBlock = &FollowingFreeBlock.getBlockAfter();

    // Tell the block after the block we are coalescing that this block is
    // allocated.
    FollowingBlock->PrevAllocated = 1;
  }

  assert(FollowingBlock->ThisAllocated && "Missed coalescing?");

  // If the block before this one is free, absorb this block into it; this
  // block's own header disappears inside the grown predecessor.
  if (FreeRangeHeader *PrevFreeBlock = getFreeBlockBefore()) {
    PrevFreeBlock->GrowBlock(PrevFreeBlock->BlockSize + BlockSize);
    return FreeListToReturn ? FreeListToReturn : PrevFreeBlock;
  }

  // Otherwise, mark this block free.
  FreeRangeHeader &FreeBlock = *(FreeRangeHeader*)this;
  FollowingBlock->PrevAllocated = 0;
  FreeBlock.ThisAllocated = 0;

  // Link this into the linked list of free blocks.
  FreeBlock.AddToFreeList(FreeList);

  // Add a marker at the end of the block, indicating the size of this free
  // block.
  FreeBlock.SetEndOfBlockSizeMarker();
  return FreeListToReturn ? FreeListToReturn : &FreeBlock;
}

/// GrowBlock - The block after this block just got deallocated.  Merge it
/// into the current block.
void FreeRangeHeader::GrowBlock(uintptr_t NewSize) {
  assert(NewSize > BlockSize && "Not growing block?");
  BlockSize = NewSize;
  SetEndOfBlockSizeMarker();
  // The block now following the grown range must know that the block before
  // it (us) is free.
  getBlockAfter().PrevAllocated = 0;
}

/// TrimAllocationToSize - If this allocated block is significantly larger
/// than NewSize, split it into two pieces (where the former is NewSize
/// bytes, including the header), and add the new block to the free list.
FreeRangeHeader *MemoryRangeHeader::
TrimAllocationToSize(FreeRangeHeader *FreeList, uint64_t NewSize) {
  assert(ThisAllocated && getBlockAfter().PrevAllocated &&
         "Cannot deallocate part of an allocated block!");

  // Don't allow blocks to be trimmed below minimum required size.
  NewSize = std::max<uint64_t>(FreeRangeHeader::getMinBlockSize(), NewSize);

  // Round up size for alignment of header.
  unsigned HeaderAlign = __alignof(FreeRangeHeader);
  NewSize = (NewSize+ (HeaderAlign-1)) & ~(HeaderAlign-1);

  // Size is now the size of the block we will remove from the start of the
  // current block.
  assert(NewSize <= BlockSize &&
         "Allocating more space from this block than exists!");

  // If splitting this block will cause the remainder to be too small, do not
  // split the block.
  if (BlockSize <= NewSize+FreeRangeHeader::getMinBlockSize())
    return FreeList;

  // Otherwise, we splice the required number of bytes out of this block, form
  // a new block immediately after it, then mark this block allocated.
  MemoryRangeHeader &FormerNextBlock = getBlockAfter();

  // Change the size of this block.
  BlockSize = NewSize;

  // Get the new block we just sliced out and turn it into a free block.
  FreeRangeHeader &NewNextBlock = (FreeRangeHeader &)getBlockAfter();
  NewNextBlock.BlockSize = (char*)&FormerNextBlock - (char*)&NewNextBlock;
  NewNextBlock.ThisAllocated = 0;
  NewNextBlock.PrevAllocated = 1;
  NewNextBlock.SetEndOfBlockSizeMarker();
  FormerNextBlock.PrevAllocated = 0;
  NewNextBlock.AddToFreeList(FreeList);
  return &NewNextBlock;
}

//===----------------------------------------------------------------------===//
// Memory Manager Implementation.
//===----------------------------------------------------------------------===//

namespace {

  class DefaultJITMemoryManager;

  /// JITSlabAllocator - An adapter that lets the BumpPtrAllocators for stubs
  /// and data obtain their slabs through DefaultJITMemoryManager, so all JIT
  /// memory is allocated near each other.
  class JITSlabAllocator : public SlabAllocator {
    DefaultJITMemoryManager &JMM;
  public:
    JITSlabAllocator(DefaultJITMemoryManager &jmm) : JMM(jmm) { }
    virtual ~JITSlabAllocator() { }
    virtual MemSlab *Allocate(size_t Size);
    virtual void Deallocate(MemSlab *Slab);
  };

  /// DefaultJITMemoryManager - Manage memory for the JIT code generation.
  /// This splits a large block of MAP_NORESERVE'd memory into two
  /// sections, one for function stubs, one for the functions themselves.  We
  /// have to do this because we may need to emit a function stub while in the
  /// middle of emitting a function, and we don't know how large the function we
  /// are emitting is.
  class DefaultJITMemoryManager : public JITMemoryManager {

    // Whether to poison freed memory.
    bool PoisonMemory;

    /// LastSlab - This points to the last slab allocated and is used as the
    /// NearBlock parameter to AllocateRWX so that we can attempt to lay out all
    /// stubs, data, and code contiguously in memory.  In general, however, this
    /// is not possible because the NearBlock parameter is ignored on Windows
    /// platforms and even on Unix it works on a best-effort basis.
    sys::MemoryBlock LastSlab;

    // Memory slabs allocated by the JIT.  We refer to them as slabs so we
    // don't confuse them with the blocks of memory described above.
    std::vector<sys::MemoryBlock> CodeSlabs;
    JITSlabAllocator BumpSlabAllocator;
    BumpPtrAllocator StubAllocator;
    BumpPtrAllocator DataAllocator;

    // Circular list of free blocks.
    FreeRangeHeader *FreeMemoryList;

    // When emitting code into a memory block, this is the block.
    MemoryRangeHeader *CurBlock;

    uint8_t *GOTBase;     // Target Specific reserved memory
    void *DlsymTable;     // Stub external symbol information

    // Map from a function to the code/table block allocated for it, so the
    // memory can be released in deallocateMemForFunction.
    std::map<const Function*, MemoryRangeHeader*> FunctionBlocks;
    std::map<const Function*, MemoryRangeHeader*> TableBlocks;
  public:
    DefaultJITMemoryManager();
    ~DefaultJITMemoryManager();

    /// allocateNewSlab - Allocates a new MemoryBlock and remembers it as the
    /// last slab it allocated, so that subsequent allocations follow it.
    sys::MemoryBlock allocateNewSlab(size_t size);

    /// DefaultCodeSlabSize - When we have to go map more memory, we allocate
    /// at least this much unless more is requested.
    static const size_t DefaultCodeSlabSize;

    /// DefaultSlabSize - Allocate data into slabs of this size unless we get
    /// an allocation above SizeThreshold.
    static const size_t DefaultSlabSize;

    /// DefaultSizeThreshold - For any allocation larger than this threshold,
    /// we should allocate a separate slab.
    static const size_t DefaultSizeThreshold;

    void AllocateGOT();
    void SetDlsymTable(void *);

    // Testing methods.
    virtual bool CheckInvariants(std::string &ErrorStr);
    size_t GetDefaultCodeSlabSize() { return DefaultCodeSlabSize; }
    size_t GetDefaultDataSlabSize() { return DefaultSlabSize; }
    size_t GetDefaultStubSlabSize() { return DefaultSlabSize; }
    unsigned GetNumCodeSlabs() { return CodeSlabs.size(); }
    unsigned GetNumDataSlabs() { return DataAllocator.GetNumSlabs(); }
    unsigned GetNumStubSlabs() { return StubAllocator.GetNumSlabs(); }

    /// startFunctionBody - When a function starts, allocate a block of free
    /// executable memory, returning a pointer to it and its actual size.
    uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize) {

      FreeRangeHeader* candidateBlock = FreeMemoryList;
      FreeRangeHeader* head = FreeMemoryList;
      FreeRangeHeader* iter = head->Next;

      uintptr_t largest = candidateBlock->BlockSize;

      // Search for the largest free block.
      while (iter != head) {
        if (iter->BlockSize > largest) {
          largest = iter->BlockSize;
          candidateBlock = iter;
        }
        iter = iter->Next;
      }

      // Usable space excludes the block header.
      largest = largest - sizeof(MemoryRangeHeader);

      // If this block isn't big enough for the allocation desired, allocate
      // another block of memory and add it to the free list.
      if (largest < ActualSize ||
          largest <= FreeRangeHeader::getMinBlockSize()) {
        DOUT << "JIT: Allocating another slab of memory for function.";
        candidateBlock = allocateNewCodeSlab((size_t)ActualSize);
      }

      // Select this candidate block for allocation.
      CurBlock = candidateBlock;

      // Allocate the entire memory block; endFunctionBody will give back any
      // unused tail via TrimAllocationToSize.
      FreeMemoryList = candidateBlock->AllocateBlock();
      ActualSize = CurBlock->BlockSize - sizeof(MemoryRangeHeader);
      return (uint8_t *)(CurBlock + 1);
    }

    /// allocateNewCodeSlab - Helper method to allocate a new slab of code
    /// memory from the OS and add it to the free list.  Returns the new
    /// FreeRangeHeader at the base of the slab.
    FreeRangeHeader *allocateNewCodeSlab(size_t MinSize) {
      // If the user needs at least MinSize free memory, then we account for
      // two MemoryRangeHeaders: the one in the user's block, and the one at
      // the end of the slab.
      size_t PaddedMin = MinSize + 2 * sizeof(MemoryRangeHeader);
      size_t SlabSize = std::max(DefaultCodeSlabSize, PaddedMin);
      sys::MemoryBlock B = allocateNewSlab(SlabSize);
      CodeSlabs.push_back(B);
      char *MemBase = (char*)(B.base());

      // Put a tiny allocated block at the end of the memory chunk, so when
      // FreeBlock calls getBlockAfter it doesn't fall off the end.
      MemoryRangeHeader *EndBlock =
          (MemoryRangeHeader*)(MemBase + B.size()) - 1;
      EndBlock->ThisAllocated = 1;
      EndBlock->PrevAllocated = 0;
      EndBlock->BlockSize = sizeof(MemoryRangeHeader);

      // Start out with a vast new block of free memory.
      FreeRangeHeader *NewBlock = (FreeRangeHeader*)MemBase;
      NewBlock->ThisAllocated = 0;
      // Make sure getFreeBlockBefore doesn't look into unmapped memory.
      NewBlock->PrevAllocated = 1;
      NewBlock->BlockSize = (uintptr_t)EndBlock - (uintptr_t)NewBlock;
      NewBlock->SetEndOfBlockSizeMarker();
      NewBlock->AddToFreeList(FreeMemoryList);

      assert(NewBlock->BlockSize - sizeof(MemoryRangeHeader) >= MinSize &&
             "The block was too small!");
      return NewBlock;
    }

    /// endFunctionBody - The function F is now allocated, and takes the memory
    /// in the range [FunctionStart,FunctionEnd).
    void endFunctionBody(const Function *F, uint8_t *FunctionStart,
                         uint8_t *FunctionEnd) {
      assert(FunctionEnd > FunctionStart);
      assert(FunctionStart == (uint8_t *)(CurBlock+1) &&
             "Mismatched function start/end!");

      uintptr_t BlockSize = FunctionEnd - (uint8_t *)CurBlock;
      FunctionBlocks[F] = CurBlock;

      // Release the memory at the end of this block that isn't needed.
      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
    }

    /// allocateSpace - Allocate a memory block of the given size.  This method
    /// cannot be called between calls to startFunctionBody and endFunctionBody.
    uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
      CurBlock = FreeMemoryList;
      FreeMemoryList = FreeMemoryList->AllocateBlock();

      uint8_t *result = (uint8_t *)(CurBlock + 1);

      if (Alignment == 0) Alignment = 1;
      // Round the result pointer up to the requested alignment.
      result = (uint8_t*)(((intptr_t)result+Alignment-1) &
                          ~(intptr_t)(Alignment-1));

      uintptr_t BlockSize = result + Size - (uint8_t *)CurBlock;
      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);

      return result;
    }

    /// allocateStub - Allocate memory for a function stub.
    uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
                          unsigned Alignment) {
      return (uint8_t*)StubAllocator.Allocate(StubSize, Alignment);
    }

    /// allocateGlobal - Allocate memory for a global.
    uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
      return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
    }

    /// startExceptionTable - Use startFunctionBody to allocate memory for the
    /// function's exception table.
    uint8_t* startExceptionTable(const Function* F, uintptr_t &ActualSize) {
      return startFunctionBody(F, ActualSize);
    }

    /// endExceptionTable - The exception table of F is now allocated,
    /// and takes the memory in the range [TableStart,TableEnd).
    void endExceptionTable(const Function *F, uint8_t *TableStart,
                           uint8_t *TableEnd, uint8_t* FrameRegister) {
      assert(TableEnd > TableStart);
      assert(TableStart == (uint8_t *)(CurBlock+1) &&
             "Mismatched table start/end!");

      uintptr_t BlockSize = TableEnd - (uint8_t *)CurBlock;
      TableBlocks[F] = CurBlock;

      // Release the memory at the end of this block that isn't needed.
      FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
    }

    uint8_t *getGOTBase() const {
      return GOTBase;
    }

    void *getDlsymTable() const {
      return DlsymTable;
    }

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body, including any exception table allocated for it.
    void deallocateMemForFunction(const Function *F) {
      std::map<const Function*, MemoryRangeHeader*>::iterator
        I = FunctionBlocks.find(F);
      if (I == FunctionBlocks.end()) return;

      // Find the block that is allocated for this function.
      MemoryRangeHeader *MemRange = I->second;
      assert(MemRange->ThisAllocated && "Block isn't allocated!");

      // Fill the buffer with garbage!
      if (PoisonMemory) {
        memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange));
      }

      // Free the memory.
      FreeMemoryList = MemRange->FreeBlock(FreeMemoryList);

      // Finally, remove this entry from FunctionBlocks.
      FunctionBlocks.erase(I);

      I = TableBlocks.find(F);
      if (I == TableBlocks.end()) return;

      // Find the block that is allocated for this function's table.
      MemRange = I->second;
      assert(MemRange->ThisAllocated && "Block isn't allocated!");

      // Fill the buffer with garbage!
      if (PoisonMemory) {
        memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange));
      }

      // Free the memory.
      FreeMemoryList = MemRange->FreeBlock(FreeMemoryList);

      // Finally, remove this entry from TableBlocks.
      TableBlocks.erase(I);
    }

    /// setMemoryWritable - When code generation is in progress,
    /// the code pages may need permissions changed.
    void setMemoryWritable()
    {
      for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
        sys::Memory::setWritable(CodeSlabs[i]);
    }
    /// setMemoryExecutable - When code generation is done and we're ready to
    /// start execution, the code pages may need permissions changed.
    void setMemoryExecutable()
    {
      for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
        sys::Memory::setExecutable(CodeSlabs[i]);
    }

    /// setPoisonMemory - Controls whether we write garbage over freed memory.
    ///
    void setPoisonMemory(bool poison) {
      PoisonMemory = poison;
    }
  };
}

MemSlab *JITSlabAllocator::Allocate(size_t Size) {
  sys::MemoryBlock B = JMM.allocateNewSlab(Size);
  // The MemSlab header lives at the start of the slab itself.
  MemSlab *Slab = (MemSlab*)B.base();
  Slab->Size = B.size();
  Slab->NextPtr = 0;
  return Slab;
}

void JITSlabAllocator::Deallocate(MemSlab *Slab) {
  sys::MemoryBlock B(Slab, Slab->Size);
  sys::Memory::ReleaseRWX(B);
}

DefaultJITMemoryManager::DefaultJITMemoryManager()
  : LastSlab(0, 0),
    BumpSlabAllocator(*this),
    StubAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator),
    DataAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator) {

#ifdef NDEBUG
  PoisonMemory = false;
#else
  PoisonMemory = true;
#endif

  // Allocate space for code.
  sys::MemoryBlock MemBlock = allocateNewSlab(DefaultCodeSlabSize);
  CodeSlabs.push_back(MemBlock);
  uint8_t *MemBase = (uint8_t*)MemBlock.base();

  // We set up the memory chunk with 4 mem regions, like this:
  //  [ START
  //    [ Free      #0 ] -> Large space to allocate functions from.
  //    [ Allocated #1 ] -> Tiny space to separate regions.
  //    [ Free      #2 ] -> Tiny space so there is always at least 1 free block.
  //    [ Allocated #3 ] -> Tiny space to prevent looking past end of block.
  //  END ]
  //
  // The last three blocks are never deallocated or touched.

  // Add MemoryRangeHeader to the end of the memory region, indicating that
  // the space after the block of memory is allocated.  This is block #3.
  MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
  Mem3->ThisAllocated = 1;
  Mem3->PrevAllocated = 0;
  Mem3->BlockSize = sizeof(MemoryRangeHeader);

  /// Add a tiny free region so that the free list always has one entry.
  FreeRangeHeader *Mem2 =
    (FreeRangeHeader *)(((char*)Mem3)-FreeRangeHeader::getMinBlockSize());
  Mem2->ThisAllocated = 0;
  Mem2->PrevAllocated = 1;
  Mem2->BlockSize = FreeRangeHeader::getMinBlockSize();
  Mem2->SetEndOfBlockSizeMarker();
  Mem2->Prev = Mem2;   // Mem2 *is* the free list for now.
  Mem2->Next = Mem2;

  /// Add a tiny allocated region so that Mem2 is never coalesced away.
  MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
  Mem1->ThisAllocated = 1;
  Mem1->PrevAllocated = 0;
  Mem1->BlockSize = sizeof(MemoryRangeHeader);

  // Add a FreeRangeHeader to the start of the function body region, indicating
  // that the space is free.  Mark the previous block allocated so we never
  // look at it.
  FreeRangeHeader *Mem0 = (FreeRangeHeader*)MemBase;
  Mem0->ThisAllocated = 0;
  Mem0->PrevAllocated = 1;
  Mem0->BlockSize = (char*)Mem1-(char*)Mem0;
  Mem0->SetEndOfBlockSizeMarker();
  Mem0->AddToFreeList(Mem2);

  // Start out with the freelist pointing to Mem0.
  FreeMemoryList = Mem0;

  GOTBase = NULL;
  DlsymTable = NULL;
}

void DefaultJITMemoryManager::AllocateGOT() {
  assert(GOTBase == 0 && "Cannot allocate the got multiple times");
  GOTBase = new uint8_t[sizeof(void*) * 8192];
  HasGOT = true;
}

void DefaultJITMemoryManager::SetDlsymTable(void *ptr) {
  DlsymTable = ptr;
}

DefaultJITMemoryManager::~DefaultJITMemoryManager() {
  // Release the RWX code slabs; the stub/data slabs are released by the
  // BumpPtrAllocator destructors through JITSlabAllocator::Deallocate.
  for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
    sys::Memory::ReleaseRWX(CodeSlabs[i]);

  delete[] GOTBase;
}

sys::MemoryBlock DefaultJITMemoryManager::allocateNewSlab(size_t size) {
  // Allocate a new block close to the last one.
  std::string ErrMsg;
  sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : 0;
  sys::MemoryBlock B = sys::Memory::AllocateRWX(size, LastSlabPtr, &ErrMsg);
  if (B.base() == 0) {
    llvm_report_error("Allocation failed when allocating new memory in the"
                      " JIT\n" + ErrMsg);
  }
  LastSlab = B;
  ++NumSlabs;
  return B;
}

/// CheckInvariants - For testing only.  Return true if all internal
/// invariants are preserved; otherwise return false and set ErrorStr to a
/// helpful error message.  For free and allocated blocks, make sure that
/// adding BlockSize gives a valid block.  For free blocks, make sure they're
/// in the free list and that their end of block size marker is correct.  This
/// function should return an error before accessing bad memory.  This
/// function is defined here instead of in JITMemoryManagerTest.cpp so that
/// we don't have to expose all of the implementation details of
/// DefaultJITMemoryManager.
bool DefaultJITMemoryManager::CheckInvariants(std::string &ErrorStr) {
  raw_string_ostream Err(ErrorStr);

  // Construct the set of FreeRangeHeader pointers so we can query it
  // efficiently.
  llvm::SmallPtrSet<MemoryRangeHeader*, 16> FreeHdrSet;
  FreeRangeHeader* FreeHead = FreeMemoryList;
  FreeRangeHeader* FreeRange = FreeHead;

  do {
    // Check that the free range pointer is in the blocks we've allocated.
    bool Found = false;
    for (std::vector<sys::MemoryBlock>::iterator I = CodeSlabs.begin(),
         E = CodeSlabs.end(); I != E && !Found; ++I) {
      char *Start = (char*)I->base();
      char *End = Start + I->size();
      Found = (Start <= (char*)FreeRange && (char*)FreeRange < End);
    }
    if (!Found) {
      Err << "Corrupt free list; points to " << FreeRange;
      return false;
    }

    if (FreeRange->Next->Prev != FreeRange) {
      Err << "Next and Prev pointers do not match.";
      return false;
    }

    // Otherwise, add it to the set.
    FreeHdrSet.insert(FreeRange);
    FreeRange = FreeRange->Next;
  } while (FreeRange != FreeHead);

  // Go over each block, and look at each MemoryRangeHeader.
  for (std::vector<sys::MemoryBlock>::iterator I = CodeSlabs.begin(),
       E = CodeSlabs.end(); I != E; ++I) {
    char *Start = (char*)I->base();
    char *End = Start + I->size();

    // Check each memory range.
    for (MemoryRangeHeader *Hdr = (MemoryRangeHeader*)Start, *LastHdr = NULL;
         Start <= (char*)Hdr && (char*)Hdr < End;
         Hdr = &Hdr->getBlockAfter()) {
      if (Hdr->ThisAllocated == 0) {
        // Check that this range is in the free list.
        if (!FreeHdrSet.count(Hdr)) {
          Err << "Found free header at " << Hdr << " that is not in free list.";
          return false;
        }

        // Now make sure the size marker at the end of the block is correct.
        uintptr_t *Marker = ((uintptr_t*)&Hdr->getBlockAfter()) - 1;
        if (!(Start <= (char*)Marker && (char*)Marker < End)) {
          Err << "Block size in header points out of current MemoryBlock.";
          return false;
        }
        if (Hdr->BlockSize != *Marker) {
          Err << "End of block size marker (" << *Marker << ") "
              << "and BlockSize (" << Hdr->BlockSize << ") don't match.";
          return false;
        }
      }

      if (LastHdr && LastHdr->ThisAllocated != Hdr->PrevAllocated) {
        Err << "Hdr->PrevAllocated (" << Hdr->PrevAllocated << ") != "
            << "LastHdr->ThisAllocated (" << LastHdr->ThisAllocated << ")";
        return false;
      } else if (!LastHdr && !Hdr->PrevAllocated) {
        Err << "The first header should have PrevAllocated true.";
        return false;
      }

      // Remember the last header.
      LastHdr = Hdr;
    }
  }

  // All invariants are preserved.
  return true;
}

JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() {
  return new DefaultJITMemoryManager();
}

// Allocate memory for code in 512K slabs.
const size_t DefaultJITMemoryManager::DefaultCodeSlabSize = 512 * 1024;

// Allocate globals and stubs in slabs of 64K.  (probably 16 pages)
const size_t DefaultJITMemoryManager::DefaultSlabSize = 64 * 1024;

// Waste at most 16K at the end of each bump slab.  (probably 4 pages)
const size_t DefaultJITMemoryManager::DefaultSizeThreshold = 16 * 1024;