// ShadowStackGC.cpp, revision 492d06efde44a4e38a6ed321ada4af5a75494df6
//===-- ShadowStackGC.cpp - GC support for uncooperative targets ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering for the llvm.gc* intrinsics for targets that do
// not natively support them (which includes the C backend). Note that the code
// generated is not quite as efficient as algorithms which generate stack maps
// to identify roots.
//
// This pass implements the code transformation described in this paper:
//   "Accurate Garbage Collection in an Uncooperative Environment"
//   Fergus Henderson, ISMM, 2002
//
// In runtime/GC/SemiSpace.cpp is a prototype runtime which is compatible with
// ShadowStackGC.
//
// In order to support this particular transformation, all stack roots are
// coallocated in the stack. This allows a fully target-independent stack map
// while introducing only minor runtime overhead.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "shadowstackgc"
#include "llvm/CodeGen/GCs.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/IRBuilder.h"

using namespace llvm;

namespace {

  class ShadowStackGC : public GCStrategy {
    /// RootChain - This is the global linked-list that contains the chain of
    /// GC roots.
    GlobalVariable *Head;

    /// StackEntryTy - Abstract type of a link in the shadow stack.
    ///
    const StructType *StackEntryTy;

    /// Roots - GC roots in the current function. Each is a pair of the
    /// intrinsic call and its corresponding alloca.
    std::vector<std::pair<CallInst*,AllocaInst*> > Roots;

  public:
    ShadowStackGC();

    bool initializeCustomLowering(Module &M);
    bool performCustomLowering(Function &F);

  private:
    bool IsNullValue(Value *V);
    Constant *GetFrameMap(Function &F);
    const Type* GetConcreteStackEntryType(Function &F);
    void CollectRoots(Function &F);
    static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                        IRBuilder<> &B, Value *BasePtr,
                                        int Idx1, const char *Name);
    static GetElementPtrInst *CreateGEP(LLVMContext &Context,
                                        IRBuilder<> &B, Value *BasePtr,
                                        int Idx1, int Idx2, const char *Name);
  };

}

static GCRegistry::Add<ShadowStackGC>
X("shadow-stack", "Very portable GC for uncooperative code generators");

namespace {
  /// EscapeEnumerator - This is a little algorithm to find all escape points
  /// from a function so that "finally"-style code can be inserted. In addition
  /// to finding the existing return and unwind instructions, it also (if
  /// necessary) transforms any call instructions into invokes and sends them
  /// to a landing pad.
  ///
  /// It's wrapped up in a state machine using the same transform C# uses for
  /// 'yield return' enumerators. This transform allows it to be non-allocating.
  class EscapeEnumerator {
    Function &F;
    const char *CleanupBBName;

    // State.
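    // The enumerator is a three-state machine driven by Next():
    //   0: not yet started; capture [F.begin(), F.end()) and fall through.
    //   1: yield each block terminated by a return or unwind; once those are
    //      exhausted, rewrite ordinary (non-intrinsic) calls into invokes
    //      unwinding to a fresh cleanup block, yield that block once, and
    //      move to state 2.
    //   2: finished; Next() returns null.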
    int State;
    Function::iterator StateBB, StateE;
    IRBuilder<> Builder;

  public:
    EscapeEnumerator(Function &F, const char *N = "cleanup")
      : F(F), CleanupBBName(N), State(0), Builder(F.getContext()) {}

    IRBuilder<> *Next() {
      switch (State) {
      default:
        return 0;

      case 0:
        StateBB = F.begin();
        StateE = F.end();
        State = 1;

      case 1:
        // Find all 'return' and 'unwind' instructions.
        while (StateBB != StateE) {
          BasicBlock *CurBB = StateBB++;

          // Branches and invokes do not escape, only unwind and return do.
          TerminatorInst *TI = CurBB->getTerminator();
          if (!isa<UnwindInst>(TI) && !isa<ReturnInst>(TI))
            continue;

          Builder.SetInsertPoint(TI->getParent(), TI);
          return &Builder;
        }

        State = 2;

        // Find all 'call' instructions.
        SmallVector<Instruction*,16> Calls;
        for (Function::iterator BB = F.begin(),
                                E = F.end(); BB != E; ++BB)
          for (BasicBlock::iterator II = BB->begin(),
                                    EE = BB->end(); II != EE; ++II)
            if (CallInst *CI = dyn_cast<CallInst>(II))
              if (!CI->getCalledFunction() ||
                  !CI->getCalledFunction()->getIntrinsicID())
                Calls.push_back(CI);

        if (Calls.empty())
          return 0;

        // Create a cleanup block.
        BasicBlock *CleanupBB = BasicBlock::Create(F.getContext(),
                                                   CleanupBBName, &F);
        UnwindInst *UI = new UnwindInst(F.getContext(), CleanupBB);

        // Transform the 'call' instructions into 'invoke's branching to the
        // cleanup block. Go in reverse order to make prettier BB names.
        SmallVector<Value*,16> Args;
        for (unsigned I = Calls.size(); I != 0; ) {
          CallInst *CI = cast<CallInst>(Calls[--I]);

          // Split the basic block containing the function call.
          BasicBlock *CallBB = CI->getParent();
          BasicBlock *NewBB =
            CallBB->splitBasicBlock(CI, CallBB->getName() + ".cont");

          // Remove the unconditional branch inserted at the end of CallBB.
          CallBB->getInstList().pop_back();
          NewBB->getInstList().remove(CI);

          // Create a new invoke instruction.
          Args.clear();
          Args.append(CI->op_begin() + 1, CI->op_end());

          InvokeInst *II = InvokeInst::Create(CI->getOperand(0),
                                              NewBB, CleanupBB,
                                              Args.begin(), Args.end(),
                                              CI->getName(), CallBB);
          II->setCallingConv(CI->getCallingConv());
          II->setAttributes(CI->getAttributes());
          CI->replaceAllUsesWith(II);
          delete CI;
        }

        Builder.SetInsertPoint(UI->getParent(), UI);
        return &Builder;
      }
    }
  };
}

// -----------------------------------------------------------------------------

void llvm::linkShadowStackGC() { }

ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {
  InitRoots = true;
  CustomRoots = true;
}

Constant *ShadowStackGC::GetFrameMap(Function &F) {
  // initializeCustomLowering creates the abstract type of this value.
  const Type *VoidPtr = Type::getInt8PtrTy(F.getContext());

  // Truncate the ShadowStackDescriptor if some metadata is null.
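  // CollectRoots placed the metadata-bearing roots first, so NumMeta only
  // needs to cover entries up to the last non-null metadata constant; a
  // trailing run of null entries is dropped from the emitted Meta array.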
  unsigned NumMeta = 0;
  SmallVector<Constant*,16> Metadata;
  for (unsigned I = 0; I != Roots.size(); ++I) {
    Constant *C = cast<Constant>(Roots[I].first->getOperand(2));
    if (!C->isNullValue())
      NumMeta = I + 1;
    Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
  }

  Constant *BaseElts[] = {
    ConstantInt::get(Type::getInt32Ty(F.getContext()), Roots.size(), false),
    ConstantInt::get(Type::getInt32Ty(F.getContext()), NumMeta, false),
  };

  Constant *DescriptorElts[] = {
    ConstantStruct::get(F.getContext(), BaseElts, 2, false),
    ConstantArray::get(ArrayType::get(VoidPtr, NumMeta),
                       Metadata.begin(), NumMeta)
  };

  Constant *FrameMap = ConstantStruct::get(F.getContext(), DescriptorElts, 2,
                                           false);

  std::string TypeName("gc_map.");
  TypeName += utostr(NumMeta);
  F.getParent()->addTypeName(TypeName, FrameMap->getType());

  // FIXME: Is this actually dangerous as WritingAnLLVMPass.html claims? Seems
  //        that, short of multithreaded LLVM, it should be safe; all that is
  //        necessary is that a simple Module::iterator loop not be invalidated.
  //        Appending to the GlobalVariable list is safe in that sense.
  //
  //        All of the output passes emit globals last. The ExecutionEngine
  //        explicitly supports adding globals to the module after
  //        initialization.
  //
  //        Still, if it isn't deemed acceptable, then this transformation
  //        needs to be a ModulePass (which means it cannot be in the 'llc'
  //        pipeline (which uses a FunctionPassManager (which segfaults (not
  //        asserts) if provided a ModulePass))).
  Constant *GV = new GlobalVariable(*F.getParent(), FrameMap->getType(), true,
                                    GlobalVariable::InternalLinkage,
                                    FrameMap, "__gc_" + F.getName());

  Constant *GEPIndices[2] = {
    ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
    ConstantInt::get(Type::getInt32Ty(F.getContext()), 0)
  };
  return ConstantExpr::getGetElementPtr(GV, GEPIndices, 2);
}

const Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
  // initializeCustomLowering creates the generic version of this type.
  std::vector<const Type*> EltTys;
  EltTys.push_back(StackEntryTy);
  for (size_t I = 0; I != Roots.size(); I++)
    EltTys.push_back(Roots[I].second->getAllocatedType());
  Type *Ty = StructType::get(F.getContext(), EltTys);

  std::string TypeName("gc_stackentry.");
  TypeName += F.getName();
  F.getParent()->addTypeName(TypeName, Ty);

  return Ty;
}

/// initializeCustomLowering - If this module uses the GC intrinsics, find them
/// now. If not, exit fast.
bool ShadowStackGC::initializeCustomLowering(Module &M) {
  // struct FrameMap {
  //   int32_t NumRoots; // Number of roots in stack frame.
  //   int32_t NumMeta;  // Number of metadata descriptors. May be < NumRoots.
  //   void *Meta[];     // May be absent for roots without metadata.
  // };
  std::vector<const Type*> EltTys;
  // 32 bits is ok up to a 32GB stack frame. :)
  EltTys.push_back(Type::getInt32Ty(M.getContext()));
  // Specifies length of variable length array.
  EltTys.push_back(Type::getInt32Ty(M.getContext()));
  StructType *FrameMapTy = StructType::get(M.getContext(), EltTys);
  M.addTypeName("gc_map", FrameMapTy);
  PointerType *FrameMapPtrTy = PointerType::getUnqual(FrameMapTy);

  // struct StackEntry {
  //   ShadowStackEntry *Next; // Caller's stack entry.
  //   FrameMap *Map;          // Pointer to constant FrameMap.
  //   void *Roots[];          // Stack roots (in-place array, so we pretend).
  // };
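  // StackEntry is self-referential through its Next field, so it is built
  // with the opaque-type idiom: create a placeholder, point Next at it, then
  // refine the placeholder to the completed struct type.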
  OpaqueType *RecursiveTy = OpaqueType::get(M.getContext());

  EltTys.clear();
  EltTys.push_back(PointerType::getUnqual(RecursiveTy));
  EltTys.push_back(FrameMapPtrTy);
  PATypeHolder LinkTyH = StructType::get(M.getContext(), EltTys);

  RecursiveTy->refineAbstractTypeTo(LinkTyH.get());
  StackEntryTy = cast<StructType>(LinkTyH.get());
  const PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
  M.addTypeName("gc_stackentry", LinkTyH.get()); // FIXME: Is this safe from
                                                 //        a FunctionPass?

  // Get the root chain if it already exists.
  Head = M.getGlobalVariable("llvm_gc_root_chain");
  if (!Head) {
    // If the root chain does not exist, insert a new one with linkonce
    // linkage!
    Head = new GlobalVariable(M, StackEntryPtrTy, false,
                              GlobalValue::LinkOnceAnyLinkage,
                              Constant::getNullValue(StackEntryPtrTy),
                              "llvm_gc_root_chain");
  } else if (Head->hasExternalLinkage() && Head->isDeclaration()) {
    Head->setInitializer(Constant::getNullValue(StackEntryPtrTy));
    Head->setLinkage(GlobalValue::LinkOnceAnyLinkage);
  }

  return true;
}

bool ShadowStackGC::IsNullValue(Value *V) {
  if (Constant *C = dyn_cast<Constant>(V))
    return C->isNullValue();
  return false;
}

void ShadowStackGC::CollectRoots(Function &F) {
  // FIXME: Account for original alignment. Could fragment the root array.
  //   Approach 1: Null initialize empty slots at runtime. Yuck.
  //   Approach 2: Emit a map of the array instead of just a count.

  assert(Roots.empty() && "Not cleaned up?");

  SmallVector<std::pair<CallInst*,AllocaInst*>,16> MetaRoots;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
      if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
        if (Function *F = CI->getCalledFunction())
          if (F->getIntrinsicID() == Intrinsic::gcroot) {
            std::pair<CallInst*,AllocaInst*> Pair = std::make_pair(
              CI, cast<AllocaInst>(CI->getOperand(1)->stripPointerCasts()));
            if (IsNullValue(CI->getOperand(2)))
              Roots.push_back(Pair);
            else
              MetaRoots.push_back(Pair);
          }

  // Number roots with metadata (usually empty) at the beginning, so that the
  // FrameMap::Meta array can be elided.
  Roots.insert(Roots.begin(), MetaRoots.begin(), MetaRoots.end());
}
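// For illustration (not code emitted by this file): given a function with two
// roots, %x carrying metadata and %y without, CollectRoots orders Roots as
// [x, y]. GetFrameMap then emits { NumRoots = 2, NumMeta = 1, Meta = [meta of
// x] }, and GetConcreteStackEntryType builds
//   gc_stackentry.fn = { gc_stackentry, <type of x>, <type of y> }
// whose root slots are addressed with the CreateGEP helpers below.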
GetElementPtrInst *
ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
                         int Idx, int Idx2, const char *Name) {
  Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx2) };
  Value *Val = B.CreateGEP(BasePtr, Indices, Indices + 3, Name);

  assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");

  return dyn_cast<GetElementPtrInst>(Val);
}

GetElementPtrInst *
ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
                         int Idx, const char *Name) {
  Value *Indices[] = { ConstantInt::get(Type::getInt32Ty(Context), 0),
                       ConstantInt::get(Type::getInt32Ty(Context), Idx) };
  Value *Val = B.CreateGEP(BasePtr, Indices, Indices + 2, Name);

  assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");

  return dyn_cast<GetElementPtrInst>(Val);
}

/// performCustomLowering - Insert code to maintain the shadow stack.
bool ShadowStackGC::performCustomLowering(Function &F) {
  LLVMContext &Context = F.getContext();

  // Find calls to llvm.gcroot.
  CollectRoots(F);

  // If there are no roots in this function, then there is no need to add a
  // stack map entry for it.
  if (Roots.empty())
    return false;

  // Build the constant map and figure the type of the shadow stack entry.
  Value *FrameMap = GetFrameMap(F);
  const Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);

  // Build the shadow stack entry at the very start of the function.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  IRBuilder<> AtEntry(IP->getParent(), IP);

  Instruction *StackEntry = AtEntry.CreateAlloca(ConcreteStackEntryTy, 0,
                                                 "gc_frame");

  while (isa<AllocaInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Initialize the map pointer and load the current head of the shadow stack.
  Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
  Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, StackEntry,
                                       0, 1, "gc_frame.map");
  AtEntry.CreateStore(FrameMap, EntryMapPtr);

  // After all the allocas...
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    // For each root, find the corresponding slot in the aggregate...
    Value *SlotPtr = CreateGEP(Context, AtEntry, StackEntry, 1 + I, "gc_root");

    // And use it in lieu of the alloca.
    AllocaInst *OriginalAlloca = Roots[I].second;
    SlotPtr->takeName(OriginalAlloca);
    OriginalAlloca->replaceAllUsesWith(SlotPtr);
  }

  // Move past the original stores inserted by GCStrategy::InitRoots. This
  // isn't really necessary (the collector would never see the intermediate
  // state at runtime), but it's nicer not to push the half-initialized entry
  // onto the shadow stack.
  while (isa<StoreInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Push the entry onto the shadow stack.
  Instruction *EntryNextPtr = CreateGEP(Context, AtEntry,
                                        StackEntry, 0, 0, "gc_frame.next");
  Instruction *NewHeadVal = CreateGEP(Context, AtEntry,
                                      StackEntry, 0, "gc_newhead");
  AtEntry.CreateStore(CurrentHead, EntryNextPtr);
  AtEntry.CreateStore(NewHeadVal, Head);

  // For each instruction that escapes...
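  // (every return and unwind, plus the gc_cleanup landing pad that
  // EscapeEnumerator synthesizes when ordinary calls might throw), pop this
  // function's entry so the head of the chain is the caller's entry again.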
  EscapeEnumerator EE(F, "gc_cleanup");
  while (IRBuilder<> *AtExit = EE.Next()) {
    // Pop the entry from the shadow stack. Don't reuse CurrentHead from
    // AtEntry, since that would make the value live for the entire function.
    Instruction *EntryNextPtr2 = CreateGEP(Context, *AtExit, StackEntry, 0, 0,
                                           "gc_frame.next");
    Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
    AtExit->CreateStore(SavedHead, Head);
  }

  // Delete the original allocas (which are no longer used) and the intrinsic
  // calls (which are no longer valid). Doing this last avoids invalidating
  // iterators.
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    Roots[I].first->eraseFromParent();
    Roots[I].second->eraseFromParent();
  }

  Roots.clear();
  return true;
}
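// -----------------------------------------------------------------------------
//
// For reference, a minimal sketch of what a compatible runtime's root-walking
// loop can look like, assuming the FrameMap/StackEntry layouts documented
// above (the prototype in runtime/GC/SemiSpace.cpp plays this role; the names
// here are illustrative, not part of this pass):
//
//   struct FrameMap {
//     int32_t NumRoots;    // Number of roots in stack frame.
//     int32_t NumMeta;     // Number of metadata entries; may be < NumRoots.
//     const void *Meta[0]; // Metadata for each root.
//   };
//
//   struct StackEntry {
//     StackEntry *Next;    // Caller's stack entry.
//     const FrameMap *Map; // Pointer to constant FrameMap.
//     void *Roots[0];      // Stack roots (in-place array).
//   };
//
//   extern "C" StackEntry *llvm_gc_root_chain;
//
//   // Visit every root in every active frame, passing each root's metadata
//   // pointer to the visitor (null for roots past NumMeta, i.e. the tail
//   // that GetFrameMap truncated away).
//   void visitGCRoots(void (*Visitor)(void **Root, const void *Meta)) {
//     for (StackEntry *R = llvm_gc_root_chain; R; R = R->Next) {
//       unsigned i = 0;
//       for (unsigned e = R->Map->NumMeta; i != e; ++i)
//         Visitor(&R->Roots[i], R->Map->Meta[i]);
//       for (unsigned e = R->Map->NumRoots; i != e; ++i)
//         Visitor(&R->Roots[i], 0);
//     }
//   }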