//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//      - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

static const char *const kTsanModuleCtorName = "tsan.module_ctor";
static const char *const kTsanInitName = "__tsan_init";
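
// For illustration, a sketch (assumed shape, not normative) of what this pass
// emits for a plain aligned 4-byte load:
//
//   %v = load i32, i32* %p, align 4
//
// becomes:
//
//   %a = bitcast i32* %p to i8*
//   call void @__tsan_read4(i8* %a)
//   %v = load i32, i32* %p, align 4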

namespace {

/// ThreadSanitizer: instrument the code in the module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer() : FunctionPass(ID) {}
  const char *getPassName() const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);
  bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<Instruction *> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);

  Type *IntptrTy;
  IntegerType *OrdTy;
  // Callbacks to run-time library are computed in doInitialization.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  // Access sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanUnalignedRead[kNumberOfAccessSizes];
  Function *TsanUnalignedWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
  Function *TsanVptrLoad;
  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
  Function *TsanCtorFunction;
};
}  // namespace

char ThreadSanitizer::ID = 0;
INITIALIZE_PASS(ThreadSanitizer, "tsan",
                "ThreadSanitizer: detects data races.",
                false, false)

const char *ThreadSanitizer::getPassName() const {
  return "ThreadSanitizer";
}

FunctionPass *llvm::createThreadSanitizerPass() {
  return new ThreadSanitizer();
}

void ThreadSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(M.getContext());
  // Initialize the callbacks.
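  // For example, iteration i == 2 below (ByteSize == 4) binds TsanRead[2] and
  // TsanWrite[2] to the run-time routines __tsan_read4 and __tsan_write4,
  // each taking the accessed address as a single i8* argument.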
  TsanFuncEntry = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
  TsanFuncExit = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("__tsan_func_exit", IRB.getVoidTy(), nullptr));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
        ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
        WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] =
        checkSanitizerInterfaceFunction(M.getOrInsertFunction(
            UnalignedReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] =
        checkSanitizerInterfaceFunction(M.getOrInsertFunction(
            UnalignedWriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] = checkSanitizerInterfaceFunction(
        M.getOrInsertFunction(AtomicLoadName, Ty, PtrTy, OrdTy, nullptr));

    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy, nullptr));

    for (int op = AtomicRMWInst::FIRST_BINOP;
        op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = nullptr;
      const char *NamePart = nullptr;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkSanitizerInterfaceFunction(
          M.getOrInsertFunction(RMWName, Ty, PtrTy, Ty, OrdTy, nullptr));
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, nullptr));
  }
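
  // An illustrative sketch (an assumption, not normative) of how these
  // bindings are used by instrumentAtomic below: a sequentially consistent
  // 32-bit read-modify-write such as
  //   %old = atomicrmw add i32* %p, i32 1 seq_cst
  // becomes a call to the routine registered above:
  //   %old = call i32 @__tsan_atomic32_fetch_add(i32* %p, i32 1, i32 5)
  // where the trailing i32 5 encodes the ordering (see createOrdering below).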
  TsanVptrUpdate = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("__tsan_vptr_update", IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), nullptr));
  TsanVptrLoad = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
  TsanAtomicThreadFence = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, nullptr));

  TsanAtomicSignalFence = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, nullptr));

  MemmoveFn = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IntptrTy, nullptr));
  MemcpyFn = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                            IRB.getInt8PtrTy(), IntptrTy, nullptr));
  MemsetFn = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                            IRB.getInt32Ty(), IntptrTy, nullptr));
}

bool ThreadSanitizer::doInitialization(Module &M) {
  const DataLayout &DL = M.getDataLayout();
  IntptrTy = DL.getIntPtrType(M.getContext());
  std::tie(TsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{});

  appendToGlobalCtors(M, TsanCtorFunction, 0);

  return true;
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - not captured variables
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
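//
// For example (a sketch): given, within one BB with no calls in between,
//   %v = load i32, i32* %a
//   ...
//   store i32 %u, i32* %a
// only the store is instrumented; the load is skipped (counted in
// NumOmittedReadsBeforeWrite), since the instrumented write to the same
// address already gives the run-time a chance to detect a race there.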
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
    const DataLayout &DL) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it cannot race with any writes.
        continue;
      }
    }
    Value *Addr = isa<StoreInst>(*I)
        ? cast<StoreInst>(I)->getPointerOperand()
        : cast<LoadInst>(I)->getPointerOperand();
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
      NumOmittedNonCaptured++;
      continue;
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSynchScope() == CrossThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSynchScope() == CrossThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

bool ThreadSanitizer::runOnFunction(Function &F) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (&F == TsanCtorFunction)
    return false;
  initializeCallbacks(*F.getParent());
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<ReturnInst>(Inst))
        RetVec.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (auto Inst : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(Inst, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  // Instrument function entry/exit points if there were instrumented accesses.
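  // A sketch (assumed shape) of the injected prologue and epilogue:
  //   %pc = call i8* @llvm.returnaddress(i32 0)
  //   call void @__tsan_func_entry(i8* %pc)
  //   ...
  //   call void @__tsan_func_exit()
  //   ret void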
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    for (auto RetInst : RetVec) {
      IRBuilder<> IRBRet(RetInst);
      IRBRet.CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();
  int Idx = getMemoryAccessFuncIndex(Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate,
                   {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  const unsigned Alignment = IsWrite
      ? cast<StoreInst>(I)->getAlignment()
      : cast<LoadInst>(I)->getAlignment();
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  Value *OnAccessFunc = nullptr;
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
    OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  else
    OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else NumInstrumentedReads++;
  return true;
}

static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:             v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 3; break;
    case AcquireRelease:         v = 4; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
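//
// For example (a sketch, assuming a 64-bit target), the intrinsic call
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i32 1, i1 false)
// is replaced with a plain libcall that the run-time library intercepts:
//   %r = call i8* @memset(i8* %p, i32 0, i64 %n)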
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall(
        MemsetFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on the C++11/C1x
// standards. For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
    ReplaceInstWithInst(I, C);

  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
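    // The IR cmpxchg yields a {value, success} pair, while the run-time CAS
    // helper returns only the old value, so the success flag is recomputed
    // below by comparing against the expected operand. A sketch (hypothetical
    // value names) of the replacement:
    //   %old = call i32 @__tsan_atomic32_compare_exchange_val(
    //              i32* %p, i32 %expected, i32 %desired, i32 5, i32 5)
    //   %ok  = icmp eq i32 %old, %expected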
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());

    Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSynchScope() == SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
                                              const DataLayout &DL) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}