MemorySanitizer.cpp revision 90230c84668269fbd53d163e398cd16486d5d414
//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
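///
/// For example, a minimal sketch of a bug this tool detects (assuming the
/// whole program is built with MemorySanitizer enabled):
///
/// \code
///   int x;    // shadow of x is poisoned by the alloca instrumentation
///   if (x)    // branch on a value whose shadow is non-zero: the
///     ...     // instrumented check calls __msan_warning at run time
/// \endcode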
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
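///
/// As a sketch of the direct mapping, with the 64-bit parameters defined
/// below (kShadowMask64 = 1ULL << 46):
///
/// \code
///   Shadow(addr) = addr & ~(1ULL << 46)
///   // e.g. Shadow(0x7f0000001000) == 0x3f0000001000
/// \endcode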
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
///                           Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
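///
/// As a sketch, for c = a + b the instrumentation picks the origin roughly as
///
/// \code
///   origin(c) = select(shadow(b) != 0, origin(b), origin(a))
/// \endcode
///
/// i.e. the rightmost not-fully-initialized argument wins.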
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (e.g. 1-byte) clean store, and it is also good for performance.
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "msan"

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BlackList.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

/// \brief Track origins of uninitialized values.
///
/// Adds a section to the MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<bool> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
       cl::desc("store origin for clean (fully initialized) values"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but they still happen
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string>  ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);

namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(bool TrackOrigins = false,
                  StringRef BlacklistFile = StringRef())
    : FunctionPass(ID),
      TrackOrigins(TrackOrigins || ClTrackOrigins),
      TD(0),
      WarningFn(0),
      BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
                                          : BlacklistFile) { }
  const char *getPassName() const { return "MemorySanitizer"; }
  bool runOnFunction(Function &F);
  bool doInitialization(Module &M);
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  bool TrackOrigins;

  DataLayout *TD;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  /// \brief Run-time helper that copies origin info for a memory range.
  Value *MsanCopyOriginFn;
  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOriginFn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  OwningPtr<BlackList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
}  // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}


/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  MsanCopyOriginFn = M.getOrInsertFunction(
    "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanSetAllocaOriginFn = M.getOrInsertFunction(
    "__msan_set_alloca_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
    "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, NULL);

  // Create globals.
  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 8), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
    GlobalVariable::GeneralDynamicTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
    "__msan_retval_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
    GlobalVariable::GeneralDynamicTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
    0, "__msan_param_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
    GlobalVariable::GeneralDynamicTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_va_arg_overflow_size_tls", 0,
    GlobalVariable::GeneralDynamicTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  TD = getAnalysisIfAvailable<DataLayout>();
  if (!TD)
    return false;
  BL.reset(new BlackList(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
      break;
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(TD);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), NULL)), 0);

  new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                     IRB.getInt32(TrackOrigins), "__msan_track_origins");

  return true;
}

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  bool InsertChecks;
  OwningPtr<VarArgHelper> VAHelper;

  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  struct ShadowOriginAndInsertPoint {
    Instruction *Shadow;
    Instruction *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
    ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
    : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    InsertChecks = !MS.BL->isIn(F);
    DEBUG(if (!InsertChecks)
            dbgs() << "MemorySanitizer is not inserting checks into '"
                   << F.getName() << "'\n");
  }

  void materializeStores() {
    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
      // StoreList only ever holds StoreInsts, so cast<> is safe here.
      StoreInst &I = *cast<StoreInst>(StoreList[i]);

      IRBuilder<> IRB(&I);
      Value *Val = I.getValueOperand();
      Value *Addr = I.getPointerOperand();
      Value *Shadow = getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
        IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;
      // If the store is volatile, add a check.
      if (I.isVolatile())
        insertCheck(Val, &I);
      if (ClCheckAccessAddress)
        insertCheck(Addr, &I);

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
          IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
                                 Alignment);
        } else {
          Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);

          Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
          // TODO(eugenis): handle non-zero constant shadow by inserting an
          // unconditional check (can not simply fail compilation as this could
          // be in the dead code).
          if (Cst)
            continue;

          Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
              getCleanShadow(ConvertedShadow), "_mscmp");
          Instruction *CheckTerm =
            SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
                                      MS.OriginStoreWeights);
          IRBuilder<> IRBNew(CheckTerm);
          IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
                                    Alignment);
        }
      }
    }
  }

  void materializeChecks() {
    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
      Instruction *Shadow = InstrumentationList[i].Shadow;
      Instruction *OrigIns = InstrumentationList[i].OrigIns;
      IRBuilder<> IRB(OrigIns);
      DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm =
        SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
                                  /* Unreachable */ !ClKeepGoing,
                                  MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        Instruction *Origin = InstrumentationList[i].Origin;
        IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      CallInst *Call = IRB.CreateCall(MS.WarningFn);
      Call->setDebugLoc(OrigIns->getDebugLoc());
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.TD) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming values from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
         DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
      BasicBlock *BB = *DI;
      visit(*BB);
    }

    // Finalize PHI nodes.
    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
      PHINode *PN = ShadowPHINodes[i];
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores();

    // Insert shadow value checks.
    materializeChecks();

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
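  ///
  /// For example (a sketch of the rules below): i32 stays i32, <4 x float>
  /// maps to <4 x i32>, a struct maps to a struct of its members' shadow
  /// types, and any other sized type maps to an integer of equal bit width.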
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return 0;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
      IRB.CreateAdd(ShadowLong,
                    ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
      IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return 0;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Value *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = getPoisonedShadow(getShadowTy(V));
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
           AI != AE; ++AI) {
        if (!AI->getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = AI->hasByValAttr()
          ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
          : MS.TD->getTypeAllocSize(AI->getType());
        if (A == AI) {
          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
          if (AI->hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            Value *Cpy = EntryIRB.CreateMemCpy(
              getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
              Base, Size, AI->getParamAlignment());
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateLoad(Base);
          }
          DEBUG(dbgs() << "  ARG:    "  << *AI << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return 0;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will later be instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    if (!InsertChecks) return;
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    InstrumentationList.push_back(
      ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
    setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));

    if (ClCheckAccessAddress)
      insertCheck(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
      setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  /// Volatile stores check that the value being stored is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
             "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
             "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in the right argument affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;     0&1 => 0;     p&1 => p;
    //  1&0 => 0;     0&0 => 0;     p&0 => 0;
    //  1&p => p;     0&p => 0;     p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;     0|1 => 1;     p|1 => 1;
    //  1|0 => 1;     0|0 => 0;     p|0 => p;
    //  1|p => 1;     0|p => p;     p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if all
  /// arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's shadow
    /// and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, false);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, false);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
      IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), false);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }

  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
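  ///
  /// For example (a sketch, with 'u' denoting an undefined bit): A = 0b1u0
  /// vs B = 0b000 compares as known (a defined bit of A differs from the
  /// corresponding defined bit of B), while A = 0b0u0 vs B = 0b000 does not,
  /// so the result's shadow is poisoned only in the second case.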
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now deal with the comparison i = (C == 0) (for C != 0 the reasoning is
    // the same). The result is defined if one of the following is true:
    // * there is a defined 1 bit in C, or
    // * C is fully defined.
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                    IRB.CreateICmpEQ(
                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value* op = NULL;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value* Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (ClHandleICmp && I.isEquality())
      handleEqualityComparison(I);
    else if (ClHandleICmp && I.isSigned() && I.isRelational())
      handleSignedRelationalComparison(I);
    else
      handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is wrong in the case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// Similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemmoveFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemcpyFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemsetFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }

  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
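    // The generated table included below is written in terms of ModRefBehavior
    // names; the local constants above (and the ModRefBehavior #define)
    // coerce those names to IntrinsicKind values.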
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  /// \brief Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value* Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // We have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
    return true;
  }

  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
    // We don't know the pointer alignment (could be unaligned SSE load!).
    // We have to assume the worst case.
    setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
    return true;
  }

  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }
1364
  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// the random intrinsics we might encounter, most importantly SIMD
  /// intrinsics. We recognize several classes of intrinsics by their
  /// argument types and ModRefBehavior and apply special instrumentation
  /// when we are reasonably sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
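  ///
  /// For example, @llvm.x86.sse2.storeu.dq matches the store-like pattern
  /// below, @llvm.x86.sse3.ldu.dq matches the load-like pattern, and
  /// @llvm.x86.sse2.padds.w falls through to maybeHandleSimpleNomemIntrinsic
  /// (illustrative examples, not an exhaustive list).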
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }

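  /// \brief Propagate shadow through llvm.bswap.
  ///
  /// bswap only permutes the bytes of its operand, so the shadow of the
  /// result is the byte-swapped shadow of the operand, e.g. (a sketch):
  ///   %r_shadow = call i32 @llvm.bswap.i32(i32 %x_shadow)
  /// The origin is passed through unchanged.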
  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
      F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }

  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark all
      // outputs as clean. Note that any side effects of the inline asm that are
      // not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      // Allow tail calls only when the return types match; otherwise we may
      // get a false positive: the shadow for a non-void RetVal would be
      // propagated to a void RetVal.
      if (Call->isTailCall() &&
          Call->getType() != Call->getParent()->getParent()->getReturnType())
        Call->setTailCall(false);

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us. To
      // prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttribute(AttributeSet::FunctionIndex,
                              Attribute::get(Func->getContext(), B));
      }
    }
    IRBuilder<> IRB(&I);
    unsigned ArgOffset = 0;
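    // Shadows of the arguments are stored back-to-back in __msan_param_tls,
    // each argument's slot rounded up to 8 bytes. E.g. for f(i32, double)
    // the layout is roughly:
    //   bytes [0, 4)   shadow of the i32 arg (slot padded to 8 bytes)
    //   bytes [8, 16)  shadow of the double arg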
    DEBUG(dbgs() << "  CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.TD->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    if (Value *RetVal = I.getReturnValue()) {
      // Set the shadow for the RetVal.
      Value *Shadow = getShadow(RetVal);
      Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
      DEBUG(dbgs() << "Return: " << *Shadow << "\n" << *ShadowPtr << "\n");
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    if (!ClPoisonStack) return;
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
    if (ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      IRB.CreateMemSet(ShadowBase, IRB.getInt8(ClPoisonStackPattern),
                       Size, I.getAlignment());
    }

    if (MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and will be
      // replaced by the run-time with the stack origin ID on the first call
      // to __msan_set_alloca_origin.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
          createPrivateNonConstGlobalForString(*F.getParent(),
                                               StackDescription.str());
      IRB.CreateCall3(MS.MsanSetAllocaOriginFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()));
    }
  }

  void visitSelectInst(SelectInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSelect(I.getCondition(),
              getShadow(I.getTrueValue()), getShadow(I.getFalseValue()),
              "_msprop"));
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      Value *Cond = I.getCondition();
      if (Cond->getType()->isVectorTy()) {
        Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
        Cond = IRB.CreateICmpNE(ConvertedShadow,
                                getCleanShadow(ConvertedShadow), "_mso_select");
      }
      setOrigin(&I, IRB.CreateSelect(Cond,
                getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue:  " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOrigin(&I, getCleanOrigin());
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue:  " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
    DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "   Res:        " << *Res << "\n");
    setShadow(&I, Res);
    setOrigin(&I, getCleanOrigin());
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;
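  // 48 == 6 general-purpose argument registers (rdi, rsi, rdx, rcx, r8, r9)
  // * 8 bytes each; 176 == 48 + 8 SSE argument registers (xmm0-xmm7) * 16
  // bytes each, per the SysV AMD64 register save area layout.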

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
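
  // For example, under this approximation: i32 and i8* classify as
  // AK_GeneralPurpose, double and <4 x float> as AK_FloatingPoint, and
  // everything else (i128, aggregates passed by value, etc.) as AK_Memory.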

  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
        GpOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
        FpOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
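    // For reference, the SysV AMD64 __va_list_tag layout these constants
    // come from is:
    //   struct __va_list_tag {
    //     unsigned gp_offset;       // offset  0
    //     unsigned fp_offset;       // offset  4
    //     void *overflow_arg_area;  // offset  8
    //     void *reg_save_area;      // offset 16
    //   };                          // sizeof == 24
    // The same offsets (8 and 16) are used in finalizeInstrumentation below.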
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void visitVACopyInst(VACopyInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void finalizeInstrumentation() {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr =
        getShadowPtrForVAArgument(VAArgTLSCopy, IRB, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
    }
  }
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  return new VarArgAMD64Helper(Func, Msan, Visitor);
}

}  // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttribute(AttributeSet::FunctionIndex,
                    Attribute::get(F.getContext(), B));

  return Visitor.runOnFunction();
}