ThreadSafety.cpp revision 96fac6a7fe89deff7860e536febbd4ae17bb57f3
1//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// An intra-procedural analysis for thread safety (e.g. deadlocks and race
11// conditions), based on an annotation system.
12//
13// See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more
14// information.
15//
16//===----------------------------------------------------------------------===//
17
18#include "clang/Analysis/Analyses/ThreadSafety.h"
19#include "clang/Analysis/Analyses/PostOrderCFGView.h"
20#include "clang/Analysis/AnalysisContext.h"
21#include "clang/Analysis/CFG.h"
22#include "clang/Analysis/CFGStmtMap.h"
23#include "clang/AST/DeclCXX.h"
24#include "clang/AST/ExprCXX.h"
25#include "clang/AST/StmtCXX.h"
26#include "clang/AST/StmtVisitor.h"
27#include "clang/Basic/SourceManager.h"
28#include "clang/Basic/SourceLocation.h"
29#include "clang/Basic/OperatorKinds.h"
30#include "llvm/ADT/BitVector.h"
31#include "llvm/ADT/FoldingSet.h"
32#include "llvm/ADT/ImmutableMap.h"
33#include "llvm/ADT/PostOrderIterator.h"
34#include "llvm/ADT/SmallVector.h"
35#include "llvm/ADT/StringRef.h"
36#include "llvm/Support/raw_ostream.h"
37#include <algorithm>
38#include <utility>
39#include <vector>
40
41using namespace clang;
42using namespace thread_safety;
43
44// Key method definition
45ThreadSafetyHandler::~ThreadSafetyHandler() {}
46
47namespace {
48
49/// \brief A MutexID object uniquely identifies a particular mutex, and
50/// is built from an Expr* (i.e. calling a lock function).
51///
52/// Thread-safety analysis works by comparing lock expressions.  Within the
53/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
54/// a particular mutex object at run-time.  Subsequent occurrences of the same
55/// expression (where "same" means syntactic equality) will refer to the same
56/// run-time object if three conditions hold:
57/// (1) Local variables in the expression, such as "x", have not changed.
58/// (2) Values on the heap that affect the expression have not changed.
59/// (3) The expression involves only pure function calls.
60///
61/// The current implementation assumes, but does not verify, that multiple uses
62/// of the same lock expression satisfy these criteria.
63///
64/// Clang introduces an additional wrinkle, which is that it is difficult to
65/// derive canonical expressions, or compare expressions directly for equality.
66/// Thus, we identify a mutex not by an Expr, but by the list of named
67/// declarations that are referenced by the Expr.  In other words,
68/// x->foo->bar.mu will be a four-element vector with the Decls for
69/// mu, bar, foo, and x.  The vector will uniquely identify the expression
70/// for all practical purposes.  Null is used to denote 'this'.
71///
72/// Note we will need to perform substitution on "this" and function parameter
73/// names when constructing a lock expression.
74///
75/// For example:
76/// class C { Mutex Mu;  void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
77/// void myFunc(C *X) { ... X->lock() ... }
78/// The original expression for the mutex acquired by myFunc is "this->Mu", but
79/// "X" is substituted for "this" so we get X->Mu();
80///
81/// For another example:
82/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
83/// MyList *MyL;
84/// foo(MyL);  // requires lock MyL->Mu to be held
85class MutexID {
86  SmallVector<NamedDecl*, 2> DeclSeq;
87
88  /// \brief Encapsulates the lexical context of a function call.  The lexical
89  /// context includes the arguments to the call, including the implicit object
90  /// argument.  When an attribute containing a mutex expression is attached to
91  /// a method, the expression may refer to formal parameters of the method.
92  /// Actual arguments must be substituted for formal parameters to derive
93  /// the appropriate mutex expression in the lexical context where the function
94  /// is called.  PrevCtx holds the context in which the arguments themselves
95  /// should be evaluated; multiple calling contexts can be chained together
96  /// by the lock_returned attribute.
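  ///
  /// For example (an illustrative sketch; LOCK_RETURNED and
  /// EXCLUSIVE_LOCKS_REQUIRED are assumed to expand to the corresponding
  /// attributes, and all other names are hypothetical):
  ///   class Cache {
  ///     Mutex Mu;
  ///     Mutex* getMu() LOCK_RETURNED(Mu);
  ///     void update() EXCLUSIVE_LOCKS_REQUIRED(getMu());
  ///   };
  ///   void f(Cache* C) { ... C->update() ... }
  /// Resolving the mutex required by C->update() chains two contexts: the
  /// inner one substitutes the lock_returned expression "Mu" for the call to
  /// getMu(), and the outer one substitutes "C" for "this", yielding C->Mu.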
97  struct CallingContext {
98    const NamedDecl* AttrDecl;  // The decl to which the attribute is attached.
99    Expr*            SelfArg;   // Implicit object argument -- e.g. 'this'
100    unsigned         NumArgs;   // Number of funArgs
101    Expr**           FunArgs;   // Function arguments
102    CallingContext*  PrevCtx;   // The previous context; or 0 if none.
103
104    CallingContext(const NamedDecl* D = 0, Expr* S = 0,
105                   unsigned N = 0, Expr** A = 0, CallingContext* P = 0)
106      : AttrDecl(D), SelfArg(S), NumArgs(N), FunArgs(A), PrevCtx(P)
107    { }
108  };
109
110  /// Build a Decl sequence representing the lock from the given expression.
111  /// Recursive function that terminates on DeclRefExpr.
112  /// Note: this function merely creates a MutexID; it does not check to
113  /// ensure that the original expression is a valid mutex expression.
114  void buildMutexID(Expr *Exp, CallingContext* CallCtx) {
115    if (!Exp) {
116      DeclSeq.clear();
117      return;
118    }
119
120    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
121      NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
122      ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
123      if (PV) {
124        FunctionDecl *FD =
125          cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
126        unsigned i = PV->getFunctionScopeIndex();
127
128        if (CallCtx && CallCtx->FunArgs &&
129            FD == CallCtx->AttrDecl->getCanonicalDecl()) {
130          // Substitute call arguments for references to function parameters
131          assert(i < CallCtx->NumArgs);
132          buildMutexID(CallCtx->FunArgs[i], CallCtx->PrevCtx);
133          return;
134        }
135        // Map the param back to the param of the original function declaration.
136        DeclSeq.push_back(FD->getParamDecl(i));
137        return;
138      }
139      // Not a function parameter -- just store the reference.
140      DeclSeq.push_back(ND);
141    } else if (isa<CXXThisExpr>(Exp)) {
142      // Substitute parent for 'this'
143      if (CallCtx && CallCtx->SelfArg)
144        buildMutexID(CallCtx->SelfArg, CallCtx->PrevCtx);
145      else {
146        DeclSeq.push_back(0);  // Use 0 to represent 'this'.
147        return;  // mutexID is still valid in this case
148      }
149    } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
150      NamedDecl *ND = ME->getMemberDecl();
151      DeclSeq.push_back(ND);
152      buildMutexID(ME->getBase(), CallCtx);
153    } else if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
154      // When calling a function with a lock_returned attribute, replace
155      // the function call with the expression in lock_returned.
156      if (LockReturnedAttr* At =
157            CMCE->getMethodDecl()->getAttr<LockReturnedAttr>()) {
158        CallingContext LRCallCtx(CMCE->getMethodDecl());
159        LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
160        LRCallCtx.NumArgs = CMCE->getNumArgs();
161        LRCallCtx.FunArgs = CMCE->getArgs();
162        LRCallCtx.PrevCtx = CallCtx;
163        buildMutexID(At->getArg(), &LRCallCtx);
164        return;
165      }
166      // Hack to treat smart pointers and iterators as pointers;
167      // ignore any method named get().
168      if (CMCE->getMethodDecl()->getNameAsString() == "get" &&
169          CMCE->getNumArgs() == 0) {
170        buildMutexID(CMCE->getImplicitObjectArgument(), CallCtx);
171        return;
172      }
173      DeclSeq.push_back(CMCE->getMethodDecl()->getCanonicalDecl());
174      buildMutexID(CMCE->getImplicitObjectArgument(), CallCtx);
175      unsigned NumCallArgs = CMCE->getNumArgs();
176      Expr** CallArgs = CMCE->getArgs();
177      for (unsigned i = 0; i < NumCallArgs; ++i) {
178        buildMutexID(CallArgs[i], CallCtx);
179      }
180    } else if (CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
181      if (LockReturnedAttr* At =
182            CE->getDirectCallee()->getAttr<LockReturnedAttr>()) {
183        CallingContext LRCallCtx(CE->getDirectCallee());
184        LRCallCtx.NumArgs = CE->getNumArgs();
185        LRCallCtx.FunArgs = CE->getArgs();
186        LRCallCtx.PrevCtx = CallCtx;
187        buildMutexID(At->getArg(), &LRCallCtx);
188        return;
189      }
190      // Treat smart pointers and iterators as pointers;
191      // ignore the * and -> operators.
192      if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) {
193        OverloadedOperatorKind k = OE->getOperator();
194        if (k == OO_Arrow || k == OO_Star) {
195          buildMutexID(OE->getArg(0), CallCtx);
196          return;
197        }
198      }
199      buildMutexID(CE->getCallee(), CallCtx);
200      unsigned NumCallArgs = CE->getNumArgs();
201      Expr** CallArgs = CE->getArgs();
202      for (unsigned i = 0; i < NumCallArgs; ++i) {
203        buildMutexID(CallArgs[i], CallCtx);
204      }
205    } else if (BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
206      buildMutexID(BOE->getLHS(), CallCtx);
207      buildMutexID(BOE->getRHS(), CallCtx);
208    } else if (UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
209      buildMutexID(UOE->getSubExpr(), CallCtx);
210    } else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(Exp)) {
211      buildMutexID(ASE->getBase(), CallCtx);
212      buildMutexID(ASE->getIdx(), CallCtx);
213    } else if (AbstractConditionalOperator *CE =
214                 dyn_cast<AbstractConditionalOperator>(Exp)) {
215      buildMutexID(CE->getCond(), CallCtx);
216      buildMutexID(CE->getTrueExpr(), CallCtx);
217      buildMutexID(CE->getFalseExpr(), CallCtx);
218    } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
219      buildMutexID(CE->getCond(), CallCtx);
220      buildMutexID(CE->getLHS(), CallCtx);
221      buildMutexID(CE->getRHS(), CallCtx);
222    } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
223      buildMutexID(CE->getSubExpr(), CallCtx);
224    } else if (ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
225      buildMutexID(PE->getSubExpr(), CallCtx);
226    } else if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) {
227      buildMutexID(EWC->getSubExpr(), CallCtx);
228    } else if (CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) {
229      buildMutexID(E->getSubExpr(), CallCtx);
230    } else if (isa<CharacterLiteral>(Exp) ||
231               isa<CXXNullPtrLiteralExpr>(Exp) ||
232               isa<GNUNullExpr>(Exp) ||
233               isa<CXXBoolLiteralExpr>(Exp) ||
234               isa<FloatingLiteral>(Exp) ||
235               isa<ImaginaryLiteral>(Exp) ||
236               isa<IntegerLiteral>(Exp) ||
237               isa<StringLiteral>(Exp) ||
238               isa<ObjCStringLiteral>(Exp)) {
239      return;  // FIXME: Ignore literals for now
240    } else {
241      // Ignore.  FIXME: mark as invalid expression?
242    }
243  }
244
245  /// \brief Construct a MutexID from an expression.
246  /// \param MutexExp The original mutex expression within an attribute
247  /// \param DeclExp An expression involving the Decl on which the attribute
248  ///        occurs.
249  /// \param D  The declaration to which the lock/unlock attribute is attached.
250  void buildMutexIDFromExp(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
251    CallingContext CallCtx(D);
252
253    // If we are processing a raw attribute expression, there are no substitutions.
254    if (DeclExp == 0) {
255      buildMutexID(MutexExp, 0);
256      return;
257    }
258
259    // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
260    // for formal parameters when we call buildMutexID later.
261    if (MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
262      CallCtx.SelfArg = ME->getBase();
263    } else if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
264      CallCtx.SelfArg = CE->getImplicitObjectArgument();
265      CallCtx.NumArgs = CE->getNumArgs();
266      CallCtx.FunArgs = CE->getArgs();
267    } else if (CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
268      CallCtx.NumArgs = CE->getNumArgs();
269      CallCtx.FunArgs = CE->getArgs();
270    } else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
271      CallCtx.SelfArg = 0;  // FIXME -- get the parent from DeclStmt
272      CallCtx.NumArgs = CE->getNumArgs();
273      CallCtx.FunArgs = CE->getArgs();
274    } else if (D && isa<CXXDestructorDecl>(D)) {
275      // There's no such thing as a "destructor call" in the AST.
276      CallCtx.SelfArg = DeclExp;
277    }
278
279    // If the attribute has no arguments, then assume the argument is "this".
280    if (MutexExp == 0) {
281      buildMutexID(CallCtx.SelfArg, 0);
282      return;
283    }
284
285    // For most attributes.
286    buildMutexID(MutexExp, &CallCtx);
287  }
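
  // For example (a sketch of the substitution performed above): for a call
  // X->lock(), where lock() is annotated EXCLUSIVE_LOCK_FUNCTION(this->Mu),
  // DeclExp is the CXXMemberCallExpr for X->lock(), so SelfArg becomes "X".
  // buildMutexID then substitutes "X" for "this" in "this->Mu", producing
  // the DeclSeq [Mu, X], i.e. the mutex X->Mu.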
288
289public:
290  explicit MutexID(clang::Decl::EmptyShell e) {
291    DeclSeq.clear();
292  }
293
294  /// \param MutexExp The original mutex expression within an attribute
295  /// \param DeclExp An expression involving the Decl on which the attribute
296  ///        occurs.
297  /// \param D  The declaration to which the lock/unlock attribute is attached.
298  /// Caller must check isValid() after construction.
299  MutexID(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
300    buildMutexIDFromExp(MutexExp, DeclExp, D);
301  }
302
303  /// Return true if this is a valid decl sequence.
304  /// Caller must call this by hand after construction to handle errors.
305  bool isValid() const {
306    return !DeclSeq.empty();
307  }
308
309  /// Issue a warning about an invalid lock expression
310  static void warnInvalidLock(ThreadSafetyHandler &Handler, Expr* MutexExp,
311                              Expr *DeclExp, const NamedDecl* D) {
312    SourceLocation Loc;
313    if (DeclExp)
314      Loc = DeclExp->getExprLoc();
315
316    // FIXME: add a note about the attribute location in MutexExp or D
317    if (Loc.isValid())
318      Handler.handleInvalidLockExp(Loc);
319  }
320
321  bool operator==(const MutexID &other) const {
322    return DeclSeq == other.DeclSeq;
323  }
324
325  bool operator!=(const MutexID &other) const {
326    return !(*this == other);
327  }
328
329  // SmallVector overloads operator< to do lexicographic ordering. Note that
330  // we use pointer equality (and <) to compare NamedDecls. This means the order
331  // of MutexIDs in a lockset is nondeterministic. To emit diagnostics in a
332  // deterministic order, we must sort them by SourceLocation when iterating
333  // through this lockset.
334  bool operator<(const MutexID &other) const {
335    return DeclSeq < other.DeclSeq;
336  }
337
338  /// \brief Returns the name of the first Decl in the list for a given MutexID;
339  /// e.g. the lock expression foo.bar() has name "bar".
340  /// The caret will point unambiguously to the lock expression, so using this
341  /// name in diagnostics is a way to get simple and consistent mutex names.
342  /// We do not want to output the entire expression text for security reasons.
343  std::string getName() const {
344    assert(isValid());
345    if (!DeclSeq.front())
346      return "this";  // Use 0 to represent 'this'.
347    return DeclSeq.front()->getNameAsString();
348  }
349
350  void Profile(llvm::FoldingSetNodeID &ID) const {
351    for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
352         E = DeclSeq.end(); I != E; ++I) {
353      ID.AddPointer(*I);
354    }
355  }
356};
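
// Typical use of MutexID (a sketch mirroring the calls made later in this
// file): construct it from an attribute argument and the call expression,
// then check validity before updating a lockset.
//   MutexID Mutex(MutexExp, Exp, FunDecl);
//   if (!Mutex.isValid())
//     MutexID::warnInvalidLock(Handler, MutexExp, Exp, FunDecl);
//   else
//     /* add Mutex to, or remove it from, the Lockset */;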
357
358
359/// \brief This is a helper class that stores info about the most recent
360/// acquire of a lock.
361///
362/// The main body of the analysis maps MutexIDs to LockDatas.
363struct LockData {
364  SourceLocation AcquireLoc;
365
366  /// \brief LKind stores whether a lock is held shared or exclusively.
367  /// Note that this analysis does not currently support either re-entrant
368  /// locking or lock "upgrading" and "downgrading" between exclusive and
369  /// shared.
370  ///
371  /// FIXME: add support for re-entrant locking and lock up/downgrading
372  LockKind LKind;
373  bool     Managed;            // for ScopedLockable objects
374  MutexID  UnderlyingMutex;    // for ScopedLockable objects
375
376  LockData(SourceLocation AcquireLoc, LockKind LKind, bool M = false)
377    : AcquireLoc(AcquireLoc), LKind(LKind), Managed(M),
378      UnderlyingMutex(Decl::EmptyShell())
379  {}
380
381  LockData(SourceLocation AcquireLoc, LockKind LKind, const MutexID &Mu)
382    : AcquireLoc(AcquireLoc), LKind(LKind), Managed(false),
383      UnderlyingMutex(Mu)
384  {}
385
386  bool operator==(const LockData &other) const {
387    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
388  }
389
390  bool operator!=(const LockData &other) const {
391    return !(*this == other);
392  }
393
394  void Profile(llvm::FoldingSetNodeID &ID) const {
395    ID.AddInteger(AcquireLoc.getRawEncoding());
396    ID.AddInteger(LKind);
397  }
398};
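
// For a scoped-lockable object (a sketch; MutexLock is a hypothetical RAII
// guard class marked SCOPED_LOCKABLE, whose constructor is annotated as an
// exclusive lock function):
//   MutexLock Guard(&Mu);
// the analysis records LockData for Mu with Managed = true, plus an entry
// for the variable "Guard" whose UnderlyingMutex is Mu, so that the
// destructor call for Guard also releases Mu.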
399
400
401/// A Lockset maps each MutexID (defined above) to information about how it has
402/// been locked.
403typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
404typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
405
406class LocalVariableMap;
407
408/// A side (entry or exit) of a CFG node.
409enum CFGBlockSide { CBS_Entry, CBS_Exit };
410
411/// CFGBlockInfo is a struct which contains all the information that is
412/// maintained for each block in the CFG.  See LocalVariableMap for more
413/// information about the contexts.
414struct CFGBlockInfo {
415  Lockset EntrySet;             // Lockset held at entry to block
416  Lockset ExitSet;              // Lockset held at exit from block
417  LocalVarContext EntryContext; // Context held at entry to block
418  LocalVarContext ExitContext;  // Context held at exit from block
419  SourceLocation EntryLoc;      // Location of first statement in block
420  SourceLocation ExitLoc;       // Location of last statement in block.
421  unsigned EntryIndex;          // Used to replay contexts later
422
423  const Lockset &getSet(CFGBlockSide Side) const {
424    return Side == CBS_Entry ? EntrySet : ExitSet;
425  }
426  SourceLocation getLocation(CFGBlockSide Side) const {
427    return Side == CBS_Entry ? EntryLoc : ExitLoc;
428  }
429
430private:
431  CFGBlockInfo(Lockset EmptySet, LocalVarContext EmptyCtx)
432    : EntrySet(EmptySet), ExitSet(EmptySet),
433      EntryContext(EmptyCtx), ExitContext(EmptyCtx)
434  { }
435
436public:
437  static CFGBlockInfo getEmptyBlockInfo(Lockset::Factory &F,
438                                        LocalVariableMap &M);
439};
440
441
442
443// A LocalVariableMap maintains a map from local variables to their currently
444// valid definitions.  It provides SSA-like functionality when traversing the
445// CFG.  Like SSA, each definition or assignment to a variable is assigned a
446// unique name (an integer), which acts as the SSA name for that definition.
447// The total set of names is shared among all CFG basic blocks.
448// Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
449// with their SSA names.  Instead, we compute a Context for each point in the
450// code, which maps local variables to the appropriate SSA-name.  This map
451// changes with each assignment.
452//
453// The map is computed in a single pass over the CFG.  Subsequent analyses can
454// then query the map to find the appropriate Context for a statement, and use
455// that Context to look up the definitions of variables.
456class LocalVariableMap {
457public:
458  typedef LocalVarContext Context;
459
460  /// A VarDefinition consists of an expression, representing the value of the
461  /// variable, along with the context in which that expression should be
462  /// interpreted.  A reference VarDefinition does not itself contain this
463/// information, but instead refers to a previous VarDefinition by index.
464  struct VarDefinition {
465  public:
466    friend class LocalVariableMap;
467
468    const NamedDecl *Dec;  // The original declaration for this variable.
469    const Expr *Exp;       // The expression for this variable, OR
470    unsigned Ref;          // Reference to another VarDefinition
471    Context Ctx;           // The map with which Exp should be interpreted.
472
473    bool isReference() { return !Exp; }
474
475  private:
476    // Create ordinary variable definition
477    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
478      : Dec(D), Exp(E), Ref(0), Ctx(C)
479    { }
480
481    // Create reference to previous definition
482    VarDefinition(const NamedDecl *D, unsigned R, Context C)
483      : Dec(D), Exp(0), Ref(R), Ctx(C)
484    { }
485  };
486
487private:
488  Context::Factory ContextFactory;
489  std::vector<VarDefinition> VarDefinitions;
490  std::vector<unsigned> CtxIndices;
491  std::vector<std::pair<Stmt*, Context> > SavedContexts;
492
493public:
494  LocalVariableMap() {
495    // index 0 is a placeholder for undefined variables (aka phi-nodes).
496    VarDefinitions.push_back(VarDefinition(0, 0u, getEmptyContext()));
497  }
498
499  /// Look up a definition, within the given context.
500  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
501    const unsigned *i = Ctx.lookup(D);
502    if (!i)
503      return 0;
504    assert(*i < VarDefinitions.size());
505    return &VarDefinitions[*i];
506  }
507
508  /// Look up the definition for D within the given context.  Returns
509  /// NULL if the expression is not statically known.  If successful, also
510  /// modifies Ctx to hold the context of the returned Expr.
511  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
512    const unsigned *P = Ctx.lookup(D);
513    if (!P)
514      return 0;
515
516    unsigned i = *P;
517    while (i > 0) {
518      if (VarDefinitions[i].Exp) {
519        Ctx = VarDefinitions[i].Ctx;
520        return VarDefinitions[i].Exp;
521      }
522      i = VarDefinitions[i].Ref;
523    }
524    return 0;
525  }
526
527  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
528
529  /// Return the next context after processing S.  This function is used by
530  /// clients of the class to get the appropriate context when traversing the
531  /// CFG.  It must be called for every assignment or DeclStmt.
532  Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
533    if (SavedContexts[CtxIndex+1].first == S) {
534      CtxIndex++;
535      Context Result = SavedContexts[CtxIndex].second;
536      return Result;
537    }
538    return C;
539  }
540
541  void dumpVarDefinitionName(unsigned i) {
542    if (i == 0) {
543      llvm::errs() << "Undefined";
544      return;
545    }
546    const NamedDecl *Dec = VarDefinitions[i].Dec;
547    if (!Dec) {
548      llvm::errs() << "<<NULL>>";
549      return;
550    }
551    Dec->printName(llvm::errs());
552    llvm::errs() << "." << i << " " << ((void*) Dec);
553  }
554
555  /// Dumps an ASCII representation of the variable map to llvm::errs()
556  void dump() {
557    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
558      const Expr *Exp = VarDefinitions[i].Exp;
559      unsigned Ref = VarDefinitions[i].Ref;
560
561      dumpVarDefinitionName(i);
562      llvm::errs() << " = ";
563      if (Exp) Exp->dump();
564      else {
565        dumpVarDefinitionName(Ref);
566        llvm::errs() << "\n";
567      }
568    }
569  }
570
571  /// Dumps an ASCII representation of a Context to llvm::errs()
572  void dumpContext(Context C) {
573    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
574      const NamedDecl *D = I.getKey();
575      D->printName(llvm::errs());
576      const unsigned *i = C.lookup(D);
577      llvm::errs() << " -> ";
578      dumpVarDefinitionName(*i);
579      llvm::errs() << "\n";
580    }
581  }
582
583  /// Builds the variable map.
584  void traverseCFG(CFG *CFGraph, PostOrderCFGView *SortedGraph,
585                     std::vector<CFGBlockInfo> &BlockInfo);
586
587protected:
588  // Get the current context index
589  unsigned getContextIndex() { return SavedContexts.size()-1; }
590
591  // Save the current context for later replay
592  void saveContext(Stmt *S, Context C) {
593    SavedContexts.push_back(std::make_pair(S,C));
594  }
595
596  // Adds a new definition to the given context, and returns a new context.
597  // This method should be called when declaring a new variable.
598  Context addDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
599    assert(!Ctx.contains(D));
600    unsigned newID = VarDefinitions.size();
601    Context NewCtx = ContextFactory.add(Ctx, D, newID);
602    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
603    return NewCtx;
604  }
605
606  // Add a new reference to an existing definition.
607  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
608    unsigned newID = VarDefinitions.size();
609    Context NewCtx = ContextFactory.add(Ctx, D, newID);
610    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
611    return NewCtx;
612  }
613
614  // Updates a definition only if that definition is already in the map.
615  // This method should be called when assigning to an existing variable.
616  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
617    if (Ctx.contains(D)) {
618      unsigned newID = VarDefinitions.size();
619      Context NewCtx = ContextFactory.remove(Ctx, D);
620      NewCtx = ContextFactory.add(NewCtx, D, newID);
621      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
622      return NewCtx;
623    }
624    return Ctx;
625  }
626
627  // Removes a definition from the context, but keeps the variable name
628  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
629  Context clearDefinition(const NamedDecl *D, Context Ctx) {
630    Context NewCtx = Ctx;
631    if (NewCtx.contains(D)) {
632      NewCtx = ContextFactory.remove(NewCtx, D);
633      NewCtx = ContextFactory.add(NewCtx, D, 0);
634    }
635    return NewCtx;
636  }
637
638  // Remove a definition entirely from the context.
639  Context removeDefinition(const NamedDecl *D, Context Ctx) {
640    Context NewCtx = Ctx;
641    if (NewCtx.contains(D)) {
642      NewCtx = ContextFactory.remove(NewCtx, D);
643    }
644    return NewCtx;
645  }
646
647  Context intersectContexts(Context C1, Context C2);
648  Context createReferenceContext(Context C);
649  void intersectBackEdge(Context C1, Context C2);
650
651  friend class VarMapBuilder;
652};
653
654
655// This has to be defined after LocalVariableMap.
656CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(Lockset::Factory &F,
657                                             LocalVariableMap &M) {
658  return CFGBlockInfo(F.getEmptyMap(), M.getEmptyContext());
659}
660
661
662/// Visitor which builds a LocalVariableMap
663class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
664public:
665  LocalVariableMap* VMap;
666  LocalVariableMap::Context Ctx;
667
668  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
669    : VMap(VM), Ctx(C) {}
670
671  void VisitDeclStmt(DeclStmt *S);
672  void VisitBinaryOperator(BinaryOperator *BO);
673};
674
675
676// Add new local variables to the variable map
677void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
678  bool modifiedCtx = false;
679  DeclGroupRef DGrp = S->getDeclGroup();
680  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
681    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
682      Expr *E = VD->getInit();
683
684      // Add local variables with trivial type to the variable map
685      QualType T = VD->getType();
686      if (T.isTrivialType(VD->getASTContext())) {
687        Ctx = VMap->addDefinition(VD, E, Ctx);
688        modifiedCtx = true;
689      }
690    }
691  }
692  if (modifiedCtx)
693    VMap->saveContext(S, Ctx);
694}
695
696// Update local variable definitions in variable map
697void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
698  if (!BO->isAssignmentOp())
699    return;
700
701  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
702
703  // Update the variable map and current context.
704  if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
705    ValueDecl *VDec = DRE->getDecl();
706    if (Ctx.lookup(VDec)) {
707      if (BO->getOpcode() == BO_Assign)
708        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
709      else
710        // FIXME -- handle compound assignment operators
711        Ctx = VMap->clearDefinition(VDec, Ctx);
712      VMap->saveContext(BO, Ctx);
713    }
714  }
715}
716
717
718// Computes the intersection of two contexts.  The intersection is the
719// set of variables which have the same definition in both contexts;
720// variables with different definitions are discarded.
721LocalVariableMap::Context
722LocalVariableMap::intersectContexts(Context C1, Context C2) {
723  Context Result = C1;
724  for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
725    const NamedDecl *Dec = I.getKey();
726    unsigned i1 = I.getData();
727    const unsigned *i2 = C2.lookup(Dec);
728    if (!i2)             // variable doesn't exist on second path
729      Result = removeDefinition(Dec, Result);
730    else if (*i2 != i1)  // variable exists, but has different definition
731      Result = clearDefinition(Dec, Result);
732  }
733  return Result;
734}
735
736// For every variable in C, create a new variable that refers to the
737// definition in C.  Return a new context that contains these new variables.
738// (We use this for a naive implementation of SSA on loop back-edges.)
739LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
740  Context Result = getEmptyContext();
741  for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
742    const NamedDecl *Dec = I.getKey();
743    unsigned i = I.getData();
744    Result = addReference(Dec, i, Result);
745  }
746  return Result;
747}
748
749// This routine also takes the intersection of C1 and C2, but it does so by
750// altering the VarDefinitions.  C1 must be the result of an earlier call to
751// createReferenceContext.
752void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
753  for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
754    const NamedDecl *Dec = I.getKey();
755    unsigned i1 = I.getData();
756    VarDefinition *VDef = &VarDefinitions[i1];
757    assert(VDef->isReference());
758
759    const unsigned *i2 = C2.lookup(Dec);
760    if (!i2 || (*i2 != i1))
761      VDef->Ref = 0;    // Mark this variable as undefined
762  }
763}
764
765
766// Traverse the CFG in topological order, so all predecessors of a block
767// (excluding back-edges) are visited before the block itself.  At
768// each point in the code, we calculate a Context, which holds the set of
769// variable definitions which are visible at that point in execution.
770// Visible variables are mapped to their definitions using an array that
771// contains all definitions.
772//
773// At join points in the CFG, the set is computed as the intersection of
774// the incoming sets along each edge, e.g.
775//
776//                       { Context                 | VarDefinitions }
777//   int x = 0;          { x -> x1                 | x1 = 0 }
778//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
779//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
780//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
781//   ...                 { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
782//
783// This is essentially a simpler and more naive version of the standard SSA
784// algorithm.  Those definitions that remain in the intersection are from blocks
785// that strictly dominate the current block.  We do not bother to insert proper
786// phi nodes, because they are not used in our analysis; instead, wherever
787// a phi node would be required, we simply remove that definition from the
788// context (e.g. x above).
789//
790// The initial traversal does not capture back-edges, so those need to be
791// handled on a separate pass.  Whenever the first pass encounters an
792// incoming back edge, it duplicates the context, creating new definitions
793// that refer back to the originals.  (These correspond to places where SSA
794// might have to insert a phi node.)  On the second pass, these definitions are
795// set to NULL if the variable has changed on the back-edge (i.e. a phi
796// node was actually required.)  E.g.
797//
798//                       { Context           | VarDefinitions }
799//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
800//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
801//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
802//   ...                 { y -> y1           | x3 = x2 + 1, ... }
803//
804void LocalVariableMap::traverseCFG(CFG *CFGraph,
805                                   PostOrderCFGView *SortedGraph,
806                                   std::vector<CFGBlockInfo> &BlockInfo) {
807  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
808
809  CtxIndices.resize(CFGraph->getNumBlockIDs());
810
811  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
812       E = SortedGraph->end(); I!= E; ++I) {
813    const CFGBlock *CurrBlock = *I;
814    int CurrBlockID = CurrBlock->getBlockID();
815    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
816
817    VisitedBlocks.insert(CurrBlock);
818
819    // Calculate the entry context for the current block
820    bool HasBackEdges = false;
821    bool CtxInit = true;
822    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
823         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {
824      // If *PI -> CurrBlock is a back edge, skip it.
825      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI)) {
826        HasBackEdges = true;
827        continue;
828      }
829
830      int PrevBlockID = (*PI)->getBlockID();
831      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
832
833      if (CtxInit) {
834        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
835        CtxInit = false;
836      }
837      else {
838        CurrBlockInfo->EntryContext =
839          intersectContexts(CurrBlockInfo->EntryContext,
840                            PrevBlockInfo->ExitContext);
841      }
842    }
843
844    // Duplicate the context if we have back-edges, so we can call
845    // intersectBackEdges later.
846    if (HasBackEdges)
847      CurrBlockInfo->EntryContext =
848        createReferenceContext(CurrBlockInfo->EntryContext);
849
850    // Create a starting context index for the current block
851    saveContext(0, CurrBlockInfo->EntryContext);
852    CurrBlockInfo->EntryIndex = getContextIndex();
853
854    // Visit all the statements in the basic block.
855    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
856    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
857         BE = CurrBlock->end(); BI != BE; ++BI) {
858      switch (BI->getKind()) {
859        case CFGElement::Statement: {
860          const CFGStmt *CS = cast<CFGStmt>(&*BI);
861          VMapBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
862          break;
863        }
864        default:
865          break;
866      }
867    }
868    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
869
870    // Mark variables on back edges as "unknown" if they've been changed.
871    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
872         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {
873      // If CurrBlock -> *SI is *not* a back edge, skip it.
874      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
875        continue;
876
877      CFGBlock *FirstLoopBlock = *SI;
878      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
879      Context LoopEnd   = CurrBlockInfo->ExitContext;
880      intersectBackEdge(LoopBegin, LoopEnd);
881    }
882  }
883
884  // Put an extra entry at the end of the indexed context array
885  unsigned exitID = CFGraph->getExit().getBlockID();
886  saveContext(0, BlockInfo[exitID].ExitContext);
887}
888
889/// Find the appropriate source locations to use when producing diagnostics for
890/// each block in the CFG.
891static void findBlockLocations(CFG *CFGraph,
892                               PostOrderCFGView *SortedGraph,
893                               std::vector<CFGBlockInfo> &BlockInfo) {
894  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
895       E = SortedGraph->end(); I!= E; ++I) {
896    const CFGBlock *CurrBlock = *I;
897    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
898
899    // Find the source location of the last statement in the block, if the
900    // block is not empty.
901    if (const Stmt *S = CurrBlock->getTerminator()) {
902      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
903    } else {
904      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
905           BE = CurrBlock->rend(); BI != BE; ++BI) {
906        // FIXME: Handle other CFGElement kinds.
907        if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
908          CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
909          break;
910        }
911      }
912    }
913
914    if (!CurrBlockInfo->ExitLoc.isInvalid()) {
915      // This block contains at least one statement. Find the source location
916      // of the first statement in the block.
917      for (CFGBlock::const_iterator BI = CurrBlock->begin(),
918           BE = CurrBlock->end(); BI != BE; ++BI) {
919        // FIXME: Handle other CFGElement kinds.
920        if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
921          CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
922          break;
923        }
924      }
925    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
926               CurrBlock != &CFGraph->getExit()) {
927      // The block is empty, and has a single predecessor. Use its exit
928      // location.
929      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
930          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
931    }
932  }
933}
934
935/// \brief Class which implements the core thread safety analysis routines.
936class ThreadSafetyAnalyzer {
937  friend class BuildLockset;
938
939  ThreadSafetyHandler       &Handler;
940  Lockset::Factory          LocksetFactory;
941  LocalVariableMap          LocalVarMap;
942  std::vector<CFGBlockInfo> BlockInfo;
943
944public:
945  ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}
946
947  Lockset addLock(const Lockset &LSet, const MutexID &Mutex,
948                  const LockData &LDat, bool Warn=true);
949  Lockset addLock(const Lockset &LSet, Expr *MutexExp, const NamedDecl *D,
950                  const LockData &LDat, bool Warn=true);
951  Lockset removeLock(const Lockset &LSet, const MutexID &Mutex,
952                     SourceLocation UnlockLoc,
953                     bool Warn=true, bool FullyRemove=false);
954
955  template <class AttrType>
956  Lockset addLocksToSet(const Lockset &LSet, LockKind LK, AttrType *Attr,
957                        Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
958  Lockset removeLocksFromSet(const Lockset &LSet,
959                             UnlockFunctionAttr *Attr,
960                             Expr *Exp, NamedDecl* FunDecl);
961
962  template <class AttrType>
963  Lockset addTrylock(const Lockset &LSet,
964                     LockKind LK, AttrType *Attr, Expr *Exp, NamedDecl *FunDecl,
965                     const CFGBlock* PredBlock, const CFGBlock *CurrBlock,
966                     Expr *BrE, bool Neg);
967  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
968                                     bool &Negate);
969
970  Lockset getEdgeLockset(const Lockset &ExitSet,
971                         const CFGBlock* PredBlock,
972                         const CFGBlock *CurrBlock);
973
974  Lockset intersectAndWarn(const Lockset &LSet1, const Lockset &LSet2,
975                           SourceLocation JoinLoc,
976                           LockErrorKind LEK1, LockErrorKind LEK2);
977
978  Lockset intersectAndWarn(const Lockset &LSet1, const Lockset &LSet2,
979                           SourceLocation JoinLoc, LockErrorKind LEK1) {
980    return intersectAndWarn(LSet1, LSet2, JoinLoc, LEK1, LEK1);
981  }
982
983  void runAnalysis(AnalysisDeclContext &AC);
984};
985
986
987/// \brief Add a new lock to the lockset, warning if the lock is already there.
988/// \param Mutex -- the Mutex expression for the lock
989/// \param LDat  -- the LockData for the lock
990Lockset ThreadSafetyAnalyzer::addLock(const Lockset &LSet,
991                                      const MutexID &Mutex,
992                                      const LockData &LDat,
993                                      bool Warn) {
994  // FIXME: deal with acquired before/after annotations.
995  // FIXME: Don't always warn when we have support for reentrant locks.
996  if (LSet.lookup(Mutex)) {
997    if (Warn)
998      Handler.handleDoubleLock(Mutex.getName(), LDat.AcquireLoc);
999    return LSet;
1000  } else {
1001    return LocksetFactory.add(LSet, Mutex, LDat);
1002  }
1003}
1004
1005/// \brief Construct a new MutexID and add it to the lockset.
1006Lockset ThreadSafetyAnalyzer::addLock(const Lockset &LSet,
1007                                      Expr *MutexExp, const NamedDecl *D,
1008                                      const LockData &LDat,
1009                                      bool Warn) {
1010  MutexID Mutex(MutexExp, 0, D);
1011  if (!Mutex.isValid()) {
1012    MutexID::warnInvalidLock(Handler, MutexExp, 0, D);
1013    return LSet;
1014  }
1015  return addLock(LSet, Mutex, LDat, Warn);
1016}
1017
1018
1019/// \brief Remove a lock from the lockset, warning if the lock is not there.
1020/// \param Mutex The MutexID corresponding to the lock to be removed
1021/// \param UnlockLoc The source location of the unlock (only used in error msg)
1022Lockset ThreadSafetyAnalyzer::removeLock(const Lockset &LSet,
1023                                         const MutexID &Mutex,
1024                                         SourceLocation UnlockLoc,
1025                                         bool Warn, bool FullyRemove) {
1026  const LockData *LDat = LSet.lookup(Mutex);
1027  if (!LDat) {
1028    if (Warn)
1029      Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
1030    return LSet;
1031  }
1032  else {
1033    Lockset Result = LSet;
1034    if (LDat->UnderlyingMutex.isValid()) {
1035      // For scoped-lockable vars, remove the mutex associated with this var.
1036      Result = removeLock(Result, LDat->UnderlyingMutex, UnlockLoc,
1037                          false, true);
1038      // Fully remove the object only when the destructor is called
1039      if (FullyRemove)
1040        return LocksetFactory.remove(Result, Mutex);
1041      else
1042        return Result;
1043    }
1044    return LocksetFactory.remove(Result, Mutex);
1045  }
1046}
1047
1048
1049/// \brief This function, parameterized by an attribute type, is used to add a
1050/// set of locks specified as attribute arguments to the lockset.
1051template <typename AttrType>
1052Lockset ThreadSafetyAnalyzer::addLocksToSet(const Lockset &LSet,
1053                                            LockKind LK, AttrType *Attr,
1054                                            Expr *Exp, NamedDecl* FunDecl,
1055                                            VarDecl *VD) {
1056  typedef typename AttrType::args_iterator iterator_type;
1057
1058  SourceLocation ExpLocation = Exp->getExprLoc();
1059
1060  // Figure out if we're calling the constructor of a scoped lockable class.
1061  bool isScopedVar = false;
1062  if (VD) {
1063    if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FunDecl)) {
1064      CXXRecordDecl* PD = CD->getParent();
1065      if (PD && PD->getAttr<ScopedLockableAttr>())
1066        isScopedVar = true;
1067    }
1068  }
1069
1070  if (Attr->args_size() == 0) {
1071    // The mutex held is the "this" object.
1072    MutexID Mutex(0, Exp, FunDecl);
1073    if (!Mutex.isValid()) {
1074      MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
1075      return LSet;
1076    }
1077    else {
1078      return addLock(LSet, Mutex, LockData(ExpLocation, LK));
1079    }
1080  }
1081
1082  Lockset Result = LSet;
1083  for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
1084    MutexID Mutex(*I, Exp, FunDecl);
1085    if (!Mutex.isValid())
1086      MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
1087    else {
1088      if (isScopedVar) {
1089        // Mutex is managed by scoped var -- suppress certain warnings.
1090        Result = addLock(Result, Mutex, LockData(ExpLocation, LK, true));
1091        // For scoped lockable vars, map this var to its underlying mutex.
1092        DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
1093        MutexID SMutex(&DRE, 0, 0);
1094        Result = addLock(Result, SMutex,
1095                         LockData(VD->getLocation(), LK, Mutex));
1096      }
1097      else {
1098        Result = addLock(Result, Mutex, LockData(ExpLocation, LK));
1099      }
1100    }
1101  }
1102  return Result;
1103}
1104
1105/// \brief This function removes a set of locks specified as attribute
1106/// arguments from the lockset.
1107Lockset ThreadSafetyAnalyzer::removeLocksFromSet(const Lockset &LSet,
1108                                                 UnlockFunctionAttr *Attr,
1109                                                 Expr *Exp,
1110                                                 NamedDecl* FunDecl) {
1111  SourceLocation ExpLocation;
1112  if (Exp) ExpLocation = Exp->getExprLoc();
1113  bool Dtor = isa<CXXDestructorDecl>(FunDecl);
1114
1115  if (Attr->args_size() == 0) {
1116    // The mutex held is the "this" object.
1117    MutexID Mu(0, Exp, FunDecl);
1118    if (!Mu.isValid()) {
1119      MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
1120      return LSet;
1121    } else {
1122      return removeLock(LSet, Mu, ExpLocation, true, Dtor);
1123    }
1124  }
1125
1126  Lockset Result = LSet;
1127  for (UnlockFunctionAttr::args_iterator I = Attr->args_begin(),
1128       E = Attr->args_end(); I != E; ++I) {
1129    MutexID Mutex(*I, Exp, FunDecl);
1130    if (!Mutex.isValid())
1131      MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
1132    else
1133      Result = removeLock(Result, Mutex, ExpLocation, true, Dtor);
1134  }
1135  return Result;
1136}
1137
1138
1139/// \brief Add a lock to the set if the current block is in the taken branch
1140/// of a trylock.
1141template <class AttrType>
1142Lockset ThreadSafetyAnalyzer::addTrylock(const Lockset &LSet,
1143                                         LockKind LK, AttrType *Attr,
1144                                         Expr *Exp, NamedDecl *FunDecl,
1145                                         const CFGBlock *PredBlock,
1146                                         const CFGBlock *CurrBlock,
1147                                         Expr *BrE, bool Neg) {
1148  // Find out which branch has the lock
1149  bool branch = false;
1150  if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
1151    branch = BLE->getValue();
1152  }
1153  else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
1154    branch = ILE->getValue().getBoolValue();
1155  }
1156  int branchnum = branch ? 0 : 1;
1157  if (Neg) branchnum = !branchnum;
1158
1159  Lockset Result = LSet;
1160  // If we've taken the trylock branch, then add the lock
1161  int i = 0;
1162  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1163       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
1164    if (*SI == CurrBlock && i == branchnum) {
1165      Result = addLocksToSet(Result, LK, Attr, Exp, FunDecl, 0);
1166    }
1167  }
1168  return Result;
1169}
1170
1171
1172// If Cond can be traced back to a function call, return the call expression.
1173// Negate should be initialized to false by the caller; it will be set to true
1174// if the function call is negated, e.g. if (!mu.tryLock(...)).
1175const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
1176                                                         LocalVarContext C,
1177                                                         bool &Negate) {
1178  if (!Cond)
1179    return 0;
1180
1181  if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
1182    return CallExp;
1183  }
1184  else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
1185    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
1186  }
1187  else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
1188    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
1189    return getTrylockCallExpr(E, C, Negate);
1190  }
1191  else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
1192    if (UOP->getOpcode() == UO_LNot) {
1193      Negate = !Negate;
1194      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
1195    }
1196  }
1197  // FIXME -- handle && and || as well.
1198  return NULL;
1199}
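
// For example (a sketch): given user code such as
//   bool b = mu.tryLock();
//   if (!b) ...
// the branch condition traces through the DeclRefExpr "b" (via the local
// variable map) back to the call to tryLock(), and Negate is set to true
// because of the '!'.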
1200
1201
1202/// \brief Find the lockset that holds on the edge between PredBlock
1203/// and CurrBlock.  The edge set is the exit set of PredBlock (passed
1204/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
1205Lockset ThreadSafetyAnalyzer::getEdgeLockset(const Lockset &ExitSet,
1206                                             const CFGBlock *PredBlock,
1207                                             const CFGBlock *CurrBlock) {
1208  if (!PredBlock->getTerminatorCondition())
1209    return ExitSet;
1210
1211  bool Negate = false;
1212  const Stmt *Cond = PredBlock->getTerminatorCondition();
1213  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
1214  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
1215
1216  CallExpr *Exp = const_cast<CallExpr*>(
1217    getTrylockCallExpr(Cond, LVarCtx, Negate));
1218  if (!Exp)
1219    return ExitSet;
1220
1221  NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
1222  if(!FunDecl || !FunDecl->hasAttrs())
1223    return ExitSet;
1224
1225  Lockset Result = ExitSet;
1226
1227  // If the condition is a call to a Trylock function, then grab the attributes
1228  AttrVec &ArgAttrs = FunDecl->getAttrs();
1229  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
1230    Attr *Attr = ArgAttrs[i];
1231    switch (Attr->getKind()) {
1232      case attr::ExclusiveTrylockFunction: {
1233        ExclusiveTrylockFunctionAttr *A =
1234          cast<ExclusiveTrylockFunctionAttr>(Attr);
1235        Result = addTrylock(Result, LK_Exclusive, A, Exp, FunDecl,
1236                            PredBlock, CurrBlock,
1237                            A->getSuccessValue(), Negate);
1238        break;
1239      }
1240      case attr::SharedTrylockFunction: {
1241        SharedTrylockFunctionAttr *A =
1242          cast<SharedTrylockFunctionAttr>(Attr);
1243        Result = addTrylock(Result, LK_Shared, A, Exp, FunDecl,
1244                            PredBlock, CurrBlock,
1245                            A->getSuccessValue(), Negate);
1246        break;
1247      }
1248      default:
1249        break;
1250    }
1251  }
1252  return Result;
1253}
1254
1255
1256/// \brief We use this class to visit different types of expressions in
1257/// CFGBlocks, and build up the lockset.
1258/// An expression may cause us to add or remove locks from the lockset, or else
1259/// output error messages related to missing locks.
1260/// FIXME: In future, we may be able to not inherit from a visitor.
1261class BuildLockset : public StmtVisitor<BuildLockset> {
1262  friend class ThreadSafetyAnalyzer;
1263
1264  ThreadSafetyAnalyzer *Analyzer;
1265  Lockset LSet;
1266  LocalVariableMap::Context LVarCtx;
1267  unsigned CtxIndex;
1268
1269  // Helper functions
1270  const ValueDecl *getValueDecl(Expr *Exp);
1271
1272  void warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, AccessKind AK,
1273                          Expr *MutexExp, ProtectedOperationKind POK);
1274
1275  void checkAccess(Expr *Exp, AccessKind AK);
1276  void checkDereference(Expr *Exp, AccessKind AK);
1277  void handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
1278
1279  /// \brief Returns true if the lockset contains a lock, regardless of whether
1280  /// the lock is held exclusively or shared.
1281  bool locksetContains(const MutexID &Lock) const {
1282    return LSet.lookup(Lock);
1283  }
1284
1285  /// \brief Returns true if the lockset contains a lock with the passed in
1286  /// locktype.
1287  bool locksetContains(const MutexID &Lock, LockKind KindRequested) const {
1288    const LockData *LockHeld = LSet.lookup(Lock);
1289    return (LockHeld && KindRequested == LockHeld->LKind);
1290  }
1291
1292  /// \brief Returns true if the lockset contains a lock with at least the
1293  /// passed in locktype. So for example, if we pass in LK_Shared, this function
1294  /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
1295  /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
1296  bool locksetContainsAtLeast(const MutexID &Lock,
1297                              LockKind KindRequested) const {
1298    switch (KindRequested) {
1299      case LK_Shared:
1300        return locksetContains(Lock);
1301      case LK_Exclusive:
1302        return locksetContains(Lock, KindRequested);
1303    }
1304    llvm_unreachable("Unknown LockKind");
1305  }
1306
1307public:
1308  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
1309    : StmtVisitor<BuildLockset>(),
1310      Analyzer(Anlzr),
1311      LSet(Info.EntrySet),
1312      LVarCtx(Info.EntryContext),
1313      CtxIndex(Info.EntryIndex)
1314  {}
1315
1316  void VisitUnaryOperator(UnaryOperator *UO);
1317  void VisitBinaryOperator(BinaryOperator *BO);
1318  void VisitCastExpr(CastExpr *CE);
1319  void VisitCallExpr(CallExpr *Exp);
1320  void VisitCXXConstructExpr(CXXConstructExpr *Exp);
1321  void VisitDeclStmt(DeclStmt *S);
1322};
1323
1324
1325/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
1326const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
1327  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
1328    return DR->getDecl();
1329
1330  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
1331    return ME->getMemberDecl();
1332
1333  return 0;
1334}
1335
1336/// \brief Warn if the lockset does not contain a lock sufficient to protect
1337/// an access of the given AccessKind.
1338void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
1339                                      AccessKind AK, Expr *MutexExp,
1340                                      ProtectedOperationKind POK) {
1341  LockKind LK = getLockKindFromAccessKind(AK);
1342
1343  MutexID Mutex(MutexExp, Exp, D);
1344  if (!Mutex.isValid())
1345    MutexID::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
1346  else if (!locksetContainsAtLeast(Mutex, LK))
1347    Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK,
1348                                         Exp->getExprLoc());
1349}
1350
1351/// \brief This method identifies variable dereferences and checks pt_guarded_by
1352/// and pt_guarded_var annotations. Note that we only check these annotations
1353/// at the time a pointer is dereferenced.
1354/// FIXME: We need to check for other types of pointer dereferences
1355/// (e.g. [], ->) and deal with them here.
1356/// \param Exp An expression that has been read or written.
1357void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
1358  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
1359  if (!UO || UO->getOpcode() != clang::UO_Deref)
1360    return;
1361  Exp = UO->getSubExpr()->IgnoreParenCasts();
1362
1363  const ValueDecl *D = getValueDecl(Exp);
1364  if(!D || !D->hasAttrs())
1365    return;
1366
1367  if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
1368    Analyzer->Handler.handleNoMutexHeld(D, POK_VarDereference, AK,
1369                                        Exp->getExprLoc());
1370
1371  const AttrVec &ArgAttrs = D->getAttrs();
1372  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
1373    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
1374      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
1375}
1376
1377/// \brief Checks guarded_by and guarded_var attributes.
1378/// Whenever we identify an access (read or write) of a DeclRefExpr or
1379/// MemberExpr, we need to check whether there are any guarded_by or
1380/// guarded_var attributes, and make sure we hold the appropriate mutexes.
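///
/// For example (a hypothetical snippet, assuming a GUARDED_BY macro that wraps
/// the guarded_by attribute):
///   Mutex Mu;
///   int Count GUARDED_BY(Mu);
///   int  get()      { return Count; }  // read:  Mu must be held at least shared
///   void set(int N) { Count = N; }     // write: Mu must be held exclusively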
1381void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
1382  const ValueDecl *D = getValueDecl(Exp);
1383  if (!D || !D->hasAttrs())
1384    return;
1385
1386  if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
1387    Analyzer->Handler.handleNoMutexHeld(D, POK_VarAccess, AK,
1388                                        Exp->getExprLoc());
1389
1390  const AttrVec &ArgAttrs = D->getAttrs();
1391  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
1392    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
1393      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
1394}
1395
1396/// \brief Process a function call, method call, constructor call,
1397/// or destructor call.  This involves looking at the attributes on the
1398/// corresponding function/method/constructor/destructor, issuing warnings,
1399/// and updating the locksets accordingly.
1400///
1401/// FIXME: For classes annotated with one of the guarded annotations, we need
1402/// to treat const method calls as reads and non-const method calls as writes,
1403/// and check that the appropriate locks are held. Non-const method calls with
1404  /// the same signature as const method calls can also be treated as reads.
1405///
1406/// FIXME: We need to also visit CallExprs to catch/check global functions.
1407///
1408/// FIXME: Do not flag an error for member variables accessed in constructors/
1409/// destructors
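///
/// For example (a hypothetical snippet, assuming the usual lock-annotation
/// macros such as EXCLUSIVE_LOCK_FUNCTION and UNLOCK_FUNCTION):
///   Mutex Mu;
///   void lock()   EXCLUSIVE_LOCK_FUNCTION(Mu);  // a call adds Mu (exclusive)
///   void unlock() UNLOCK_FUNCTION(Mu);          // a call removes Mu, and warns
///                                               // if Mu was not in the lockset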
1410void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
1411  AttrVec &ArgAttrs = D->getAttrs();
1412  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
1413    Attr *Attr = ArgAttrs[i];
1414    switch (Attr->getKind()) {
1415      // When we encounter an exclusive lock function, we need to add the lock
1416      // to our lockset with kind exclusive.
1417      case attr::ExclusiveLockFunction: {
1418        ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(Attr);
1419        LSet = Analyzer->addLocksToSet(LSet, LK_Exclusive, A, Exp, D, VD);
1420        break;
1421      }
1422
1423      // When we encounter a shared lock function, we need to add the lock
1424      // to our lockset with kind shared.
1425      case attr::SharedLockFunction: {
1426        SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(Attr);
1427        LSet = Analyzer->addLocksToSet(LSet, LK_Shared, A, Exp, D, VD);
1428        break;
1429      }
1430
1431      // When we encounter an unlock function, we need to remove unlocked
1432      // mutexes from the lockset, and flag a warning if they are not there.
1433      case attr::UnlockFunction: {
1434        UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
1435        LSet = Analyzer->removeLocksFromSet(LSet, UFAttr, Exp, D);
1436        break;
1437      }
1438
1439      case attr::ExclusiveLocksRequired: {
1440        ExclusiveLocksRequiredAttr *ELRAttr =
1441            cast<ExclusiveLocksRequiredAttr>(Attr);
1442
1443        for (ExclusiveLocksRequiredAttr::args_iterator
1444             I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
1445          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
1446        break;
1447      }
1448
1449      case attr::SharedLocksRequired: {
1450        SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);
1451
1452        for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
1453             E = SLRAttr->args_end(); I != E; ++I)
1454          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
1455        break;
1456      }
1457
1458      case attr::LocksExcluded: {
1459        LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
1460        for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
1461            E = LEAttr->args_end(); I != E; ++I) {
1462          MutexID Mutex(*I, Exp, D);
1463          if (!Mutex.isValid())
1464            MutexID::warnInvalidLock(Analyzer->Handler, *I, Exp, D);
1465          else if (locksetContains(Mutex))
1466            Analyzer->Handler.handleFunExcludesLock(D->getName(),
1467                                                    Mutex.getName(),
1468                                                    Exp->getExprLoc());
1469        }
1470        break;
1471      }
1472
1473      // Ignore other (non thread-safety) attributes
1474      default:
1475        break;
1476    }
1477  }
1478}
1479
1480
1481/// \brief For unary operations which read and write a variable, we need to
1482/// check whether we hold any required mutexes. Reads are checked in
1483/// VisitCastExpr.
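///
/// For example (illustrative), with Count declared GUARDED_BY(Mu):
///   ++Count;  // a read-modify-write of Count: checked here as a write, so
///             // Mu must be held exclusively at this point.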
1484void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
1485  switch (UO->getOpcode()) {
1486    case clang::UO_PostDec:
1487    case clang::UO_PostInc:
1488    case clang::UO_PreDec:
1489    case clang::UO_PreInc: {
1490      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
1491      checkAccess(SubExp, AK_Written);
1492      checkDereference(SubExp, AK_Written);
1493      break;
1494    }
1495    default:
1496      break;
1497  }
1498}
1499
1500/// For binary operations which assign to a variable (writes), we need to check
1501/// whether we hold any required mutexes.
1502/// FIXME: Deal with non-primitive types.
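///
/// For example (illustrative), with Count declared GUARDED_BY(Mu):
///   Count = 0;   // plain assignment
///   Count += N;  // compound assignment
/// Both are writes to Count, so Mu must be held exclusively at each one.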
1503void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
1504  if (!BO->isAssignmentOp())
1505    return;
1506
1507  // adjust the context
1508  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
1509
1510  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
1511  checkAccess(LHSExp, AK_Written);
1512  checkDereference(LHSExp, AK_Written);
1513}
1514
1515  /// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
1516/// need to ensure we hold any required mutexes.
1517/// FIXME: Deal with non-primitive types.
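///
/// For example (illustrative), with Count declared GUARDED_BY(Mu):
///   int X = Count;  // the lvalue-to-rvalue cast of Count is a read, so at
///                   // least a shared hold of Mu is required here.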
1518void BuildLockset::VisitCastExpr(CastExpr *CE) {
1519  if (CE->getCastKind() != CK_LValueToRValue)
1520    return;
1521  Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
1522  checkAccess(SubExp, AK_Read);
1523  checkDereference(SubExp, AK_Read);
1524}
1525
1526
1527void BuildLockset::VisitCallExpr(CallExpr *Exp) {
1528  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
1529  if (!D || !D->hasAttrs())
1530    return;
1531  handleCall(Exp, D);
1532}
1533
1534void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
1535  // FIXME -- only handles constructors in DeclStmt below.
1536}
1537
1538void BuildLockset::VisitDeclStmt(DeclStmt *S) {
1539  // adjust the context
1540  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
1541
1542  DeclGroupRef DGrp = S->getDeclGroup();
1543  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
1544    Decl *D = *I;
1545    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
1546      Expr *E = VD->getInit();
1547      // handle constructors that involve temporaries
1548      if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
1549        E = EWC->getSubExpr();
1550
1551      if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
1552        NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
1553        if (!CtorD || !CtorD->hasAttrs())
1554          return;
1555        handleCall(CE, CtorD, VD);
1556      }
1557    }
1558  }
1559}
1560
1561
1562
1563/// \brief Compute the intersection of two locksets and issue warnings for any
1564/// locks in the symmetric difference.
1565///
1566/// This function is used at a merge point in the CFG when comparing the lockset
1567/// of each branch being merged. For example, given the following sequence:
1568  /// A; if () then B; else C; D; we need to check that the locksets after B and C
1569/// are the same. In the event of a difference, we use the intersection of these
1570/// two locksets at the start of D.
1571///
1572/// \param LSet1 The first lockset.
1573/// \param LSet2 The second lockset.
1574/// \param JoinLoc The location of the join point for error reporting
1575/// \param LEK1 The error message to report if a mutex is missing from LSet1
1576  /// \param LEK2 The error message to report if a mutex is missing from LSet2
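///
/// For example (hypothetical code that triggers the warning):
///   void f(bool b) {
///     if (b)
///       Mu.Lock();
///     // At this join point Mu is held after the "then" branch but not after
///     // the empty "else" branch, so a warning is issued and Mu is dropped
///     // from the merged lockset.
///   }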
1577Lockset ThreadSafetyAnalyzer::intersectAndWarn(const Lockset &LSet1,
1578                                               const Lockset &LSet2,
1579                                               SourceLocation JoinLoc,
1580                                               LockErrorKind LEK1,
1581                                               LockErrorKind LEK2) {
1582  Lockset Intersection = LSet1;
1583
1584  for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
1585    const MutexID &LSet2Mutex = I.getKey();
1586    const LockData &LDat2 = I.getData();
1587    if (const LockData *LDat1 = LSet1.lookup(LSet2Mutex)) {
1588      if (LDat1->LKind != LDat2.LKind) {
1589        Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
1590                                         LDat2.AcquireLoc,
1591                                         LDat1->AcquireLoc);
1592        if (LDat1->LKind != LK_Exclusive)
1593          Intersection = LocksetFactory.add(Intersection, LSet2Mutex, LDat2);
1594      }
1595    } else {
1596      if (LDat2.UnderlyingMutex.isValid()) {
1597        if (LSet2.lookup(LDat2.UnderlyingMutex)) {
1598          // If this is a scoped lock that manages another mutex, and if the
1599          // underlying mutex is still held, then warn about the underlying
1600          // mutex.
1601          Handler.handleMutexHeldEndOfScope(LDat2.UnderlyingMutex.getName(),
1602                                            LDat2.AcquireLoc,
1603                                            JoinLoc, LEK1);
1604        }
1605      }
1606      else if (!LDat2.Managed)
1607        Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
1608                                          LDat2.AcquireLoc,
1609                                          JoinLoc, LEK1);
1610    }
1611  }
1612
1613  for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
1614    if (!LSet2.contains(I.getKey())) {
1615      const MutexID &Mutex = I.getKey();
1616      const LockData &LDat1 = I.getData();
1617
1618      if (LDat1.UnderlyingMutex.isValid()) {
1619        if (LSet1.lookup(LDat1.UnderlyingMutex)) {
1620          // If this is a scoped lock that manages another mutex, and if the
1621          // underlying mutex is still held, then warn about the underlying
1622          // mutex.
1623          Handler.handleMutexHeldEndOfScope(LDat1.UnderlyingMutex.getName(),
1624                                            LDat1.AcquireLoc,
1625                                            JoinLoc, LEK1);
1626        }
1627      }
1628      else if (!LDat1.Managed)
1629        Handler.handleMutexHeldEndOfScope(Mutex.getName(),
1630                                          LDat1.AcquireLoc,
1631                                          JoinLoc, LEK2);
1632      Intersection = LocksetFactory.remove(Intersection, Mutex);
1633    }
1634  }
1635  return Intersection;
1636}
1637
1638
1639/// \brief Check a function's CFG for thread-safety violations.
1640///
1641/// We traverse the blocks in the CFG, compute the set of mutexes that are held
1642/// at the end of each block, and issue warnings for thread safety violations.
1643/// Each block in the CFG is traversed exactly once.
1644void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
1645  CFG *CFGraph = AC.getCFG();
1646  if (!CFGraph) return;
1647  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
1648
1649  // AC.dumpCFG(true);
1650
1651  if (!D)
1652    return;  // Ignore anonymous functions for now.
1653  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
1654    return;
1655  // FIXME: Do something a bit more intelligent inside constructor and
1656  // destructor code.  Constructors and destructors must assume unique access
1657  // to 'this', so checks on member variable access are disabled, but we should
1658  // still enable checks on other objects.
1659  if (isa<CXXConstructorDecl>(D))
1660    return;  // Don't check inside constructors.
1661  if (isa<CXXDestructorDecl>(D))
1662    return;  // Don't check inside destructors.
1663
1664  BlockInfo.resize(CFGraph->getNumBlockIDs(),
1665    CFGBlockInfo::getEmptyBlockInfo(LocksetFactory, LocalVarMap));
1666
1667  // We need to explore the CFG via a "topological" ordering.
1668  // That way, we will be guaranteed to have information about required
1669  // predecessor locksets when exploring a new block.
1670  PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
1671  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
1672
1673  // Compute SSA names for local variables
1674  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
1675
1676  // Fill in source locations for all CFGBlocks.
1677  findBlockLocations(CFGraph, SortedGraph, BlockInfo);
1678
1679  // Add locks from exclusive_locks_required and shared_locks_required
1680  // to the initial lockset. Also turn off checking for lock and unlock functions.
1681  // FIXME: is there a more intelligent way to check lock/unlock functions?
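  //
  // For example (illustrative): a function declared as
  //   void f() EXCLUSIVE_LOCKS_REQUIRED(Mu);
  // starts with Mu (exclusive) in its initial lockset, so accesses to data
  // guarded by Mu inside f() do not produce warnings.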
1682  if (!SortedGraph->empty() && D->hasAttrs()) {
1683    const CFGBlock *FirstBlock = *SortedGraph->begin();
1684    Lockset &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
1685    const AttrVec &ArgAttrs = D->getAttrs();
1686    for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
1687      Attr *Attr = ArgAttrs[i];
1688      SourceLocation AttrLoc = Attr->getLocation();
1689      if (SharedLocksRequiredAttr *SLRAttr
1690            = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
1691        for (SharedLocksRequiredAttr::args_iterator
1692             SLRIter = SLRAttr->args_begin(),
1693             SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
1694          InitialLockset = addLock(InitialLockset, *SLRIter, D,
1695                                   LockData(AttrLoc, LK_Shared), false);
1696      } else if (ExclusiveLocksRequiredAttr *ELRAttr
1697                   = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
1698        for (ExclusiveLocksRequiredAttr::args_iterator
1699             ELRIter = ELRAttr->args_begin(),
1700             ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
1701          InitialLockset = addLock(InitialLockset, *ELRIter, D,
1702                                   LockData(AttrLoc, LK_Exclusive), false);
1703      } else if (isa<UnlockFunctionAttr>(Attr)) {
1704        // Don't try to check unlock functions for now
1705        return;
1706      } else if (isa<ExclusiveLockFunctionAttr>(Attr)) {
1707        // Don't try to check lock functions for now
1708        return;
1709      } else if (isa<SharedLockFunctionAttr>(Attr)) {
1710        // Don't try to check lock functions for now
1711        return;
1712      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
1713        // Don't try to check trylock functions for now
1714        return;
1715      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
1716        // Don't try to check trylock functions for now
1717        return;
1718      }
1719    }
1720  }
1721
1722  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
1723       E = SortedGraph->end(); I!= E; ++I) {
1724    const CFGBlock *CurrBlock = *I;
1725    int CurrBlockID = CurrBlock->getBlockID();
1726    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
1727
1728    // Use the default initial lockset in case there are no predecessors.
1729    VisitedBlocks.insert(CurrBlock);
1730
1731    // Iterate through the predecessor blocks and warn if the lockset for all
1732    // predecessors is not the same. We take the entry lockset of the current
1733    // block to be the intersection of all previous locksets.
1734    // FIXME: By keeping the intersection, we may output more errors in the
1735    // future for a lock which is not in the intersection, but was in the union.
1736    // We may also want to keep the union in the future. As an example, let's say
1737    // the intersection contains Mutex L, and the union contains L and M.
1738    // Later we unlock M. At this point, we would output an error because we
1739    // never locked M; although the real error is probably that we forgot to
1740    // lock M on all code paths. Conversely, let's say that later we lock M.
1741    // In this case, we should compare against the intersection instead of the
1742    // union because the real error is probably that we forgot to unlock M on
1743    // all code paths.
1744    bool LocksetInitialized = false;
1745    llvm::SmallVector<CFGBlock*, 8> SpecialBlocks;
1746    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
1747         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {
1748
1749      // if *PI -> CurrBlock is a back edge
1750      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
1751        continue;
1752
1753      // Ignore edges from blocks that can't return.
1754      if ((*PI)->hasNoReturnElement())
1755        continue;
1756
1757      // If the previous block ended in a 'continue' or 'break' statement, then
1758      // a difference in locksets is probably due to a bug in that block, rather
1759      // than in some other predecessor. In that case, keep the other
1760      // predecessor's lockset.
1761      if (const Stmt *Terminator = (*PI)->getTerminator()) {
1762        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
1763          SpecialBlocks.push_back(*PI);
1764          continue;
1765        }
1766      }
1767
1768      int PrevBlockID = (*PI)->getBlockID();
1769      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
1770      Lockset PrevLockset =
1771        getEdgeLockset(PrevBlockInfo->ExitSet, *PI, CurrBlock);
1772
1773      if (!LocksetInitialized) {
1774        CurrBlockInfo->EntrySet = PrevLockset;
1775        LocksetInitialized = true;
1776      } else {
1777        CurrBlockInfo->EntrySet =
1778          intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
1779                           CurrBlockInfo->EntryLoc,
1780                           LEK_LockedSomePredecessors);
1781      }
1782    }
1783
1784    // Process continue and break blocks. Assume that the lockset for the
1785    // resulting block is unaffected by any discrepancies in them.
1786    for (unsigned SpecialI = 0, SpecialN = SpecialBlocks.size();
1787         SpecialI < SpecialN; ++SpecialI) {
1788      CFGBlock *PrevBlock = SpecialBlocks[SpecialI];
1789      int PrevBlockID = PrevBlock->getBlockID();
1790      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
1791
1792      if (!LocksetInitialized) {
1793        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
1794        LocksetInitialized = true;
1795      } else {
1796        // Determine whether this edge is a loop terminator for diagnostic
1797        // purposes. FIXME: A 'break' statement might be a loop terminator, but
1798        // it might also be part of a switch. Also, a subsequent destructor
1799        // might add to the lockset, in which case the real issue might be a
1800        // double lock on the other path.
1801        const Stmt *Terminator = PrevBlock->getTerminator();
1802        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
1803
1804        Lockset PrevLockset =
1805          getEdgeLockset(PrevBlockInfo->ExitSet, PrevBlock, CurrBlock);
1806
1807        // Do not update EntrySet.
1808        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
1809                         PrevBlockInfo->ExitLoc,
1810                         IsLoop ? LEK_LockedSomeLoopIterations
1811                                : LEK_LockedSomePredecessors);
1812      }
1813    }
1814
1815    BuildLockset LocksetBuilder(this, *CurrBlockInfo);
1816
1817    // Visit all the statements in the basic block.
1818    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
1819         BE = CurrBlock->end(); BI != BE; ++BI) {
1820      switch (BI->getKind()) {
1821        case CFGElement::Statement: {
1822          const CFGStmt *CS = cast<CFGStmt>(&*BI);
1823          LocksetBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
1824          break;
1825        }
1826        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
1827        case CFGElement::AutomaticObjectDtor: {
1828          const CFGAutomaticObjDtor *AD = cast<CFGAutomaticObjDtor>(&*BI);
1829          CXXDestructorDecl *DD = const_cast<CXXDestructorDecl*>(
1830            AD->getDestructorDecl(AC.getASTContext()));
1831          if (!DD->hasAttrs())
1832            break;
1833
1834          // Create a dummy expression.
1835          VarDecl *VD = const_cast<VarDecl*>(AD->getVarDecl());
1836          DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue,
1837                          AD->getTriggerStmt()->getLocEnd());
1838          LocksetBuilder.handleCall(&DRE, DD);
1839          break;
1840        }
1841        default:
1842          break;
1843      }
1844    }
1845    CurrBlockInfo->ExitSet = LocksetBuilder.LSet;
1846
1847    // For every back edge from CurrBlock (the end of the loop) to another block
1848    // (FirstLoopBlock), we need to check that the lockset of CurrBlock is equal
1849    // to the one held at the beginning of FirstLoopBlock. We can look up that
1850    // lockset in the EntrySet of FirstLoopBlock's CFGBlockInfo entry.
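    //
    // For example (hypothetical code that triggers the warning):
    //   while (c) {
    //     Mu.Lock();
    //   }
    // Mu is in the lockset at the back edge but not in the entry lockset of
    // the loop, so we warn that Mu is not held consistently across loop
    // iterations.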
1851    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
1852         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {
1853
1854      // if CurrBlock -> *SI is *not* a back edge
1855      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
1856        continue;
1857
1858      CFGBlock *FirstLoopBlock = *SI;
1859      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
1860      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
1861      intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
1862                       PreLoop->EntryLoc,
1863                       LEK_LockedSomeLoopIterations);
1864    }
1865  }
1866
1867  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
1868  CFGBlockInfo *Final   = &BlockInfo[CFGraph->getExit().getBlockID()];
1869
1870  // FIXME: Should we call this function for all blocks which exit the function?
1871  intersectAndWarn(Initial->EntrySet, Final->ExitSet,
1872                   Final->ExitLoc,
1873                   LEK_LockedAtEndOfFunction,
1874                   LEK_NotLockedAtEndOfFunction);
1875}
1876
1877} // end anonymous namespace
1878
1879
1880namespace clang {
1881namespace thread_safety {
1882
1883/// \brief Check a function's CFG for thread-safety violations.
1884///
1885/// We traverse the blocks in the CFG, compute the set of mutexes that are held
1886/// at the end of each block, and issue warnings for thread safety violations.
1887/// Each block in the CFG is traversed exactly once.
1888void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
1889                             ThreadSafetyHandler &Handler) {
1890  ThreadSafetyAnalyzer Analyzer(Handler);
1891  Analyzer.runAnalysis(AC);
1892}
1893
1894/// \brief Helper function that returns a LockKind required for the given level
1895/// of access.
1896LockKind getLockKindFromAccessKind(AccessKind AK) {
1897  switch (AK) {
1898    case AK_Read :
1899      return LK_Shared;
1900    case AK_Written :
1901      return LK_Exclusive;
1902  }
1903  llvm_unreachable("Unknown AccessKind");
1904}
1905
1906}} // end namespace clang::thread_safety
1907