ThreadSafety.cpp revision 99107ebc0a5aea953b736e12757e0919d5249d43
//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://gcc.gnu.org/wiki/ThreadSafetyAnnotation for the gcc version.
//
//===----------------------------------------------------------------------===//
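
// Example of the kind of annotated client code this analysis checks (a
// hypothetical sketch, not part of this file): GUARDED_BY names the mutex
// that must be held when a member is accessed, and EXCLUSIVE_LOCK_FUNCTION /
// UNLOCK_FUNCTION mark the functions that acquire and release it.
//
//   class Account {
//     Mutex Mu;
//     int Balance GUARDED_BY(Mu);
//     void deposit(int Amount) {
//       Balance += Amount;  // warning: writing 'Balance' requires lock 'Mu'
//     }
//   };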

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <algorithm>
#include <vector>

using namespace clang;
using namespace thread_safety;

// Helper functions

/// Returns the "parent" object of a member access or member call, i.e. the
/// base of a MemberExpr or the implicit object argument of a
/// CXXMemberCallExpr; returns 0 for any other expression.
static Expr *getParent(Expr *Exp) {
  if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getBase();
  if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp))
    return CE->getImplicitObjectArgument();
  return 0;
}

namespace {
/// \brief Implements a set of CFGBlocks using a BitVector.
///
/// This class contains a minimal interface, primarily dictated by the SetType
/// template parameter of the llvm::po_iterator template, as used with external
/// storage. We also use this set to keep track of which CFGBlocks we visit
/// during the analysis.
class CFGBlockSet {
  llvm::BitVector VisitedBlockIDs;

public:
  // po_iterator requires this iterator, but the only interface needed is the
  // value_type typedef.
  struct iterator {
    typedef const CFGBlock *value_type;
  };

  CFGBlockSet() {}
  CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}

  /// \brief Set the bit associated with a particular CFGBlock.
  /// This is the important method for the SetType template parameter.
  bool insert(const CFGBlock *Block) {
    // Note that insert() is called by po_iterator, which doesn't check to make
    // sure that Block is non-null.  Moreover, the CFGBlock iterator will
    // occasionally hand out null pointers for pruned edges, so we catch those
    // here.
    if (Block == 0)
      return false;  // if an edge is trivially false.
    if (VisitedBlockIDs.test(Block->getBlockID()))
      return false;
    VisitedBlockIDs.set(Block->getBlockID());
    return true;
  }

  /// \brief Check if the bit for a CFGBlock has already been set.
  /// This method is for tracking visited blocks in the main thread-safety
  /// loop. Block must not be null.
  bool alreadySet(const CFGBlock *Block) {
    return VisitedBlockIDs.test(Block->getBlockID());
  }
};

/// \brief A helper class for iterating through CFGBlocks in topological order.
class TopologicallySortedCFG {
  typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator;

  std::vector<const CFGBlock*> Blocks;

public:
  typedef std::vector<const CFGBlock*>::reverse_iterator iterator;

  TopologicallySortedCFG(const CFG *CFGraph) {
    Blocks.reserve(CFGraph->getNumBlockIDs());
    CFGBlockSet BSet(CFGraph);

    for (po_iterator I = po_iterator::begin(CFGraph, BSet),
         E = po_iterator::end(CFGraph, BSet); I != E; ++I) {
      Blocks.push_back(*I);
    }
  }

  iterator begin() {
    return Blocks.rbegin();
  }

  iterator end() {
    return Blocks.rend();
  }
};

/// \brief A MutexID object uniquely identifies a particular mutex, and
/// is built from an Expr* (i.e. calling a lock function).
///
/// Thread-safety analysis works by comparing lock expressions.  Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
/// a particular mutex object at run-time.  Subsequent occurrences of the same
/// expression (where "same" means syntactic equality) will refer to the same
/// run-time object if three conditions hold:
/// (1) Local variables in the expression, such as "x", have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
///
/// Clang introduces an additional wrinkle, which is that it is difficult to
/// derive canonical expressions, or compare expressions directly for equality.
/// Thus, we identify a mutex not by an Expr, but by the set of named
/// declarations that are referenced by the Expr.  In other words,
/// x->foo->bar.mu will be a four-element vector with the Decls for
/// mu, bar, foo, and x.  The vector will uniquely identify the expression
/// for all practical purposes.
///
/// Note that we will need to perform substitution on "this" and function
/// parameter names when constructing a lock expression.
///
/// For example:
/// class C { Mutex Mu;  void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
/// void myFunc(C *X) { ... X->lock() ... }
/// The original expression for the mutex acquired by myFunc is "this->Mu", but
/// "X" is substituted for "this", so we get "X->Mu".
///
/// For another example:
/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
/// MyList *MyL;
/// foo(MyL);  // requires lock MyL->Mu to be held
class MutexID {
  SmallVector<NamedDecl*, 2> DeclSeq;
  ThreadSafetyHandler &Handler;

  /// Build a Decl sequence representing the lock from the given expression.
  /// Recursive function that bottoms out when the final DeclRefExpr is reached.
  void buildMutexID(Expr *Exp, Expr *Parent) {
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
      NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
      DeclSeq.push_back(ND);
    } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
      NamedDecl *ND = ME->getMemberDecl();
      DeclSeq.push_back(ND);
      buildMutexID(ME->getBase(), Parent);
    } else if (isa<CXXThisExpr>(Exp)) {
      if (!Parent)
        return;
      buildMutexID(Parent, 0);
    } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp))
      buildMutexID(CE->getSubExpr(), Parent);
    else
      Handler.handleInvalidLockExp(Exp->getExprLoc());
  }

public:
  MutexID(ThreadSafetyHandler &Handler, Expr *LExpr, Expr *ParentExpr)
    : Handler(Handler) {
    buildMutexID(LExpr, ParentExpr);
    assert(!DeclSeq.empty());
  }

  bool operator==(const MutexID &other) const {
    return DeclSeq == other.DeclSeq;
  }

  bool operator!=(const MutexID &other) const {
    return !(*this == other);
  }

  // SmallVector overloads operator< to do lexicographic ordering. Note that
  // we use pointer equality (and <) to compare NamedDecls. This means the order
  // of MutexIDs in a lockset is nondeterministic. In order to output
  // diagnostics in a deterministic ordering, we must order all diagnostics to
  // output by SourceLocation when iterating through this lockset.
  bool operator<(const MutexID &other) const {
    return DeclSeq < other.DeclSeq;
  }

  /// \brief Returns the name of the first Decl in the list for a given MutexID;
  /// e.g. the lock expression foo.bar() has name "bar".
  /// The caret will point unambiguously to the lock expression, so using this
  /// name in diagnostics is a way to get simple, and consistent, mutex names.
  /// We do not want to output the entire expression text for security reasons.
  StringRef getName() const {
    return DeclSeq.front()->getName();
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
         E = DeclSeq.end(); I != E; ++I) {
      ID.AddPointer(*I);
    }
  }
};

/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
/// The main body of the analysis maps MutexIDs to LockDatas.
struct LockData {
  SourceLocation AcquireLoc;

  /// \brief LKind stores whether a lock is held shared or exclusively.
  /// Note that this analysis does not currently support either re-entrant
  /// locking or lock "upgrading" and "downgrading" between exclusive and
  /// shared.
  ///
  /// FIXME: add support for re-entrant locking and lock up/downgrading
  LockKind LKind;

  LockData(SourceLocation AcquireLoc, LockKind LKind)
    : AcquireLoc(AcquireLoc), LKind(LKind) {}

  bool operator==(const LockData &other) const {
    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
  }

  bool operator!=(const LockData &other) const {
    return !(*this == other);
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(AcquireLoc.getRawEncoding());
    ID.AddInteger(LKind);
  }
};

/// A Lockset maps each MutexID (defined above) to information about how it has
/// been locked.
typedef llvm::ImmutableMap<MutexID, LockData> Lockset;

/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
  ThreadSafetyHandler &Handler;
  Lockset LSet;
  Lockset::Factory &LocksetFactory;

  // Helper functions
  void removeLock(SourceLocation UnlockLoc, Expr *LockExp, Expr *Parent);
  void addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
               LockKind LK);
  const ValueDecl *getValueDecl(Expr *Exp);
  void warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK);
  void checkAccess(Expr *Exp, AccessKind AK);
  void checkDereference(Expr *Exp, AccessKind AK);

  template <class AttrType>
  void addLocksToSet(LockKind LK, Attr *Attr, CXXMemberCallExpr *Exp);

  /// \brief Returns true if the lockset contains a lock, regardless of whether
  /// the lock is held exclusively or shared.
  bool locksetContains(MutexID Lock) const {
    return LSet.lookup(Lock);
  }

  /// \brief Returns true if the lockset contains a lock with the passed in
  /// locktype.
  bool locksetContains(MutexID Lock, LockKind KindRequested) const {
    const LockData *LockHeld = LSet.lookup(Lock);
    return (LockHeld && KindRequested == LockHeld->LKind);
  }

  /// \brief Returns true if the lockset contains a lock with at least the
  /// passed in locktype. So for example, if we pass in LK_Shared, this function
  /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
  /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
  bool locksetContainsAtLeast(MutexID Lock, LockKind KindRequested) const {
    switch (KindRequested) {
      case LK_Shared:
        return locksetContains(Lock);
      case LK_Exclusive:
        return locksetContains(Lock, KindRequested);
    }
  }

public:
  BuildLockset(ThreadSafetyHandler &Handler, Lockset LS, Lockset::Factory &F)
    : StmtVisitor<BuildLockset>(), Handler(Handler), LSet(LS),
      LocksetFactory(F) {}

  Lockset getLockset() {
    return LSet;
  }

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp);
};
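
// Illustrative walk-through (hypothetical client code): given
//
//   Mutex Mu;
//   int Data GUARDED_BY(Mu);
//   void f() {
//     Mu.Lock();    // VisitCXXMemberCallExpr adds Mu to the lockset
//     Data = 42;    // VisitBinaryOperator checks that Mu is held
//     Mu.Unlock();  // VisitCXXMemberCallExpr removes Mu from the lockset
//   }
//
// the lockset grows and shrinks as the statements of each CFGBlock are
// visited.  (This assumes Lock/Unlock are annotated with
// EXCLUSIVE_LOCK_FUNCTION/UNLOCK_FUNCTION, as in a typical mutex wrapper.)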

/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param LockLoc The source location of the acquire
/// \param LockExp The lock expression corresponding to the lock to be added
void BuildLockset::addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
                           LockKind LK) {
  // FIXME: deal with acquired before/after annotations
  MutexID Mutex(Handler, LockExp, Parent);
  LockData NewLock(LockLoc, LK);

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (locksetContains(Mutex))
    Handler.handleDoubleLock(Mutex.getName(), LockLoc);
  LSet = LocksetFactory.add(LSet, Mutex, NewLock);
}

/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param LockExp The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void BuildLockset::removeLock(SourceLocation UnlockLoc, Expr *LockExp,
                              Expr *Parent) {
  MutexID Mutex(Handler, LockExp, Parent);

  Lockset NewLSet = LocksetFactory.remove(LSet, Mutex);
  if (NewLSet == LSet)
    Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);

  LSet = NewLSet;
}

/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return 0;
}

/// \brief Warn if the lockset does not contain a lock sufficient to protect
/// access of at least the passed-in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK) {
  LockKind LK = getLockKindFromAccessKind(AK);
  Expr *Parent = getParent(Exp);
  MutexID Mutex(Handler, MutexExp, Parent);
  if (!locksetContainsAtLeast(Mutex, LK))
    Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
}


/// \brief This method identifies variable dereferences and checks pt_guarded_by
/// and pt_guarded_var annotations. Note that we only check these annotations
/// at the time a pointer is dereferenced.
/// FIXME: We need to check for other types of pointer dereferences
/// (e.g. [], ->) and deal with them here.
/// \param Exp An expression that has been read or written.
void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
  if (!UO || UO->getOpcode() != clang::UO_Deref)
    return;
  Exp = UO->getSubExpr()->IgnoreParenCasts();

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
}
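
// Illustrative example (hypothetical client code): given
//
//   Mutex Mu;
//   int *P PT_GUARDED_BY(Mu);
//
// the dereference "*P = 5;" is flagged unless Mu is held, whereas copying the
// pointer itself, e.g. "int *Q = P;", is not checked by this method.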

/// \brief Checks guarded_by and guarded_var attributes.
/// Whenever we identify an access (read or write) of a DeclRefExpr or
/// MemberExpr, we need to check whether there are any guarded_by or
/// guarded_var attributes, and make sure we hold the appropriate mutexes.
void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}
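
// Illustrative example (hypothetical client code): given
//
//   Mutex Mu;
//   int Counter GUARDED_BY(Mu);
//
// a read such as "int X = Counter;" requires Mu to be held at least shared,
// and a write such as "Counter++;" requires Mu to be held exclusively.  A
// variable marked only GUARDED_VAR merely requires that *some* lock be held
// at the point of access.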

/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
      checkAccess(SubExp, AK_Written);
      checkDereference(SubExp, AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;
  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
  checkAccess(LHSExp, AK_Written);
  checkDereference(LHSExp, AK_Written);
}

/// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
  checkAccess(SubExp, AK_Read);
  checkDereference(SubExp, AK_Read);
}

/// \brief This function, parameterized by an attribute type, is used to add a
/// set of locks specified as attribute arguments to the lockset.
template <typename AttrType>
void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
                                 CXXMemberCallExpr *Exp) {
  typedef typename AttrType::args_iterator iterator_type;
  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();
  AttrType *SpecificAttr = cast<AttrType>(Attr);

  if (SpecificAttr->args_size() == 0) {
    // The mutex held is the "this" object.
    addLock(ExpLocation, Parent, Parent, LK);
    return;
  }

  for (iterator_type I = SpecificAttr->args_begin(),
       E = SpecificAttr->args_end(); I != E; ++I)
    addLock(ExpLocation, *I, Parent, LK);
}
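
// Illustrative example (hypothetical client code): given
//
//   class Mailbox {
//     Mutex Mu;
//     void acquire() EXCLUSIVE_LOCK_FUNCTION(Mu);
//   };
//
// a call "M->acquire()" adds the mutex named by the attribute argument (Mu,
// resolved relative to the parent object M) to the lockset, whereas an
// annotation with no arguments adds the "this" object (here, M) itself.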

/// \brief When visiting CXXMemberCallExprs we need to examine the attributes on
/// the method that is being called and add, remove or check locks in the
/// lockset accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
///
/// FIXME: We need to also visit CallExprs to catch/check global functions.
void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());

  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();

  if (!D || !D->hasAttrs())
    return;

  AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      // When we encounter an exclusive lock function, we need to add the lock
      // to our lockset with kind exclusive.
      case attr::ExclusiveLockFunction:
        addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp);
        break;

      // When we encounter a shared lock function, we need to add the lock
      // to our lockset with kind shared.
      case attr::SharedLockFunction:
        addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp);
        break;

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::UnlockFunction: {
        UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);

        if (UFAttr->args_size() == 0) { // The lock held is the "this" object.
          removeLock(ExpLocation, Parent, Parent);
          break;
        }

        for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(),
             E = UFAttr->args_end(); I != E; ++I)
          removeLock(ExpLocation, *I, Parent);
        break;
      }

      case attr::ExclusiveLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        ExclusiveLocksRequiredAttr *ELRAttr =
            cast<ExclusiveLocksRequiredAttr>(Attr);

        for (ExclusiveLocksRequiredAttr::args_iterator
             I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
        break;
      }

      case attr::SharedLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);

        for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
             E = SLRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
        break;
      }

      case attr::LocksExcluded: {
        LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
        for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
            E = LEAttr->args_end(); I != E; ++I) {
          MutexID Mutex(Handler, *I, Parent);
          if (locksetContains(Mutex))
            Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
                                          ExpLocation);
        }
        break;
      }

      case attr::LockReturned:
        // FIXME: Deal with this attribute.
        break;

      // Ignore other (non thread-safety) attributes
      default:
        break;
    }
  }
}

} // end anonymous namespace

/// \brief Flags a warning for each lock that is in LSet2 but not in LSet1, and
/// for each mutex that is held shared in one lockset and exclusive in the
/// other.
static Lockset warnIfNotInFirstSetOrNotSameKind(ThreadSafetyHandler &Handler,
                                                const Lockset LSet1,
                                                const Lockset LSet2,
                                                Lockset Intersection,
                                                Lockset::Factory &Fact) {
  for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
    const MutexID &LSet2Mutex = I.getKey();
    const LockData &LSet2LockData = I.getData();
    if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
      if (LD->LKind != LSet2LockData.LKind) {
        Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
                                         LSet2LockData.AcquireLoc,
                                         LD->AcquireLoc);
        if (LD->LKind != LK_Exclusive)
          Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData);
      }
    } else {
      Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
                                        LSet2LockData.AcquireLoc);
    }
  }
  return Intersection;
}


/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and
/// C are the same. In the event of a difference, we use the intersection of
/// these two locksets at the start of D.
static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
                                const Lockset LSet1, const Lockset LSet2,
                                Lockset::Factory &Fact) {
  Lockset Intersection = LSet1;
  Intersection = warnIfNotInFirstSetOrNotSameKind(Handler, LSet1, LSet2,
                                                  Intersection, Fact);

  for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
    if (!LSet2.contains(I.getKey())) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();
      Handler.handleMutexHeldEndOfScope(Mutex.getName(),
                                        MissingLock.AcquireLoc);
      Intersection = Fact.remove(Intersection, Mutex);
    }
  }
  return Intersection;
}
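
// Illustrative example (hypothetical client code): in
//
//   void f(bool C) {
//     if (C)
//       Mu.Lock();
//     Data = 1;   // merge point: Mu is held on only one incoming path
//   }
//
// the lockset at the end of the "then" branch contains Mu while the lockset
// at the end of the implicit "else" branch does not, so intersectAndWarn
// reports the difference and continues with the intersection (Mu not held).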

/// \brief Returns the location of the first Stmt in a Block.
static SourceLocation getFirstStmtLocation(CFGBlock *Block) {
  SourceLocation Loc;
  for (CFGBlock::const_iterator BI = Block->begin(), BE = Block->end();
       BI != BE; ++BI) {
    if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&(*BI))) {
      Loc = CfgStmt->getStmt()->getLocStart();
      if (Loc.isValid()) return Loc;
    }
  }
  if (Stmt *S = Block->getTerminator().getStmt()) {
    Loc = S->getLocStart();
    if (Loc.isValid()) return Loc;
  }
  return Loc;
}

/// \brief Warn about different locksets along backedges of loops.
/// This function is called when we encounter a back edge. At that point,
/// we need to verify that the lockset before taking the backedge is the
/// same as the lockset before entering the loop.
///
/// \param LoopEntrySet Locks held before starting the loop
/// \param LoopReentrySet Locks held in the last CFG block of the loop
static void warnBackEdgeUnequalLocksets(ThreadSafetyHandler &Handler,
                                        const Lockset LoopReentrySet,
                                        const Lockset LoopEntrySet,
                                        SourceLocation FirstLocInLoop,
                                        Lockset::Factory &Fact) {
  assert(FirstLocInLoop.isValid());
  // Warn for locks held at the start of the loop, but not at the end.
  for (Lockset::iterator I = LoopEntrySet.begin(), E = LoopEntrySet.end();
       I != E; ++I) {
    if (!LoopReentrySet.contains(I.getKey())) {
      // We report this error at the location of the first statement in a loop
      Handler.handleNoLockLoopEntry(I.getKey().getName(), FirstLocInLoop);
    }
  }

  // Warn for locks held at the end of the loop, but not at the start.
  warnIfNotInFirstSetOrNotSameKind(Handler, LoopEntrySet, LoopReentrySet,
                                   LoopReentrySet, Fact);
}
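
// Illustrative example (hypothetical client code): in
//
//   void f(int N) {
//     Mu.Lock();
//     for (int I = 0; I < N; ++I)
//       Mu.Unlock();   // Mu is no longer held when the back edge is taken
//   }
//
// Mu is held when the loop is entered but not when the back edge is taken, so
// handleNoLockLoopEntry fires at the first statement of the loop.  The
// converse case (acquiring a lock in the loop body without releasing it) is
// reported by the warnIfNotInFirstSetOrNotSameKind call above.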


namespace clang { namespace thread_safety {
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const Decl *D = AC.getDecl();
  if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector? Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
       E = SortedGraph.end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();

    VisitedBlocks.insert(CurrBlock);

    // Use the default initial lockset in case there are no predecessors.
    Lockset &Entryset = EntryLocksets[CurrBlockID];
    Lockset &Exitset = ExitLocksets[CurrBlockID];

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {

      // if *PI -> CurrBlock is a back edge
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      if (!LocksetInitialized) {
        Entryset = ExitLocksets[PrevBlockID];
        LocksetInitialized = true;
      } else {
        Entryset = intersectAndWarn(Handler, Entryset,
                                    ExitLocksets[PrevBlockID], LocksetFactory);
      }
    }

    BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
        LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
    }
    Exitset = LocksetBuilder.getLockset();

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock), we need to check that the lockset of CurrBlock is equal
    // to the lockset held at the beginning of FirstLoopBlock. We can look up
    // the lockset held at the beginning of FirstLoopBlock in the EntryLocksets
    // map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {

      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      SourceLocation FirstLoopLocation = getFirstStmtLocation(FirstLoopBlock);

      assert(FirstLoopLocation.isValid());

      // Fail gracefully in release code.
      if (!FirstLoopLocation.isValid())
        continue;

      Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
      Lockset LoopEnd = ExitLocksets[CurrBlockID];
      warnBackEdgeUnequalLocksets(Handler, LoopEnd, PreLoop, FirstLoopLocation,
                                  LocksetFactory);
    }
  }

  Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];
  if (!FinalLockset.isEmpty()) {
    for (Lockset::iterator I = FinalLockset.begin(), E = FinalLockset.end();
         I != E; ++I) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();

      std::string FunName = "<unknown>";
      if (const NamedDecl *ContextDecl = dyn_cast<NamedDecl>(AC.getDecl())) {
        FunName = ContextDecl->getDeclName().getAsString();
      }

      Handler.handleNoUnlock(Mutex.getName(), FunName, MissingLock.AcquireLoc);
    }
  }
}
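
// Illustrative example (hypothetical client code): in
//
//   void f() {
//     Mu.Lock();
//     return;      // Mu is still held in the lockset of the exit block
//   }
//
// the lockset at the CFG exit block is non-empty, so handleNoUnlock is called
// for Mu with the enclosing function name "f".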

/// \brief Helper function that returns a LockKind required for the given level
/// of access.
LockKind getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read:
      return LK_Shared;
    case AK_Written:
      return LK_Exclusive;
  }
}
}} // end namespace clang::thread_safety