ThreadSafety.cpp revision 0fed26d94553881011aa7ec30cee3ed0da71c7a1
//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://gcc.gnu.org/wiki/ThreadSafetyAnnotation for the gcc version.
//
//===----------------------------------------------------------------------===//
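//
// For illustration, this is the kind of annotated code the analysis checks.
// This is only a sketch: it assumes that Mutex is a user-defined type whose
// Lock/Unlock methods carry the exclusive_lock_function/unlock_function
// attributes, and that GUARDED_BY expands to the guarded_by attribute:
//
//   class Account {
//     Mutex Mu;
//     int Balance GUARDED_BY(Mu);
//
//     void deposit(int Amount) {
//       Mu.Lock();
//       Balance += Amount;   // OK: Mu is held.
//       Mu.Unlock();
//     }
//
//     void withdraw(int Amount) {
//       Balance -= Amount;   // warning: writing Balance requires Mu.
//     }
//   };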

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <algorithm>
#include <vector>

using namespace clang;
using namespace thread_safety;

// Helper functions
static Expr *getParent(Expr *Exp) {
  if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getBase();
  if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp))
    return CE->getImplicitObjectArgument();
  return 0;
}

namespace {
/// \brief Implements a set of CFGBlocks using a BitVector.
///
/// This class contains a minimal interface, primarily dictated by the SetType
/// template parameter of the llvm::po_iterator template, as used with external
/// storage. We also use this set to keep track of which CFGBlocks we visit
/// during the analysis.
class CFGBlockSet {
  llvm::BitVector VisitedBlockIDs;

public:
  // po_iterator requires this iterator, but the only interface needed is the
  // value_type typedef.
  struct iterator {
    typedef const CFGBlock *value_type;
  };

  CFGBlockSet() {}
  CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}

  /// \brief Set the bit associated with a particular CFGBlock.
  /// This is the important method for the SetType template parameter.
  bool insert(const CFGBlock *Block) {
    // Note that insert() is called by po_iterator, which doesn't check to make
    // sure that Block is non-null.  Moreover, the CFGBlock iterator will
    // occasionally hand out null pointers for pruned edges, so we catch those
    // here.
    if (Block == 0)
      return false;  // a null block marks a pruned (trivially false) edge.
    if (VisitedBlockIDs.test(Block->getBlockID()))
      return false;
    VisitedBlockIDs.set(Block->getBlockID());
    return true;
  }

  /// \brief Check whether the bit for a CFGBlock has already been set.
  /// This method is for tracking visited blocks in the main thread-safety
  /// loop. Block must not be null.
  bool alreadySet(const CFGBlock *Block) {
    return VisitedBlockIDs.test(Block->getBlockID());
  }
};

/// \brief A helper class for iterating through CFGBlocks in topological order.
class TopologicallySortedCFG {
  typedef llvm::po_iterator<const CFG*, CFGBlockSet, true>  po_iterator;

  std::vector<const CFGBlock*> Blocks;

public:
  typedef std::vector<const CFGBlock*>::reverse_iterator iterator;

  TopologicallySortedCFG(const CFG *CFGraph) {
    Blocks.reserve(CFGraph->getNumBlockIDs());
    CFGBlockSet BSet(CFGraph);

    for (po_iterator I = po_iterator::begin(CFGraph, BSet),
         E = po_iterator::end(CFGraph, BSet); I != E; ++I) {
      Blocks.push_back(*I);
    }
  }

  iterator begin() {
    return Blocks.rbegin();
  }

  iterator end() {
    return Blocks.rend();
  }
};

/// \brief A MutexID object uniquely identifies a particular mutex, and
/// is built from an Expr* (i.e. calling a lock function).
///
/// Thread-safety analysis works by comparing lock expressions.  Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
/// a particular mutex object at run-time.  Subsequent occurrences of the same
/// expression (where "same" means syntactic equality) will refer to the same
/// run-time object if three conditions hold:
/// (1) Local variables in the expression, such as "x", have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
///
/// Clang introduces an additional wrinkle, which is that it is difficult to
/// derive canonical expressions, or compare expressions directly for equality.
/// Thus, we identify a mutex not by an Expr, but by the set of named
/// declarations that are referenced by the Expr.  In other words,
/// x->foo->bar.mu will be a four-element vector containing the Decls for
/// mu, bar, foo, and x.  The vector will uniquely identify the expression
/// for all practical purposes.
///
/// Note that we will need to perform substitution on "this" and function
/// parameter names when constructing a lock expression.
///
/// For example:
/// class C { Mutex Mu;  void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
/// void myFunc(C *X) { ... X->lock() ... }
/// The original expression for the mutex acquired by myFunc is "this->Mu", but
/// "X" is substituted for "this" so we get X->Mu.
///
/// For another example:
/// void foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
/// MyList *MyL;
/// foo(MyL);  // requires lock MyL->Mu to be held
class MutexID {
  SmallVector<NamedDecl*, 2> DeclSeq;
  ThreadSafetyHandler &Handler;

  /// Build a Decl sequence representing the lock from the given expression.
  /// Recursive function that bottoms out when the final DeclRefExpr is reached.
  void buildMutexID(Expr *Exp, Expr *Parent) {
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
      NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
      DeclSeq.push_back(ND);
    } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
      NamedDecl *ND = ME->getMemberDecl();
      DeclSeq.push_back(ND);
      buildMutexID(ME->getBase(), Parent);
    } else if (isa<CXXThisExpr>(Exp)) {
      if (!Parent)
        return;
      buildMutexID(Parent, 0);
    } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp))
      buildMutexID(CE->getSubExpr(), Parent);
    else
      Handler.handleInvalidLockExp(Exp->getExprLoc());
  }

public:
  MutexID(ThreadSafetyHandler &Handler, Expr *LExpr, Expr *ParentExpr)
    : Handler(Handler) {
    buildMutexID(LExpr, ParentExpr);
    assert(!DeclSeq.empty());
  }

  bool operator==(const MutexID &other) const {
    return DeclSeq == other.DeclSeq;
  }

  bool operator!=(const MutexID &other) const {
    return !(*this == other);
  }

  // SmallVector overloads operator< to do lexicographic ordering. Note that
  // we use pointer equality (and <) to compare NamedDecls. This means the order
  // of MutexIDs in a lockset is nondeterministic. In order to output
  // diagnostics in a deterministic ordering, we must sort all diagnostics by
  // SourceLocation when iterating through this lockset.
  bool operator<(const MutexID &other) const {
    return DeclSeq < other.DeclSeq;
  }

  /// \brief Returns the name of the first Decl in the list for a given MutexID;
  /// e.g. the lock expression foo.bar() has name "bar".
  /// The caret will point unambiguously to the lock expression, so using this
  /// name in diagnostics is a way to get simple, and consistent, mutex names.
  /// We do not want to output the entire expression text for security reasons.
  StringRef getName() const {
    return DeclSeq.front()->getName();
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
         E = DeclSeq.end(); I != E; ++I) {
      ID.AddPointer(*I);
    }
  }
};

/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
/// The main body of the analysis maps MutexIDs to LockDatas.
struct LockData {
  SourceLocation AcquireLoc;

  /// \brief LKind stores whether a lock is held shared or exclusively.
  /// Note that this analysis does not currently support either re-entrant
  /// locking or lock "upgrading" and "downgrading" between exclusive and
  /// shared.
  ///
  /// FIXME: add support for re-entrant locking and lock up/downgrading
  LockKind LKind;

  LockData(SourceLocation AcquireLoc, LockKind LKind)
    : AcquireLoc(AcquireLoc), LKind(LKind) {}

  bool operator==(const LockData &other) const {
    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
  }

  bool operator!=(const LockData &other) const {
    return !(*this == other);
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(AcquireLoc.getRawEncoding());
    ID.AddInteger(LKind);
  }
};

/// A Lockset maps each MutexID (defined above) to information about how it has
/// been locked.
typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
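//
// Because Lockset is an ImmutableMap, adding or removing an entry returns a
// new map and leaves the original untouched, which lets us keep per-block
// entry/exit locksets cheaply. A minimal usage sketch (Mu and Loc are
// hypothetical names):
//
//   Lockset::Factory Fact;
//   Lockset LS = Fact.getEmptyMap();
//   LS = Fact.add(LS, Mu, LockData(Loc, LK_Exclusive)); // new map holding Mu
//   if (const LockData *LD = LS.lookup(Mu))
//     ; // Mu is held; LD->LKind says whether shared or exclusive
//   LS = Fact.remove(LS, Mu);                           // new map without Mu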

/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
  ThreadSafetyHandler &Handler;
  Lockset LSet;
  Lockset::Factory &LocksetFactory;

  // Helper functions
  void removeLock(SourceLocation UnlockLoc, Expr *LockExp, Expr *Parent);
  void addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
               LockKind LK);
  const ValueDecl *getValueDecl(Expr *Exp);
  void warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK);
  void checkAccess(Expr *Exp, AccessKind AK);
  void checkDereference(Expr *Exp, AccessKind AK);

  template <class AttrType>
  void addLocksToSet(LockKind LK, Attr *Attr, CXXMemberCallExpr *Exp);

  /// \brief Returns true if the lockset contains a lock, regardless of whether
  /// the lock is held exclusively or shared.
  bool locksetContains(MutexID Lock) const {
    return LSet.lookup(Lock);
  }

  /// \brief Returns true if the lockset contains a lock with the passed in
  /// locktype.
  bool locksetContains(MutexID Lock, LockKind KindRequested) const {
    const LockData *LockHeld = LSet.lookup(Lock);
    return (LockHeld && KindRequested == LockHeld->LKind);
  }

  /// \brief Returns true if the lockset contains a lock with at least the
  /// passed in locktype. So for example, if we pass in LK_Shared, this function
  /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
  /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
  bool locksetContainsAtLeast(MutexID Lock, LockKind KindRequested) const {
    switch (KindRequested) {
      case LK_Shared:
        return locksetContains(Lock);
      case LK_Exclusive:
        return locksetContains(Lock, KindRequested);
    }
  }

public:
  BuildLockset(ThreadSafetyHandler &Handler, Lockset LS, Lockset::Factory &F)
    : StmtVisitor<BuildLockset>(), Handler(Handler), LSet(LS),
      LocksetFactory(F) {}

  Lockset getLockset() {
    return LSet;
  }

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp);
};

/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param LockLoc The source location of the acquire
/// \param LockExp The lock expression corresponding to the lock to be added
void BuildLockset::addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
                           LockKind LK) {
  // FIXME: deal with acquired before/after annotations
  MutexID Mutex(Handler, LockExp, Parent);
  LockData NewLock(LockLoc, LK);

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (locksetContains(Mutex))
    Handler.handleDoubleLock(Mutex.getName(), LockLoc);
  LSet = LocksetFactory.add(LSet, Mutex, NewLock);
}

/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param LockExp The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void BuildLockset::removeLock(SourceLocation UnlockLoc, Expr *LockExp,
                              Expr *Parent) {
  MutexID Mutex(Handler, LockExp, Parent);

  Lockset NewLSet = LocksetFactory.remove(LSet, Mutex);
  if (NewLSet == LSet)
    Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);

  LSet = NewLSet;
}

/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return 0;
}

/// \brief Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK) {
  LockKind LK = getLockKindFromAccessKind(AK);
  Expr *Parent = getParent(Exp);
  MutexID Mutex(Handler, MutexExp, Parent);
  if (!locksetContainsAtLeast(Mutex, LK))
    Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
}


/// \brief This method identifies variable dereferences and checks pt_guarded_by
/// and pt_guarded_var annotations. Note that we only check these annotations
/// at the time a pointer is dereferenced.
/// FIXME: We need to check for other types of pointer dereferences
/// (e.g. [], ->) and deal with them here.
/// \param Exp An expression that has been read or written.
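///
/// For illustration only (a sketch; PT_GUARDED_BY is assumed to expand to the
/// pt_guarded_by attribute):
///   Mutex Mu;
///   int *P PT_GUARDED_BY(Mu);
///   void f() { *P = 1; }   // warning: dereferencing P requires Mu to be held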
void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
  if (!UO || UO->getOpcode() != clang::UO_Deref)
    return;
  Exp = UO->getSubExpr()->IgnoreParenCasts();

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
}

/// \brief Checks guarded_by and guarded_var attributes.
/// Whenever we identify an access (read or write) of a DeclRefExpr or
/// MemberExpr, we need to check whether there are any guarded_by or
/// guarded_var attributes, and make sure we hold the appropriate mutexes.
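///
/// For illustration only (a sketch; GUARDED_BY is assumed to expand to the
/// guarded_by attribute):
///   Mutex Mu;
///   int Data GUARDED_BY(Mu);
///   int  readIt()       { return Data; } // warn: requires Mu (shared is enough)
///   void writeIt(int V) { Data = V; }    // warn: requires Mu held exclusively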
void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}

/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
      checkAccess(SubExp, AK_Written);
      checkDereference(SubExp, AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;
  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
  checkAccess(LHSExp, AK_Written);
  checkDereference(LHSExp, AK_Written);
}

/// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
  checkAccess(SubExp, AK_Read);
  checkDereference(SubExp, AK_Read);
}

/// \brief This function, parameterized by an attribute type, is used to add a
/// set of locks specified as attribute arguments to the lockset.
template <typename AttrType>
void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
                                 CXXMemberCallExpr *Exp) {
  typedef typename AttrType::args_iterator iterator_type;
  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();
  AttrType *SpecificAttr = cast<AttrType>(Attr);

  if (SpecificAttr->args_size() == 0) {
    // The mutex held is the "this" object.
    addLock(ExpLocation, Parent, Parent, LK);
    return;
  }

  for (iterator_type I = SpecificAttr->args_begin(),
       E = SpecificAttr->args_end(); I != E; ++I)
    addLock(ExpLocation, *I, Parent, LK);
}

/// \brief When visiting CXXMemberCallExprs we need to examine the attributes on
/// the method that is being called and add, remove or check locks in the
/// lockset accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
///
/// FIXME: We need to also visit CallExprs to catch/check global functions.
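///
/// For illustration only (a sketch; the macros are assumed to expand to the
/// corresponding attributes):
///   class C {
///     Mutex Mu;
///     void lock()   EXCLUSIVE_LOCK_FUNCTION(Mu);
///     void unlock() UNLOCK_FUNCTION(Mu);
///     void work()   EXCLUSIVE_LOCKS_REQUIRED(Mu);
///     void idle()   LOCKS_EXCLUDED(Mu);
///   };
///   void f(C *X) {
///     X->work();    // warning: calling work requires Mu to be held
///     X->lock();
///     X->idle();    // warning: idle must not be called while Mu is held
///     X->unlock();
///   }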
void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());

  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();

  if (!D || !D->hasAttrs())
    return;

  AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      // When we encounter an exclusive lock function, we need to add the lock
      // to our lockset with kind exclusive.
      case attr::ExclusiveLockFunction:
        addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp);
        break;

      // When we encounter a shared lock function, we need to add the lock
      // to our lockset with kind shared.
      case attr::SharedLockFunction:
        addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp);
        break;

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::UnlockFunction: {
        UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);

        if (UFAttr->args_size() == 0) { // The lock held is the "this" object.
          removeLock(ExpLocation, Parent, Parent);
          break;
        }

        for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(),
             E = UFAttr->args_end(); I != E; ++I)
          removeLock(ExpLocation, *I, Parent);
        break;
      }

      case attr::ExclusiveLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        ExclusiveLocksRequiredAttr *ELRAttr =
            cast<ExclusiveLocksRequiredAttr>(Attr);

        for (ExclusiveLocksRequiredAttr::args_iterator
             I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
        break;
      }

      case attr::SharedLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);

        for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
             E = SLRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
        break;
      }

      case attr::LocksExcluded: {
        LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
        for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
            E = LEAttr->args_end(); I != E; ++I) {
          MutexID Mutex(Handler, *I, Parent);
          if (locksetContains(Mutex))
            Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
                                          ExpLocation);
        }
        break;
      }

      case attr::LockReturned:
        // FIXME: Deal with this attribute.
        break;

      // Ignore other (non thread-safety) attributes
      default:
        break;
    }
  }
}

} // end anonymous namespace

/// \brief Flags a warning for each lock that is in LSet2 but not in LSet1, or
/// that is held shared in one lockset and exclusive in the other.
static Lockset warnIfNotInFirstSetOrNotSameKind(ThreadSafetyHandler &Handler,
                                                const Lockset LSet1,
                                                const Lockset LSet2,
                                                Lockset Intersection,
                                                Lockset::Factory &Fact) {
  for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
    const MutexID &LSet2Mutex = I.getKey();
    const LockData &LSet2LockData = I.getData();
    if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
      if (LD->LKind != LSet2LockData.LKind) {
        Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
                                         LSet2LockData.AcquireLoc,
                                         LD->AcquireLoc);
        if (LD->LKind != LK_Exclusive)
          Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData);
      }
    } else {
      Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
                                        LSet2LockData.AcquireLoc);
    }
  }
  return Intersection;
}


/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
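///
/// For illustration only:
///   if (B)
///     Mu.Lock();
///   // merge point: Mu is held on one incoming edge but not on the other, so
///   // a warning is issued and Mu is dropped from the merged lockset.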
static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
                                const Lockset LSet1, const Lockset LSet2,
                                Lockset::Factory &Fact) {
  Lockset Intersection = LSet1;
  Intersection = warnIfNotInFirstSetOrNotSameKind(Handler, LSet1, LSet2,
                                                  Intersection, Fact);

  for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
    if (!LSet2.contains(I.getKey())) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();
      Handler.handleMutexHeldEndOfScope(Mutex.getName(),
                                        MissingLock.AcquireLoc);
      Intersection = Fact.remove(Intersection, Mutex);
    }
  }
  return Intersection;
}

/// \brief Returns the location of the first Stmt in a Block.
static SourceLocation getFirstStmtLocation(CFGBlock *Block) {
  SourceLocation Loc;
  for (CFGBlock::const_iterator BI = Block->begin(), BE = Block->end();
       BI != BE; ++BI) {
    if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&(*BI))) {
      Loc = CfgStmt->getStmt()->getLocStart();
      if (Loc.isValid()) return Loc;
    }
  }
  if (Stmt *S = Block->getTerminator().getStmt()) {
    Loc = S->getLocStart();
    if (Loc.isValid()) return Loc;
  }
  return Loc;
}

/// \brief Warn about different locksets along back edges of loops.
/// This function is called when we encounter a back edge. At that point,
/// we need to verify that the lockset before taking the back edge is the
/// same as the lockset before entering the loop.
///
/// \param LoopEntrySet Locks before starting the loop
/// \param LoopReentrySet Locks in the last CFG block of the loop
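///
/// For illustration only:
///   while (Cond) {
///     Mu.Lock();   // warning: Mu is still held when the loop repeats, so the
///   }              // lockset at the back edge differs from the loop-entry set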
static void warnBackEdgeUnequalLocksets(ThreadSafetyHandler &Handler,
                                        const Lockset LoopReentrySet,
                                        const Lockset LoopEntrySet,
                                        SourceLocation FirstLocInLoop,
                                        Lockset::Factory &Fact) {
  assert(FirstLocInLoop.isValid());
  // Warn for locks held at the start of the loop, but not the end.
  for (Lockset::iterator I = LoopEntrySet.begin(), E = LoopEntrySet.end();
       I != E; ++I) {
    if (!LoopReentrySet.contains(I.getKey())) {
      // We report this error at the location of the first statement in a loop
      Handler.handleNoLockLoopEntry(I.getKey().getName(), FirstLocInLoop);
    }
  }

  // Warn for locks held at the end of the loop, but not at the start.
  warnIfNotInFirstSetOrNotSameKind(Handler, LoopEntrySet, LoopReentrySet,
                                   LoopReentrySet, Fact);
}


namespace clang { namespace thread_safety {
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const Decl *D = AC.getDecl();
  if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector? Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
       E = SortedGraph.end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();

    VisitedBlocks.insert(CurrBlock);

    // Use the default initial lockset in case there are no predecessors.
    Lockset &Entryset = EntryLocksets[CurrBlockID];
    Lockset &Exitset = ExitLocksets[CurrBlockID];

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {

      // if *PI -> CurrBlock is a back edge
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      if (!LocksetInitialized) {
        Entryset = ExitLocksets[PrevBlockID];
        LocksetInitialized = true;
      } else {
        Entryset = intersectAndWarn(Handler, Entryset,
                                    ExitLocksets[PrevBlockID], LocksetFactory);
      }
    }

    BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
        LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
    }
    Exitset = LocksetBuilder.getLockset();

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of CurrBlock is equal
    // to the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLocksets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {

      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      SourceLocation FirstLoopLocation = getFirstStmtLocation(FirstLoopBlock);

      assert(FirstLoopLocation.isValid());

      // Fail gracefully in release code.
      if (!FirstLoopLocation.isValid())
        continue;

      Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
      Lockset LoopEnd = ExitLocksets[CurrBlockID];
      warnBackEdgeUnequalLocksets(Handler, LoopEnd, PreLoop, FirstLoopLocation,
                                  LocksetFactory);
    }
  }

  Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];
  if (!FinalLockset.isEmpty()) {
    for (Lockset::iterator I = FinalLockset.begin(), E = FinalLockset.end();
         I != E; ++I) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();

      std::string FunName = "<unknown>";
      if (const NamedDecl *ContextDecl = dyn_cast<NamedDecl>(AC.getDecl())) {
        FunName = ContextDecl->getDeclName().getAsString();
      }

      Handler.handleNoUnlock(Mutex.getName(), FunName, MissingLock.AcquireLoc);
    }
  }
}

/// \brief Helper function that returns a LockKind required for the given level
/// of access.
LockKind getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read:
      return LK_Shared;
    case AK_Written:
      return LK_Exclusive;
  }
}
}} // end namespace clang::thread_safety