// ThreadSafety.cpp revision 2e5156274b8051217565b557bfa14c80f7990e9c
1//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
11// conditions), based off of an annotation system.
12//
13// See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more
14// information.
15//
16//===----------------------------------------------------------------------===//
17
18#include "clang/Analysis/Analyses/ThreadSafety.h"
19#include "clang/Analysis/Analyses/PostOrderCFGView.h"
20#include "clang/Analysis/AnalysisContext.h"
21#include "clang/Analysis/CFG.h"
22#include "clang/Analysis/CFGStmtMap.h"
23#include "clang/AST/DeclCXX.h"
24#include "clang/AST/ExprCXX.h"
25#include "clang/AST/StmtCXX.h"
26#include "clang/AST/StmtVisitor.h"
27#include "clang/Basic/SourceManager.h"
28#include "clang/Basic/SourceLocation.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/FoldingSet.h"
31#include "llvm/ADT/ImmutableMap.h"
32#include "llvm/ADT/PostOrderIterator.h"
33#include "llvm/ADT/SmallVector.h"
34#include "llvm/ADT/StringRef.h"
35#include "llvm/Support/raw_ostream.h"
36#include <algorithm>
37#include <utility>
38#include <vector>
39
40using namespace clang;
41using namespace thread_safety;
42
// Key method definition: the out-of-line virtual destructor anchors the
// vtable for ThreadSafetyHandler in this translation unit.
ThreadSafetyHandler::~ThreadSafetyHandler() {}
45
46namespace {
47
/// \brief A MutexID object uniquely identifies a particular mutex, and
/// is built from an Expr* (i.e. calling a lock function).
///
/// Thread-safety analysis works by comparing lock expressions.  Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
/// a particular mutex object at run-time.  Subsequent occurrences of the same
/// expression (where "same" means syntactic equality) will refer to the same
/// run-time object if three conditions hold:
/// (1) Local variables in the expression, such as "x" have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
///
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
///
/// Clang introduces an additional wrinkle, which is that it is difficult to
/// derive canonical expressions, or compare expressions directly for equality.
/// Thus, we identify a mutex not by an Expr, but by the set of named
/// declarations that are referenced by the Expr.  In other words,
/// x->foo->bar.mu will be a four element vector with the Decls for
/// mu, bar, and foo, and x.  The vector will uniquely identify the expression
/// for all practical purposes.
///
/// Note we will need to perform substitution on "this" and function parameter
/// names when constructing a lock expression.
///
/// For example:
/// class C { Mutex Mu;  void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
/// void myFunc(C *X) { ... X->lock() ... }
/// The original expression for the mutex acquired by myFunc is "this->Mu", but
/// "X" is substituted for "this" so we get X->Mu();
///
/// For another example:
/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
/// MyList *MyL;
/// foo(MyL);  // requires lock MyL->Mu to be held
class MutexID {
  // The Decls referenced by the mutex expression, innermost first
  // (e.g. x->foo->bar.mu yields [mu, bar, foo, x]).  Empty means invalid.
  SmallVector<NamedDecl*, 2> DeclSeq;

  /// Build a Decl sequence representing the lock from the given expression.
  /// Recursive function that terminates on DeclRefExpr.
  /// Note: this function merely creates a MutexID; it does not check to
  /// ensure that the original expression is a valid mutex expression.
  ///
  /// \param Exp     The (sub-)expression currently being walked.
  /// \param D       The declaration to which the lock/unlock attribute is
  ///                attached; used to recognize references to its parameters.
  /// \param Parent  Expression substituted for "this", or null.
  /// \param NumArgs Number of entries in FunArgs.
  /// \param FunArgs Call arguments substituted for the formal parameters of D,
  ///                or null when no substitution is being performed.
  void buildMutexID(Expr *Exp, const NamedDecl *D, Expr *Parent,
                    unsigned NumArgs, Expr **FunArgs) {
    if (!Exp) {
      // A null sub-expression invalidates the whole MutexID.
      DeclSeq.clear();
      return;
    }

    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
      NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
      ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
      if (PV) {
        FunctionDecl *FD =
          cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
        unsigned i = PV->getFunctionScopeIndex();

        if (FunArgs && FD == D->getCanonicalDecl()) {
          // Substitute call arguments for references to function parameters
          assert(i < NumArgs);
          buildMutexID(FunArgs[i], D, 0, 0, 0);
          return;
        }
        // Map the param back to the param of the original function declaration.
        DeclSeq.push_back(FD->getParamDecl(i));
        return;
      }
      // Not a function parameter -- just store the reference.
      DeclSeq.push_back(ND);
    } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
      // Record the member, then recurse on the object expression.
      NamedDecl *ND = ME->getMemberDecl();
      DeclSeq.push_back(ND);
      buildMutexID(ME->getBase(), D, Parent, NumArgs, FunArgs);
    } else if (isa<CXXThisExpr>(Exp)) {
      // Substitute Parent for "this", if a Parent expression was supplied.
      if (Parent)
        buildMutexID(Parent, D, 0, 0, 0);
      else
        return;  // mutexID is still valid in this case
    } else if (UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp))
      // Look through unary operators (e.g. dereference / address-of).
      buildMutexID(UOE->getSubExpr(), D, Parent, NumArgs, FunArgs);
    else if (CastExpr *CE = dyn_cast<CastExpr>(Exp))
      // Look through implicit and explicit casts.
      buildMutexID(CE->getSubExpr(), D, Parent, NumArgs, FunArgs);
    else
      DeclSeq.clear(); // Mark as invalid lock expression.
  }

  /// \brief Construct a MutexID from an expression.
  /// \param MutexExp The original mutex expression within an attribute
  /// \param DeclExp An expression involving the Decl on which the attribute
  ///        occurs.
  /// \param D  The declaration to which the lock/unlock attribute is attached.
  void buildMutexIDFromExp(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
    Expr *Parent = 0;
    unsigned NumArgs = 0;
    Expr **FunArgs = 0;

    // If we are processing a raw attribute expression, with no substitutions.
    if (DeclExp == 0) {
      buildMutexID(MutexExp, D, 0, 0, 0);
      return;
    }

    // Examine DeclExp to find Parent and FunArgs, which are used to substitute
    // for formal parameters when we call buildMutexID later.
    if (MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
      Parent = ME->getBase();
    } else if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
      Parent = CE->getImplicitObjectArgument();
      NumArgs = CE->getNumArgs();
      FunArgs = CE->getArgs();
    } else if (CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
      NumArgs = CE->getNumArgs();
      FunArgs = CE->getArgs();
    } else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
      Parent = 0;  // FIXME -- get the parent from DeclStmt
      NumArgs = CE->getNumArgs();
      FunArgs = CE->getArgs();
    } else if (D && isa<CXXDestructorDecl>(D)) {
      // There's no such thing as a "destructor call" in the AST.
      Parent = DeclExp;
    }

    // If the attribute has no arguments, then assume the argument is "this".
    if (MutexExp == 0) {
      buildMutexID(Parent, D, 0, 0, 0);
      return;
    }

    buildMutexID(MutexExp, D, Parent, NumArgs, FunArgs);
  }

public:
  /// Construct an empty (invalid) MutexID; used e.g. for the
  /// LockData::UnderlyingMutex placeholder.
  explicit MutexID(clang::Decl::EmptyShell e) {
    DeclSeq.clear();
  }

  /// \param MutexExp The original mutex expression within an attribute
  /// \param DeclExp An expression involving the Decl on which the attribute
  ///        occurs.
  /// \param D  The declaration to which the lock/unlock attribute is attached.
  /// Caller must check isValid() after construction.
  MutexID(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
    buildMutexIDFromExp(MutexExp, DeclExp, D);
  }

  /// Return true if this is a valid decl sequence.
  /// Caller must call this by hand after construction to handle errors.
  bool isValid() const {
    return !DeclSeq.empty();
  }

  /// Issue a warning about an invalid lock expression.
  /// Note: nothing is reported if DeclExp is null or has an invalid location.
  static void warnInvalidLock(ThreadSafetyHandler &Handler, Expr* MutexExp,
                              Expr *DeclExp, const NamedDecl* D) {
    SourceLocation Loc;
    if (DeclExp)
      Loc = DeclExp->getExprLoc();

    // FIXME: add a note about the attribute location in MutexExp or D
    if (Loc.isValid())
      Handler.handleInvalidLockExp(Loc);
  }

  bool operator==(const MutexID &other) const {
    return DeclSeq == other.DeclSeq;
  }

  bool operator!=(const MutexID &other) const {
    return !(*this == other);
  }

  // SmallVector overloads Operator< to do lexicographic ordering. Note that
  // we use pointer equality (and <) to compare NamedDecls. This means the order
  // of MutexIDs in a lockset is nondeterministic. In order to output
  // diagnostics in a deterministic ordering, we must order all diagnostics to
  // output by SourceLocation when iterating through this lockset.
  bool operator<(const MutexID &other) const {
    return DeclSeq < other.DeclSeq;
  }

  /// \brief Returns the name of the first Decl in the list for a given MutexID;
  /// e.g. the lock expression foo.bar() has name "bar".
  /// The caret will point unambiguously to the lock expression, so using this
  /// name in diagnostics is a way to get simple, and consistent, mutex names.
  /// We do not want to output the entire expression text for security reasons.
  StringRef getName() const {
    assert(isValid());
    return DeclSeq.front()->getName();
  }

  /// Hash this MutexID into ID (FoldingSet support); consistent with
  /// operator==, which also compares the Decl pointer sequence.
  void Profile(llvm::FoldingSetNodeID &ID) const {
    for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
         E = DeclSeq.end(); I != E; ++I) {
      ID.AddPointer(*I);
    }
  }
};
246
247
/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
/// The main body of the analysis maps MutexIDs to LockDatas.
struct LockData {
  SourceLocation AcquireLoc;  // Where the lock was (most recently) acquired.

  /// \brief LKind stores whether a lock is held shared or exclusively.
  /// Note that this analysis does not currently support either re-entrant
  /// locking or lock "upgrading" and "downgrading" between exclusive and
  /// shared.
  ///
  /// FIXME: add support for re-entrant locking and lock up/downgrading
  LockKind LKind;
  MutexID UnderlyingMutex;  // for ScopedLockable objects

  // Construct lock info with no underlying mutex (the common case).
  LockData(SourceLocation AcquireLoc, LockKind LKind)
    : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Decl::EmptyShell())
  {}

  // Construct lock info for a scoped-lockable object that manages Mu.
  LockData(SourceLocation AcquireLoc, LockKind LKind, const MutexID &Mu)
    : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Mu) {}

  // NOTE(review): UnderlyingMutex does not participate in equality or
  // Profile below -- looks intentional (it is auxiliary data for scoped
  // locks), but confirm.
  bool operator==(const LockData &other) const {
    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
  }

  bool operator!=(const LockData &other) const {
    return !(*this == other);
  }

  // FoldingSet support; hashes the same fields operator== compares.
  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(AcquireLoc.getRawEncoding());
    ID.AddInteger(LKind);
  }
};
284
285
/// A Lockset maps each MutexID (defined above) to information about how it has
/// been locked.
typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
/// A LocalVarContext maps each local variable to the index of its current
/// definition (its SSA name) in LocalVariableMap::VarDefinitions below.
typedef llvm::ImmutableMap<NamedDecl*, unsigned> LocalVarContext;

class LocalVariableMap;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };
295
296/// CFGBlockInfo is a struct which contains all the information that is
297/// maintained for each block in the CFG.  See LocalVariableMap for more
298/// information about the contexts.
299struct CFGBlockInfo {
300  Lockset EntrySet;             // Lockset held at entry to block
301  Lockset ExitSet;              // Lockset held at exit from block
302  LocalVarContext EntryContext; // Context held at entry to block
303  LocalVarContext ExitContext;  // Context held at exit from block
304  SourceLocation EntryLoc;      // Location of first statement in block
305  SourceLocation ExitLoc;       // Location of last statement in block.
306  unsigned EntryIndex;          // Used to replay contexts later
307
308  const Lockset &getSet(CFGBlockSide Side) const {
309    return Side == CBS_Entry ? EntrySet : ExitSet;
310  }
311  SourceLocation getLocation(CFGBlockSide Side) const {
312    return Side == CBS_Entry ? EntryLoc : ExitLoc;
313  }
314
315private:
316  CFGBlockInfo(Lockset EmptySet, LocalVarContext EmptyCtx)
317    : EntrySet(EmptySet), ExitSet(EmptySet),
318      EntryContext(EmptyCtx), ExitContext(EmptyCtx)
319  { }
320
321public:
322  static CFGBlockInfo getEmptyBlockInfo(Lockset::Factory &F,
323                                        LocalVariableMap &M);
324};
325
326
327
// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions.  It provides SSA-like functionality when traversing the
// CFG.  Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace references to local
// variables with their SSA-names.  Instead, we compute a Context for each
// point in the code, which maps local variables to the appropriate SSA-name.
// This map changes with each assignment.
//
// The map is computed in a single pass over the CFG.  Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  typedef LocalVarContext Context;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted.  A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    NamedDecl *Dec;       // The original declaration for this variable.
    Expr *Exp;            // The expression for this variable, OR
    unsigned Ref;         // Reference to another VarDefinition
    Context Ctx;          // The map with which Exp should be interpreted.

    // A definition with no expression is a reference to another definition.
    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(NamedDecl *D, Expr *E, Context C)
      : Dec(D), Exp(E), Ref(0), Ctx(C)
    { }

    // Create reference to previous definition
    VarDefinition(NamedDecl *D, unsigned R, Context C)
      : Dec(D), Exp(0), Ref(R), Ctx(C)
    { }
  };

private:
  Context::Factory ContextFactory;            // Allocator for immutable contexts.
  std::vector<VarDefinition> VarDefinitions;  // All definitions, indexed by SSA name.
  std::vector<unsigned> CtxIndices;           // Per-block indices into SavedContexts.
  std::vector<std::pair<Stmt*, Context> > SavedContexts;  // (stmt, ctx) replay log.

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(0, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  /// Returns null if D has no definition in Ctx.
  const VarDefinition* lookup(NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return 0;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context.  Returns
  /// NULL if the expression is not statically known.  If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  Expr* lookupExpr(NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return 0;

    unsigned i = *P;
    // Chase chains of reference definitions until a concrete expression (or
    // the undefined placeholder at index 0) is reached.
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return 0;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S.  This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG.  It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
    // traverseCFG pushes a sentinel entry at the very end of SavedContexts,
    // so CtxIndex+1 stays within bounds.
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  /// Print the name (and SSA index) of definition i to llvm::errs().
  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    // Skip index 0, the undefined placeholder.
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, PostOrderCFGView *SortedGraph,
                     std::vector<CFGBlockInfo> &BlockInfo);

protected:
  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S,C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      // Note: the new definition is interpreted in the *old* context Ctx.
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);

  friend class VarMapBuilder;
};
538
539
540// This has to be defined after LocalVariableMap.
541CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(Lockset::Factory &F,
542                                             LocalVariableMap &M) {
543  return CFGBlockInfo(F.getEmptyMap(), M.getEmptyContext());
544}
545
546
/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;         // The map being built (not owned).
  LocalVariableMap::Context Ctx;  // Current context, updated as stmts are visited.

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
    : VMap(VM), Ctx(C) {}

  // Add new local variables declared by S to the variable map.
  void VisitDeclStmt(DeclStmt *S);
  // Update the definition of a variable on assignment.
  void VisitBinaryOperator(BinaryOperator *BO);
};
559
560
561// Add new local variables to the variable map
562void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
563  bool modifiedCtx = false;
564  DeclGroupRef DGrp = S->getDeclGroup();
565  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
566    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
567      Expr *E = VD->getInit();
568
569      // Add local variables with trivial type to the variable map
570      QualType T = VD->getType();
571      if (T.isTrivialType(VD->getASTContext())) {
572        Ctx = VMap->addDefinition(VD, E, Ctx);
573        modifiedCtx = true;
574      }
575    }
576  }
577  if (modifiedCtx)
578    VMap->saveContext(S, Ctx);
579}
580
581// Update local variable definitions in variable map
582void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
583  if (!BO->isAssignmentOp())
584    return;
585
586  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
587
588  // Update the variable map and current context.
589  if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
590    ValueDecl *VDec = DRE->getDecl();
591    if (Ctx.lookup(VDec)) {
592      if (BO->getOpcode() == BO_Assign)
593        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
594      else
595        // FIXME -- handle compound assignment operators
596        Ctx = VMap->clearDefinition(VDec, Ctx);
597      VMap->saveContext(BO, Ctx);
598    }
599  }
600}
601
602
603// Computes the intersection of two contexts.  The intersection is the
604// set of variables which have the same definition in both contexts;
605// variables with different definitions are discarded.
606LocalVariableMap::Context
607LocalVariableMap::intersectContexts(Context C1, Context C2) {
608  Context Result = C1;
609  for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
610    NamedDecl *Dec = I.getKey();
611    unsigned i1 = I.getData();
612    const unsigned *i2 = C2.lookup(Dec);
613    if (!i2)             // variable doesn't exist on second path
614      Result = removeDefinition(Dec, Result);
615    else if (*i2 != i1)  // variable exists, but has different definition
616      Result = clearDefinition(Dec, Result);
617  }
618  return Result;
619}
620
621// For every variable in C, create a new variable that refers to the
622// definition in C.  Return a new context that contains these new variables.
623// (We use this for a naive implementation of SSA on loop back-edges.)
624LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
625  Context Result = getEmptyContext();
626  for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
627    NamedDecl *Dec = I.getKey();
628    unsigned i = I.getData();
629    Result = addReference(Dec, i, Result);
630  }
631  return Result;
632}
633
634// This routine also takes the intersection of C1 and C2, but it does so by
635// altering the VarDefinitions.  C1 must be the result of an earlier call to
636// createReferenceContext.
637void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
638  for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
639    NamedDecl *Dec = I.getKey();
640    unsigned i1 = I.getData();
641    VarDefinition *VDef = &VarDefinitions[i1];
642    assert(VDef->isReference());
643
644    const unsigned *i2 = C2.lookup(Dec);
645    if (!i2 || (*i2 != i1))
646      VDef->Ref = 0;    // Mark this variable as undefined
647  }
648}
649
650
// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself.  At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm.  Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block.  We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass.  Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals.  (These correspond to places where SSA
// might have to insert a phi node.)  On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.)  E.g.
//
//                       { Context           | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
//
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I!= E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block: the intersection
    // of the exit contexts of all forward-edge predecessors.
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge, skip it (an unvisited
      // predecessor in topological order means a back edge)
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(0, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          const CFGStmt *CS = cast<CFGStmt>(&*BI);
          VMapBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge, skip it
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd   = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  // (getNextContext relies on this sentinel when indexing CtxIndex+1).
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(0, BlockInfo[exitID].ExitContext);
}
773
774/// Find the appropriate source locations to use when producing diagnostics for
775/// each block in the CFG.
776static void findBlockLocations(CFG *CFGraph,
777                               PostOrderCFGView *SortedGraph,
778                               std::vector<CFGBlockInfo> &BlockInfo) {
779  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
780       E = SortedGraph->end(); I!= E; ++I) {
781    const CFGBlock *CurrBlock = *I;
782    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
783
784    // Find the source location of the last statement in the block, if the
785    // block is not empty.
786    if (const Stmt *S = CurrBlock->getTerminator()) {
787      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
788    } else {
789      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
790           BE = CurrBlock->rend(); BI != BE; ++BI) {
791        // FIXME: Handle other CFGElement kinds.
792        if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
793          CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
794          break;
795        }
796      }
797    }
798
799    if (!CurrBlockInfo->ExitLoc.isInvalid()) {
800      // This block contains at least one statement. Find the source location
801      // of the first statement in the block.
802      for (CFGBlock::const_iterator BI = CurrBlock->begin(),
803           BE = CurrBlock->end(); BI != BE; ++BI) {
804        // FIXME: Handle other CFGElement kinds.
805        if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
806          CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
807          break;
808        }
809      }
810    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
811               CurrBlock != &CFGraph->getExit()) {
812      // The block is empty, and has a single predecessor. Use its exit
813      // location.
814      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
815          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
816    }
817  }
818}
819
/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;

  ThreadSafetyHandler &Handler;       // Sink for all diagnostics.
  Lockset::Factory    LocksetFactory; // Factory for immutable lockset maps.
  LocalVariableMap    LocalVarMap;    // SSA-style map of local variable defs.

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}

  /// Merge the locksets of two block sides, warning about any mutex that is
  /// held on one side but not the other; returns the merged lockset.
  Lockset intersectAndWarn(const CFGBlockInfo &Block1, CFGBlockSide Side1,
                           const CFGBlockInfo &Block2, CFGBlockSide Side2,
                           LockErrorKind LEK);

  /// Resolve MutexExp and return LSet with that mutex added; warns (and
  /// returns LSet unchanged) if the expression is not a valid mutex.
  Lockset addLock(Lockset &LSet, Expr *MutexExp, const NamedDecl *D,
                  LockKind LK, SourceLocation Loc);

  /// Run the thread-safety analysis over the CFG of the given declaration.
  void runAnalysis(AnalysisDeclContext &AC);
};
840
841
842/// \brief We use this class to visit different types of expressions in
843/// CFGBlocks, and build up the lockset.
844/// An expression may cause us to add or remove locks from the lockset, or else
845/// output error messages related to missing locks.
846/// FIXME: In future, we may be able to not inherit from a visitor.
847class BuildLockset : public StmtVisitor<BuildLockset> {
848  friend class ThreadSafetyAnalyzer;
849
850  ThreadSafetyHandler &Handler;
851  Lockset::Factory &LocksetFactory;
852  LocalVariableMap &LocalVarMap;
853
854  Lockset LSet;
855  LocalVariableMap::Context LVarCtx;
856  unsigned CtxIndex;
857
858  // Helper functions
859  void addLock(const MutexID &Mutex, const LockData &LDat);
860  void removeLock(const MutexID &Mutex, SourceLocation UnlockLoc);
861
862  template <class AttrType>
863  void addLocksToSet(LockKind LK, AttrType *Attr,
864                     Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
865  void removeLocksFromSet(UnlockFunctionAttr *Attr,
866                          Expr *Exp, NamedDecl* FunDecl);
867
868  const ValueDecl *getValueDecl(Expr *Exp);
869  void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK,
870                           Expr *MutexExp, ProtectedOperationKind POK);
871  void checkAccess(Expr *Exp, AccessKind AK);
872  void checkDereference(Expr *Exp, AccessKind AK);
873  void handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
874
875  template <class AttrType>
876  void addTrylock(LockKind LK, AttrType *Attr, Expr *Exp, NamedDecl *FunDecl,
877                  const CFGBlock* PredBlock, const CFGBlock *CurrBlock,
878                  Expr *BrE, bool Neg);
879  CallExpr* getTrylockCallExpr(Stmt *Cond, LocalVariableMap::Context C,
880                               bool &Negate);
881  void handleTrylock(Stmt *Cond, const CFGBlock* PredBlock,
882                     const CFGBlock *CurrBlock);
883
884  /// \brief Returns true if the lockset contains a lock, regardless of whether
885  /// the lock is held exclusively or shared.
886  bool locksetContains(const MutexID &Lock) const {
887    return LSet.lookup(Lock);
888  }
889
890  /// \brief Returns true if the lockset contains a lock with the passed in
891  /// locktype.
892  bool locksetContains(const MutexID &Lock, LockKind KindRequested) const {
893    const LockData *LockHeld = LSet.lookup(Lock);
894    return (LockHeld && KindRequested == LockHeld->LKind);
895  }
896
897  /// \brief Returns true if the lockset contains a lock with at least the
898  /// passed in locktype. So for example, if we pass in LK_Shared, this function
899  /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
900  /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
901  bool locksetContainsAtLeast(const MutexID &Lock,
902                              LockKind KindRequested) const {
903    switch (KindRequested) {
904      case LK_Shared:
905        return locksetContains(Lock);
906      case LK_Exclusive:
907        return locksetContains(Lock, KindRequested);
908    }
909    llvm_unreachable("Unknown LockKind");
910  }
911
912public:
913  BuildLockset(ThreadSafetyAnalyzer *analyzer, CFGBlockInfo &Info)
914    : StmtVisitor<BuildLockset>(),
915      Handler(analyzer->Handler),
916      LocksetFactory(analyzer->LocksetFactory),
917      LocalVarMap(analyzer->LocalVarMap),
918      LSet(Info.EntrySet),
919      LVarCtx(Info.EntryContext),
920      CtxIndex(Info.EntryIndex)
921  {}
922
923  void VisitUnaryOperator(UnaryOperator *UO);
924  void VisitBinaryOperator(BinaryOperator *BO);
925  void VisitCastExpr(CastExpr *CE);
926  void VisitCallExpr(CallExpr *Exp);
927  void VisitCXXConstructExpr(CXXConstructExpr *Exp);
928  void VisitDeclStmt(DeclStmt *S);
929};
930
931/// \brief Add a new lock to the lockset, warning if the lock is already there.
932/// \param Mutex -- the Mutex expression for the lock
933/// \param LDat  -- the LockData for the lock
934void BuildLockset::addLock(const MutexID &Mutex, const LockData& LDat) {
935  // FIXME: deal with acquired before/after annotations.
936  // FIXME: Don't always warn when we have support for reentrant locks.
937  if (locksetContains(Mutex))
938    Handler.handleDoubleLock(Mutex.getName(), LDat.AcquireLoc);
939  else
940    LSet = LocksetFactory.add(LSet, Mutex, LDat);
941}
942
943/// \brief Remove a lock from the lockset, warning if the lock is not there.
944/// \param LockExp The lock expression corresponding to the lock to be removed
945/// \param UnlockLoc The source location of the unlock (only used in error msg)
946void BuildLockset::removeLock(const MutexID &Mutex, SourceLocation UnlockLoc) {
947  const LockData *LDat = LSet.lookup(Mutex);
948  if (!LDat)
949    Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
950  else {
951    // For scoped-lockable vars, remove the mutex associated with this var.
952    if (LDat->UnderlyingMutex.isValid())
953      removeLock(LDat->UnderlyingMutex, UnlockLoc);
954    LSet = LocksetFactory.remove(LSet, Mutex);
955  }
956}
957
/// \brief This function, parameterized by an attribute type, is used to add a
/// set of locks specified as attribute arguments to the lockset.
/// \param LK      whether the locks are acquired shared or exclusive
/// \param Attr    the lock-function attribute naming the mutexes
/// \param Exp     the expression for the acquiring call
/// \param FunDecl the called function or constructor
/// \param VD      for constructor calls via VisitDeclStmt, the variable being
///                constructed (used to detect scoped-lockable objects);
///                null otherwise
template <typename AttrType>
void BuildLockset::addLocksToSet(LockKind LK, AttrType *Attr,
                                 Expr *Exp, NamedDecl* FunDecl, VarDecl *VD) {
  typedef typename AttrType::args_iterator iterator_type;

  SourceLocation ExpLocation = Exp->getExprLoc();

  // Figure out if we're calling the constructor of scoped lockable class
  bool isScopedVar = false;
  if (VD) {
    if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FunDecl)) {
      CXXRecordDecl* PD = CD->getParent();
      if (PD && PD->getAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    // NOTE(review): isScopedVar is not consulted on this path, so a
    // zero-argument attribute on a scoped-lockable constructor gets no
    // var-to-mutex mapping -- confirm whether that is intentional.
    MutexID Mutex(0, Exp, FunDecl);
    if (!Mutex.isValid())
      MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
    else
      addLock(Mutex, LockData(ExpLocation, LK));
    return;
  }

  // Add each mutex named in the attribute arguments.
  for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
    MutexID Mutex(*I, Exp, FunDecl);
    if (!Mutex.isValid())
      MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
    else {
      addLock(Mutex, LockData(ExpLocation, LK));
      if (isScopedVar) {
        // For scoped lockable vars, map this var to its underlying mutex.
        // The DeclRefExpr lives on the stack: MutexID only needs it long
        // enough to compute a canonical key for the variable.
        DeclRefExpr DRE(VD, VD->getType(), VK_LValue, VD->getLocation());
        MutexID SMutex(&DRE, 0, 0);
        addLock(SMutex, LockData(VD->getLocation(), LK, Mutex));
      }
    }
  }
}
1002
1003/// \brief This function removes a set of locks specified as attribute
1004/// arguments from the lockset.
1005void BuildLockset::removeLocksFromSet(UnlockFunctionAttr *Attr,
1006                                      Expr *Exp, NamedDecl* FunDecl) {
1007  SourceLocation ExpLocation;
1008  if (Exp) ExpLocation = Exp->getExprLoc();
1009
1010  if (Attr->args_size() == 0) {
1011    // The mutex held is the "this" object.
1012    MutexID Mu(0, Exp, FunDecl);
1013    if (!Mu.isValid())
1014      MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
1015    else
1016      removeLock(Mu, ExpLocation);
1017    return;
1018  }
1019
1020  for (UnlockFunctionAttr::args_iterator I = Attr->args_begin(),
1021       E = Attr->args_end(); I != E; ++I) {
1022    MutexID Mutex(*I, Exp, FunDecl);
1023    if (!Mutex.isValid())
1024      MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
1025    else
1026      removeLock(Mutex, ExpLocation);
1027  }
1028}
1029
1030/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
1031const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
1032  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
1033    return DR->getDecl();
1034
1035  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
1036    return ME->getMemberDecl();
1037
1038  return 0;
1039}
1040
1041/// \brief Warn if the LSet does not contain a lock sufficient to protect access
1042/// of at least the passed in AccessKind.
1043void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
1044                                      AccessKind AK, Expr *MutexExp,
1045                                      ProtectedOperationKind POK) {
1046  LockKind LK = getLockKindFromAccessKind(AK);
1047
1048  MutexID Mutex(MutexExp, Exp, D);
1049  if (!Mutex.isValid())
1050    MutexID::warnInvalidLock(Handler, MutexExp, Exp, D);
1051  else if (!locksetContainsAtLeast(Mutex, LK))
1052    Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
1053}
1054
1055/// \brief This method identifies variable dereferences and checks pt_guarded_by
1056/// and pt_guarded_var annotations. Note that we only check these annotations
1057/// at the time a pointer is dereferenced.
1058/// FIXME: We need to check for other types of pointer dereferences
1059/// (e.g. [], ->) and deal with them here.
1060/// \param Exp An expression that has been read or written.
1061void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
1062  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
1063  if (!UO || UO->getOpcode() != clang::UO_Deref)
1064    return;
1065  Exp = UO->getSubExpr()->IgnoreParenCasts();
1066
1067  const ValueDecl *D = getValueDecl(Exp);
1068  if(!D || !D->hasAttrs())
1069    return;
1070
1071  if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
1072    Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());
1073
1074  const AttrVec &ArgAttrs = D->getAttrs();
1075  for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
1076    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
1077      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
1078}
1079
1080/// \brief Checks guarded_by and guarded_var attributes.
1081/// Whenever we identify an access (read or write) of a DeclRefExpr or
1082/// MemberExpr, we need to check whether there are any guarded_by or
1083/// guarded_var attributes, and make sure we hold the appropriate mutexes.
1084void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
1085  const ValueDecl *D = getValueDecl(Exp);
1086  if(!D || !D->hasAttrs())
1087    return;
1088
1089  if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
1090    Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());
1091
1092  const AttrVec &ArgAttrs = D->getAttrs();
1093  for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
1094    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
1095      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
1096}
1097
1098/// \brief Process a function call, method call, constructor call,
1099/// or destructor call.  This involves looking at the attributes on the
1100/// corresponding function/method/constructor/destructor, issuing warnings,
1101/// and updating the locksets accordingly.
1102///
1103/// FIXME: For classes annotated with one of the guarded annotations, we need
1104/// to treat const method calls as reads and non-const method calls as writes,
1105/// and check that the appropriate locks are held. Non-const method calls with
1106/// the same signature as const method calls can be also treated as reads.
1107///
1108/// FIXME: We need to also visit CallExprs to catch/check global functions.
1109///
1110/// FIXME: Do not flag an error for member variables accessed in constructors/
1111/// destructors
1112void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
1113  AttrVec &ArgAttrs = D->getAttrs();
1114  for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
1115    Attr *Attr = ArgAttrs[i];
1116    switch (Attr->getKind()) {
1117      // When we encounter an exclusive lock function, we need to add the lock
1118      // to our lockset with kind exclusive.
1119      case attr::ExclusiveLockFunction: {
1120        ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(Attr);
1121        addLocksToSet(LK_Exclusive, A, Exp, D, VD);
1122        break;
1123      }
1124
1125      // When we encounter a shared lock function, we need to add the lock
1126      // to our lockset with kind shared.
1127      case attr::SharedLockFunction: {
1128        SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(Attr);
1129        addLocksToSet(LK_Shared, A, Exp, D, VD);
1130        break;
1131      }
1132
1133      // When we encounter an unlock function, we need to remove unlocked
1134      // mutexes from the lockset, and flag a warning if they are not there.
1135      case attr::UnlockFunction: {
1136        UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
1137        removeLocksFromSet(UFAttr, Exp, D);
1138        break;
1139      }
1140
1141      case attr::ExclusiveLocksRequired: {
1142        ExclusiveLocksRequiredAttr *ELRAttr =
1143            cast<ExclusiveLocksRequiredAttr>(Attr);
1144
1145        for (ExclusiveLocksRequiredAttr::args_iterator
1146             I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
1147          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
1148        break;
1149      }
1150
1151      case attr::SharedLocksRequired: {
1152        SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);
1153
1154        for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
1155             E = SLRAttr->args_end(); I != E; ++I)
1156          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
1157        break;
1158      }
1159
1160      case attr::LocksExcluded: {
1161        LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
1162        for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
1163            E = LEAttr->args_end(); I != E; ++I) {
1164          MutexID Mutex(*I, Exp, D);
1165          if (!Mutex.isValid())
1166            MutexID::warnInvalidLock(Handler, *I, Exp, D);
1167          else if (locksetContains(Mutex))
1168            Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
1169                                          Exp->getExprLoc());
1170        }
1171        break;
1172      }
1173
1174      // Ignore other (non thread-safety) attributes
1175      default:
1176        break;
1177    }
1178  }
1179}
1180
1181
1182/// \brief Add lock to set, if the current block is in the taken branch of a
1183/// trylock.
1184template <class AttrType>
1185void BuildLockset::addTrylock(LockKind LK, AttrType *Attr, Expr *Exp,
1186                              NamedDecl *FunDecl, const CFGBlock *PredBlock,
1187                              const CFGBlock *CurrBlock, Expr *BrE, bool Neg) {
1188  // Find out which branch has the lock
1189  bool branch = 0;
1190  if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
1191    branch = BLE->getValue();
1192  }
1193  else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
1194    branch = ILE->getValue().getBoolValue();
1195  }
1196  int branchnum = branch ? 0 : 1;
1197  if (Neg) branchnum = !branchnum;
1198
1199  // If we've taken the trylock branch, then add the lock
1200  int i = 0;
1201  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1202       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
1203    if (*SI == CurrBlock && i == branchnum) {
1204      addLocksToSet(LK, Attr, Exp, FunDecl, 0);
1205    }
1206  }
1207}
1208
1209
1210// If Cond can be traced back to a function call, return the call expression.
1211// The negate variable should be called with false, and will be set to true
1212// if the function call is negated, e.g. if (!mu.tryLock(...))
1213CallExpr* BuildLockset::getTrylockCallExpr(Stmt *Cond,
1214                               LocalVariableMap::Context C,
1215                               bool &Negate) {
1216  if (!Cond)
1217    return 0;
1218
1219  if (CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
1220    return CallExp;
1221  }
1222  else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
1223    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
1224  }
1225  else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
1226    Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
1227    return getTrylockCallExpr(E, C, Negate);
1228  }
1229  else if (UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
1230    if (UOP->getOpcode() == UO_LNot) {
1231      Negate = !Negate;
1232      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
1233    }
1234  }
1235  // FIXME -- handle && and || as well.
1236  return NULL;
1237}
1238
1239
1240/// \brief Process a conditional branch from a previous block to the current
1241/// block, looking for trylock calls.
1242void BuildLockset::handleTrylock(Stmt *Cond, const CFGBlock *PredBlock,
1243                                 const CFGBlock *CurrBlock) {
1244  bool Negate = false;
1245  CallExpr *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
1246  if (!Exp)
1247    return;
1248
1249  NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
1250  if(!FunDecl || !FunDecl->hasAttrs())
1251    return;
1252
1253  // If the condition is a call to a Trylock function, then grab the attributes
1254  AttrVec &ArgAttrs = FunDecl->getAttrs();
1255  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
1256    Attr *Attr = ArgAttrs[i];
1257    switch (Attr->getKind()) {
1258      case attr::ExclusiveTrylockFunction: {
1259        ExclusiveTrylockFunctionAttr *A =
1260          cast<ExclusiveTrylockFunctionAttr>(Attr);
1261        addTrylock(LK_Exclusive, A, Exp, FunDecl, PredBlock, CurrBlock,
1262                   A->getSuccessValue(), Negate);
1263        break;
1264      }
1265      case attr::SharedTrylockFunction: {
1266        SharedTrylockFunctionAttr *A =
1267          cast<SharedTrylockFunctionAttr>(Attr);
1268        addTrylock(LK_Shared, A, Exp, FunDecl, PredBlock, CurrBlock,
1269                   A->getSuccessValue(), Negate);
1270        break;
1271      }
1272      default:
1273        break;
1274    }
1275  }
1276}
1277
1278
1279/// \brief For unary operations which read and write a variable, we need to
1280/// check whether we hold any required mutexes. Reads are checked in
1281/// VisitCastExpr.
1282void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
1283  switch (UO->getOpcode()) {
1284    case clang::UO_PostDec:
1285    case clang::UO_PostInc:
1286    case clang::UO_PreDec:
1287    case clang::UO_PreInc: {
1288      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
1289      checkAccess(SubExp, AK_Written);
1290      checkDereference(SubExp, AK_Written);
1291      break;
1292    }
1293    default:
1294      break;
1295  }
1296}
1297
1298/// For binary operations which assign to a variable (writes), we need to check
1299/// whether we hold any required mutexes.
1300/// FIXME: Deal with non-primitive types.
1301void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
1302  if (!BO->isAssignmentOp())
1303    return;
1304
1305  // adjust the context
1306  LVarCtx = LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
1307
1308  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
1309  checkAccess(LHSExp, AK_Written);
1310  checkDereference(LHSExp, AK_Written);
1311}
1312
1313/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
1314/// need to ensure we hold any required mutexes.
1315/// FIXME: Deal with non-primitive types.
1316void BuildLockset::VisitCastExpr(CastExpr *CE) {
1317  if (CE->getCastKind() != CK_LValueToRValue)
1318    return;
1319  Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
1320  checkAccess(SubExp, AK_Read);
1321  checkDereference(SubExp, AK_Read);
1322}
1323
1324
1325void BuildLockset::VisitCallExpr(CallExpr *Exp) {
1326  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
1327  if(!D || !D->hasAttrs())
1328    return;
1329  handleCall(Exp, D);
1330}
1331
/// \brief Intentionally a no-op: constructor calls are only processed when
/// they appear as variable initializers, in VisitDeclStmt below.
void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
  // FIXME -- only handles constructors in DeclStmt below.
}
1335
1336void BuildLockset::VisitDeclStmt(DeclStmt *S) {
1337  // adjust the context
1338  LVarCtx = LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
1339
1340  DeclGroupRef DGrp = S->getDeclGroup();
1341  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
1342    Decl *D = *I;
1343    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
1344      Expr *E = VD->getInit();
1345      if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
1346        NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
1347        if (!CtorD || !CtorD->hasAttrs())
1348          return;
1349        handleCall(CE, CtorD, VD);
1350      }
1351    }
1352  }
1353}
1354
1355
1356/// \brief Compute the intersection of two locksets and issue warnings for any
1357/// locks in the symmetric difference.
1358///
1359/// This function is used at a merge point in the CFG when comparing the lockset
1360/// of each branch being merged. For example, given the following sequence:
1361/// A; if () then B; else C; D; we need to check that the lockset after B and C
1362/// are the same. In the event of a difference, we use the intersection of these
1363/// two locksets at the start of D.
1364Lockset ThreadSafetyAnalyzer::intersectAndWarn(const CFGBlockInfo &Block1,
1365                                               CFGBlockSide Side1,
1366                                               const CFGBlockInfo &Block2,
1367                                               CFGBlockSide Side2,
1368                                               LockErrorKind LEK) {
1369  Lockset LSet1 = Block1.getSet(Side1);
1370  Lockset LSet2 = Block2.getSet(Side2);
1371
1372  Lockset Intersection = LSet1;
1373  for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
1374    const MutexID &LSet2Mutex = I.getKey();
1375    const LockData &LSet2LockData = I.getData();
1376    if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
1377      if (LD->LKind != LSet2LockData.LKind) {
1378        Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
1379                                         LSet2LockData.AcquireLoc,
1380                                         LD->AcquireLoc);
1381        if (LD->LKind != LK_Exclusive)
1382          Intersection = LocksetFactory.add(Intersection, LSet2Mutex,
1383                                            LSet2LockData);
1384      }
1385    } else {
1386      Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
1387                                        LSet2LockData.AcquireLoc,
1388                                        Block1.getLocation(Side1), LEK);
1389    }
1390  }
1391
1392  for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
1393    if (!LSet2.contains(I.getKey())) {
1394      const MutexID &Mutex = I.getKey();
1395      const LockData &MissingLock = I.getData();
1396      Handler.handleMutexHeldEndOfScope(Mutex.getName(),
1397                                        MissingLock.AcquireLoc,
1398                                        Block2.getLocation(Side2), LEK);
1399      Intersection = LocksetFactory.remove(Intersection, Mutex);
1400    }
1401  }
1402  return Intersection;
1403}
1404
1405Lockset ThreadSafetyAnalyzer::addLock(Lockset &LSet, Expr *MutexExp,
1406                                      const NamedDecl *D,
1407                                      LockKind LK, SourceLocation Loc) {
1408  MutexID Mutex(MutexExp, 0, D);
1409  if (!Mutex.isValid()) {
1410    MutexID::warnInvalidLock(Handler, MutexExp, 0, D);
1411    return LSet;
1412  }
1413  LockData NewLock(Loc, LK);
1414  return LocksetFactory.add(LSet, Mutex, NewLock);
1415}
1416
1417/// \brief Check a function's CFG for thread-safety violations.
1418///
1419/// We traverse the blocks in the CFG, compute the set of mutexes that are held
1420/// at the end of each block, and issue warnings for thread safety violations.
1421/// Each block in the CFG is traversed exactly once.
1422void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
1423  CFG *CFGraph = AC.getCFG();
1424  if (!CFGraph) return;
1425  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
1426
1427  if (!D)
1428    return;  // Ignore anonymous functions for now.
1429  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
1430    return;
1431
1432  std::vector<CFGBlockInfo> BlockInfo(CFGraph->getNumBlockIDs(),
1433    CFGBlockInfo::getEmptyBlockInfo(LocksetFactory, LocalVarMap));
1434
1435  // We need to explore the CFG via a "topological" ordering.
1436  // That way, we will be guaranteed to have information about required
1437  // predecessor locksets when exploring a new block.
1438  PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
1439  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
1440
1441  // Compute SSA names for local variables
1442  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
1443
1444  // Fill in source locations for all CFGBlocks.
1445  findBlockLocations(CFGraph, SortedGraph, BlockInfo);
1446
1447  // Add locks from exclusive_locks_required and shared_locks_required
1448  // to initial lockset.
1449  if (!SortedGraph->empty() && D->hasAttrs()) {
1450    const CFGBlock *FirstBlock = *SortedGraph->begin();
1451    Lockset &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
1452    const AttrVec &ArgAttrs = D->getAttrs();
1453    for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
1454      Attr *Attr = ArgAttrs[i];
1455      SourceLocation AttrLoc = Attr->getLocation();
1456      if (SharedLocksRequiredAttr *SLRAttr
1457            = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
1458        for (SharedLocksRequiredAttr::args_iterator
1459            SLRIter = SLRAttr->args_begin(),
1460            SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
1461          InitialLockset = addLock(InitialLockset,
1462                                   *SLRIter, D, LK_Shared,
1463                                   AttrLoc);
1464      } else if (ExclusiveLocksRequiredAttr *ELRAttr
1465                   = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
1466        for (ExclusiveLocksRequiredAttr::args_iterator
1467            ELRIter = ELRAttr->args_begin(),
1468            ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
1469          InitialLockset = addLock(InitialLockset,
1470                                   *ELRIter, D, LK_Exclusive,
1471                                   AttrLoc);
1472      }
1473    }
1474  }
1475
1476  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
1477       E = SortedGraph->end(); I!= E; ++I) {
1478    const CFGBlock *CurrBlock = *I;
1479    int CurrBlockID = CurrBlock->getBlockID();
1480    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
1481
1482    // Use the default initial lockset in case there are no predecessors.
1483    VisitedBlocks.insert(CurrBlock);
1484
1485    // Iterate through the predecessor blocks and warn if the lockset for all
1486    // predecessors is not the same. We take the entry lockset of the current
1487    // block to be the intersection of all previous locksets.
1488    // FIXME: By keeping the intersection, we may output more errors in future
1489    // for a lock which is not in the intersection, but was in the union. We
1490    // may want to also keep the union in future. As an example, let's say
1491    // the intersection contains Mutex L, and the union contains L and M.
1492    // Later we unlock M. At this point, we would output an error because we
1493    // never locked M; although the real error is probably that we forgot to
1494    // lock M on all code paths. Conversely, let's say that later we lock M.
1495    // In this case, we should compare against the intersection instead of the
1496    // union because the real error is probably that we forgot to unlock M on
1497    // all code paths.
1498    bool LocksetInitialized = false;
1499    llvm::SmallVector<CFGBlock*, 8> SpecialBlocks;
1500    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
1501         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {
1502
1503      // if *PI -> CurrBlock is a back edge
1504      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
1505        continue;
1506
1507      // If the previous block ended in a 'continue' or 'break' statement, then
1508      // a difference in locksets is probably due to a bug in that block, rather
1509      // than in some other predecessor. In that case, keep the other
1510      // predecessor's lockset.
1511      if (const Stmt *Terminator = (*PI)->getTerminator()) {
1512        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
1513          SpecialBlocks.push_back(*PI);
1514          continue;
1515        }
1516      }
1517
1518      int PrevBlockID = (*PI)->getBlockID();
1519      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
1520
1521      if (!LocksetInitialized) {
1522        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
1523        LocksetInitialized = true;
1524      } else {
1525        CurrBlockInfo->EntrySet =
1526          intersectAndWarn(*CurrBlockInfo, CBS_Entry,
1527                           *PrevBlockInfo, CBS_Exit,
1528                           LEK_LockedSomePredecessors);
1529      }
1530    }
1531
1532    // Process continue and break blocks. Assume that the lockset for the
1533    // resulting block is unaffected by any discrepancies in them.
1534    for (unsigned SpecialI = 0, SpecialN = SpecialBlocks.size();
1535         SpecialI < SpecialN; ++SpecialI) {
1536      CFGBlock *PrevBlock = SpecialBlocks[SpecialI];
1537      int PrevBlockID = PrevBlock->getBlockID();
1538      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
1539
1540      if (!LocksetInitialized) {
1541        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
1542        LocksetInitialized = true;
1543      } else {
1544        // Determine whether this edge is a loop terminator for diagnostic
1545        // purposes. FIXME: A 'break' statement might be a loop terminator, but
1546        // it might also be part of a switch. Also, a subsequent destructor
1547        // might add to the lockset, in which case the real issue might be a
1548        // double lock on the other path.
1549        const Stmt *Terminator = PrevBlock->getTerminator();
1550        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
1551
1552        // Do not update EntrySet.
1553        intersectAndWarn(*CurrBlockInfo, CBS_Entry, *PrevBlockInfo, CBS_Exit,
1554                         IsLoop ? LEK_LockedSomeLoopIterations
1555                                : LEK_LockedSomePredecessors);
1556      }
1557    }
1558
1559    BuildLockset LocksetBuilder(this, *CurrBlockInfo);
1560    CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
1561                                  PE = CurrBlock->pred_end();
1562    if (PI != PE) {
1563      // If the predecessor ended in a branch, then process any trylocks.
1564      // FIXME -- check to make sure there's only one predecessor.
1565      if (Stmt *TCE = (*PI)->getTerminatorCondition()) {
1566        LocksetBuilder.handleTrylock(TCE, *PI, CurrBlock);
1567      }
1568    }
1569
1570    // Visit all the statements in the basic block.
1571    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
1572         BE = CurrBlock->end(); BI != BE; ++BI) {
1573      switch (BI->getKind()) {
1574        case CFGElement::Statement: {
1575          const CFGStmt *CS = cast<CFGStmt>(&*BI);
1576          LocksetBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
1577          break;
1578        }
1579        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
1580        case CFGElement::AutomaticObjectDtor: {
1581          const CFGAutomaticObjDtor *AD = cast<CFGAutomaticObjDtor>(&*BI);
1582          CXXDestructorDecl *DD = const_cast<CXXDestructorDecl*>(
1583            AD->getDestructorDecl(AC.getASTContext()));
1584          if (!DD->hasAttrs())
1585            break;
1586
1587          // Create a dummy expression,
1588          VarDecl *VD = const_cast<VarDecl*>(AD->getVarDecl());
1589          DeclRefExpr DRE(VD, VD->getType(), VK_LValue,
1590                          AD->getTriggerStmt()->getLocEnd());
1591          LocksetBuilder.handleCall(&DRE, DD);
1592          break;
1593        }
1594        default:
1595          break;
1596      }
1597    }
1598    CurrBlockInfo->ExitSet = LocksetBuilder.LSet;
1599
1600    // For every back edge from CurrBlock (the end of the loop) to another block
1601    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
1602    // the one held at the beginning of FirstLoopBlock. We can look up the
1603    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
1604    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
1605         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {
1606
1607      // if CurrBlock -> *SI is *not* a back edge
1608      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
1609        continue;
1610
1611      CFGBlock *FirstLoopBlock = *SI;
1612      CFGBlockInfo &PreLoop = BlockInfo[FirstLoopBlock->getBlockID()];
1613      CFGBlockInfo &LoopEnd = BlockInfo[CurrBlockID];
1614      intersectAndWarn(LoopEnd, CBS_Exit, PreLoop, CBS_Entry,
1615                       LEK_LockedSomeLoopIterations);
1616    }
1617  }
1618
1619  CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
1620  CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
1621
1622  // FIXME: Should we call this function for all blocks which exit the function?
1623  intersectAndWarn(Initial, CBS_Entry, Final, CBS_Exit,
1624                   LEK_LockedAtEndOfFunction);
1625}
1626
1627} // end anonymous namespace
1628
1629
1630namespace clang {
1631namespace thread_safety {
1632
1633/// \brief Check a function's CFG for thread-safety violations.
1634///
1635/// We traverse the blocks in the CFG, compute the set of mutexes that are held
1636/// at the end of each block, and issue warnings for thread safety violations.
1637/// Each block in the CFG is traversed exactly once.
1638void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
1639                             ThreadSafetyHandler &Handler) {
1640  ThreadSafetyAnalyzer Analyzer(Handler);
1641  Analyzer.runAnalysis(AC);
1642}
1643
1644/// \brief Helper function that returns a LockKind required for the given level
1645/// of access.
1646LockKind getLockKindFromAccessKind(AccessKind AK) {
1647  switch (AK) {
1648    case AK_Read :
1649      return LK_Shared;
1650    case AK_Written :
1651      return LK_Exclusive;
1652  }
1653  llvm_unreachable("Unknown AccessKind");
1654}
1655
1656}} // end namespace clang::thread_safety
1657