ThreadSafety.cpp revision 2a35be803c405221f5f23c7bdedb91f09efdd3ac
//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based off of an annotation system.
//
// See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more
// information.
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <utility>
#include <vector>

using namespace clang;
using namespace thread_safety;

// Key method definition: defining the (out-of-line) virtual destructor here
// anchors the ThreadSafetyHandler vtable in this translation unit.
ThreadSafetyHandler::~ThreadSafetyHandler() {}

namespace {

/// \brief A MutexID object uniquely identifies a particular mutex, and
/// is built from an Expr* (i.e. calling a lock function).
///
/// Thread-safety analysis works by comparing lock expressions.  Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
/// a particular mutex object at run-time.
/// Subsequent occurrences of the same
/// expression (where "same" means syntactic equality) will refer to the same
/// run-time object if three conditions hold:
/// (1) Local variables in the expression, such as "x" have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
///
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
///
/// Clang introduces an additional wrinkle, which is that it is difficult to
/// derive canonical expressions, or compare expressions directly for equality.
/// Thus, we identify a mutex not by an Expr, but by the list of named
/// declarations that are referenced by the Expr.  In other words,
/// x->foo->bar.mu will be a four element vector with the Decls for
/// mu, bar, and foo, and x.  The vector will uniquely identify the expression
/// for all practical purposes.  Null is used to denote 'this'.
///
/// Note we will need to perform substitution on "this" and function parameter
/// names when constructing a lock expression.
///
/// For example:
/// class C { Mutex Mu;  void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
/// void myFunc(C *X) { ... X->lock() ... }
/// The original expression for the mutex acquired by myFunc is "this->Mu", but
/// "X" is substituted for "this" so we get X->Mu();
///
/// For another example:
/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
/// MyList *MyL;
/// foo(MyL);  // requires lock MyL->Mu to be held
class MutexID {
  // The Decls named by the lock expression, innermost first; e.g.
  // x->foo->bar.mu is stored as [mu, bar, foo, x].  A null entry denotes
  // 'this'.  An empty sequence marks the MutexID as invalid.
  SmallVector<NamedDecl*, 2> DeclSeq;

  /// Build a Decl sequence representing the lock from the given expression.
  /// Recursive function that terminates on DeclRefExpr.
  /// Note: this function merely creates a MutexID; it does not check to
  /// ensure that the original expression is a valid mutex expression.
  /// \param Exp     The (sub-)expression currently being flattened.
  /// \param D       Declaration the lock attribute is attached to; used to
  ///                recognize references to D's own parameters.
  /// \param Parent  Expression to substitute for 'this' (may be null).
  /// \param NumArgs Number of entries in \p FunArgs.
  /// \param FunArgs Call arguments to substitute for D's parameters
  ///                (may be null).
  void buildMutexID(Expr *Exp, const NamedDecl *D, Expr *Parent,
                    unsigned NumArgs, Expr **FunArgs) {
    if (!Exp) {
      // A null expression cannot name a mutex; mark this MutexID invalid.
      DeclSeq.clear();
      return;
    }

    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
      NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
      ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
      if (PV) {
        FunctionDecl *FD =
          cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
        unsigned i = PV->getFunctionScopeIndex();

        if (FunArgs && FD == D->getCanonicalDecl()) {
          // Substitute call arguments for references to function parameters
          assert(i < NumArgs);
          buildMutexID(FunArgs[i], D, 0, 0, 0);
          return;
        }
        // Map the param back to the param of the original function declaration.
        DeclSeq.push_back(FD->getParamDecl(i));
        return;
      }
      // Not a function parameter -- just store the reference.
      DeclSeq.push_back(ND);
    } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
      // Record the member, then recurse into the base object expression.
      NamedDecl *ND = ME->getMemberDecl();
      DeclSeq.push_back(ND);
      buildMutexID(ME->getBase(), D, Parent, NumArgs, FunArgs);
    } else if (isa<CXXThisExpr>(Exp)) {
      if (Parent)
        buildMutexID(Parent, D, 0, 0, 0);
      else {
        DeclSeq.push_back(0);  // Use 0 to represent 'this'.
        return;  // mutexID is still valid in this case
      }
    } else if (UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp))
      // Look through unary operators (e.g. dereference, address-of).
      buildMutexID(UOE->getSubExpr(), D, Parent, NumArgs, FunArgs);
    else if (CastExpr *CE = dyn_cast<CastExpr>(Exp))
      // Look through implicit and explicit casts.
      buildMutexID(CE->getSubExpr(), D, Parent, NumArgs, FunArgs);
    else
      DeclSeq.clear(); // Mark as invalid lock expression.
  }

  /// \brief Construct a MutexID from an expression.
  /// \param MutexExp The original mutex expression within an attribute
  /// \param DeclExp An expression involving the Decl on which the attribute
  ///        occurs.
  /// \param D The declaration to which the lock/unlock attribute is attached.
  void buildMutexIDFromExp(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
    Expr *Parent = 0;
    unsigned NumArgs = 0;
    Expr **FunArgs = 0;

    // If we are processing a raw attribute expression, with no substitutions.
    if (DeclExp == 0) {
      buildMutexID(MutexExp, D, 0, 0, 0);
      return;
    }

    // Examine DeclExp to find Parent and FunArgs, which are used to substitute
    // for formal parameters when we call buildMutexID later.
    if (MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
      // Member access: the base object stands in for 'this'.
      Parent = ME->getBase();
    } else if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
      // Member call: implicit object argument is 'this'; call arguments
      // substitute for the callee's formal parameters.
      Parent = CE->getImplicitObjectArgument();
      NumArgs = CE->getNumArgs();
      FunArgs = CE->getArgs();
    } else if (CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
      // Free-function call: arguments only, no 'this'.
      NumArgs = CE->getNumArgs();
      FunArgs = CE->getArgs();
    } else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
      Parent = 0;  // FIXME -- get the parent from DeclStmt
      NumArgs = CE->getNumArgs();
      FunArgs = CE->getArgs();
    } else if (D && isa<CXXDestructorDecl>(D)) {
      // There's no such thing as a "destructor call" in the AST.
      Parent = DeclExp;
    }

    // If the attribute has no arguments, then assume the argument is "this".
    if (MutexExp == 0) {
      buildMutexID(Parent, D, 0, 0, 0);
      return;
    }

    buildMutexID(MutexExp, D, Parent, NumArgs, FunArgs);
  }

public:
  // Construct an empty (invalid) MutexID; used where a placeholder MutexID
  // is needed, e.g. LockData's UnderlyingMutex.
  explicit MutexID(clang::Decl::EmptyShell e) {
    DeclSeq.clear();
  }

  /// \param MutexExp The original mutex expression within an attribute
  /// \param DeclExp An expression involving the Decl on which the attribute
  ///        occurs.
  /// \param D The declaration to which the lock/unlock attribute is attached.
  /// Caller must check isValid() after construction.
  MutexID(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
    buildMutexIDFromExp(MutexExp, DeclExp, D);
  }

  /// Return true if this is a valid decl sequence.
  /// Caller must call this by hand after construction to handle errors.
  bool isValid() const {
    return !DeclSeq.empty();
  }

  /// Issue a warning about an invalid lock expression
  static void warnInvalidLock(ThreadSafetyHandler &Handler, Expr* MutexExp,
                              Expr *DeclExp, const NamedDecl* D) {
    SourceLocation Loc;
    if (DeclExp)
      Loc = DeclExp->getExprLoc();

    // FIXME: add a note about the attribute location in MutexExp or D
    if (Loc.isValid())
      Handler.handleInvalidLockExp(Loc);
  }

  bool operator==(const MutexID &other) const {
    return DeclSeq == other.DeclSeq;
  }

  bool operator!=(const MutexID &other) const {
    return !(*this == other);
  }

  // SmallVector overloads Operator< to do lexicographic ordering. Note that
  // we use pointer equality (and <) to compare NamedDecls. This means the order
  // of MutexIDs in a lockset is nondeterministic. In order to output
  // diagnostics in a deterministic ordering, we must order all diagnostics to
  // output by SourceLocation when iterating through this lockset.
  bool operator<(const MutexID &other) const {
    return DeclSeq < other.DeclSeq;
  }

  /// \brief Returns the name of the first Decl in the list for a given MutexID;
  /// e.g. the lock expression foo.bar() has name "bar".
  /// The caret will point unambiguously to the lock expression, so using this
  /// name in diagnostics is a way to get simple, and consistent, mutex names.
  /// We do not want to output the entire expression text for security reasons.
  StringRef getName() const {
    assert(isValid());
    if (!DeclSeq.front())
      return "this";  // Use 0 to represent 'this'.
    return DeclSeq.front()->getName();
  }

  // Hash a MutexID by the pointer identity of each Decl in the sequence.
  void Profile(llvm::FoldingSetNodeID &ID) const {
    for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
         E = DeclSeq.end(); I != E; ++I) {
      ID.AddPointer(*I);
    }
  }
};


/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
/// The main body of the analysis maps MutexIDs to LockDatas.
struct LockData {
  SourceLocation AcquireLoc;  // Where the lock was acquired.

  /// \brief LKind stores whether a lock is held shared or exclusively.
  /// Note that this analysis does not currently support either re-entrant
  /// locking or lock "upgrading" and "downgrading" between exclusive and
  /// shared.
  ///
  /// FIXME: add support for re-entrant locking and lock up/downgrading
  LockKind LKind;
  MutexID UnderlyingMutex;  // for ScopedLockable objects

  LockData(SourceLocation AcquireLoc, LockKind LKind)
    : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Decl::EmptyShell())
  {}

  LockData(SourceLocation AcquireLoc, LockKind LKind, const MutexID &Mu)
    : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Mu) {}

  // Note: equality compares only the acquire location and kind;
  // UnderlyingMutex does not participate.
  bool operator==(const LockData &other) const {
    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
  }

  bool operator!=(const LockData &other) const {
    return !(*this == other);
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(AcquireLoc.getRawEncoding());
    ID.AddInteger(LKind);
  }
};


/// A Lockset maps each MutexID (defined above) to information about how it has
/// been locked.
typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
typedef llvm::ImmutableMap<NamedDecl*, unsigned> LocalVarContext;

class LocalVariableMap;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG.  See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  Lockset EntrySet;             // Lockset held at entry to block
  Lockset ExitSet;              // Lockset held at exit from block
  LocalVarContext EntryContext; // Context held at entry to block
  LocalVarContext ExitContext;  // Context held at exit from block
  SourceLocation EntryLoc;      // Location of first statement in block
  SourceLocation ExitLoc;       // Location of last statement in block.
  unsigned EntryIndex;          // Used to replay contexts later

  const Lockset &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }
  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  // Construction is restricted to getEmptyBlockInfo, which supplies the
  // empty lockset and context from the appropriate factories.
  CFGBlockInfo(Lockset EmptySet, LocalVarContext EmptyCtx)
    : EntrySet(EmptySet), ExitSet(EmptySet),
      EntryContext(EmptyCtx), ExitContext(EmptyCtx)
  {}

public:
  static CFGBlockInfo getEmptyBlockInfo(Lockset::Factory &F,
                                        LocalVariableMap &M);
};



// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions.  It provides SSA-like functionality when traversing the
// CFG.  Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variables declrefs
// with their SSA-names.  Instead, we compute a Context for each point in the
// code, which maps local variables to the appropriate SSA-name.  This map
// changes with each assignment.
//
// The map is computed in a single pass over the CFG.
// Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  typedef LocalVarContext Context;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted.  A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    NamedDecl *Dec;  // The original declaration for this variable.
    Expr *Exp;       // The expression for this variable, OR
    unsigned Ref;    // Reference to another VarDefinition
    Context Ctx;     // The map with which Exp should be interpreted.

    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(NamedDecl *D, Expr *E, Context C)
      : Dec(D), Exp(E), Ref(0), Ctx(C)
    {}

    // Create reference to previous definition
    VarDefinition(NamedDecl *D, unsigned R, Context C)
      : Dec(D), Exp(0), Ref(R), Ctx(C)
    {}
  };

private:
  Context::Factory ContextFactory;
  // All definitions ever created, indexed by SSA name; index 0 is reserved
  // for "undefined" (see the constructor).
  std::vector<VarDefinition> VarDefinitions;
  std::vector<unsigned> CtxIndices;  // One slot per CFG block; see traverseCFG.
  // (Stmt, Context) pairs recorded during traversal, replayed later via
  // getNextContext.
  std::vector<std::pair<Stmt*, Context> > SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(0, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return 0;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context.  Returns
  /// NULL if the expression is not statically known.  If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  Expr* lookupExpr(NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return 0;

    unsigned i = *P;
    // Chase chains of reference definitions until we find one with a real
    // expression; index 0 means the variable is undefined.
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return 0;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S.  This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG.  It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  // Print the name of definition i ("Undefined" for index 0, "<<NULL>>" for
  // a missing Decl) to llvm::errs().
  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S,C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    // Note: the definition is interpreted in the *old* context Ctx.
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      // The new definition's Exp is interpreted in the pre-assignment context.
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);

  friend class VarMapBuilder;
};


// This has to be defined after LocalVariableMap.
545CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(Lockset::Factory &F, 546 LocalVariableMap &M) { 547 return CFGBlockInfo(F.getEmptyMap(), M.getEmptyContext()); 548} 549 550 551/// Visitor which builds a LocalVariableMap 552class VarMapBuilder : public StmtVisitor<VarMapBuilder> { 553public: 554 LocalVariableMap* VMap; 555 LocalVariableMap::Context Ctx; 556 557 VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C) 558 : VMap(VM), Ctx(C) {} 559 560 void VisitDeclStmt(DeclStmt *S); 561 void VisitBinaryOperator(BinaryOperator *BO); 562}; 563 564 565// Add new local variables to the variable map 566void VarMapBuilder::VisitDeclStmt(DeclStmt *S) { 567 bool modifiedCtx = false; 568 DeclGroupRef DGrp = S->getDeclGroup(); 569 for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) { 570 if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) { 571 Expr *E = VD->getInit(); 572 573 // Add local variables with trivial type to the variable map 574 QualType T = VD->getType(); 575 if (T.isTrivialType(VD->getASTContext())) { 576 Ctx = VMap->addDefinition(VD, E, Ctx); 577 modifiedCtx = true; 578 } 579 } 580 } 581 if (modifiedCtx) 582 VMap->saveContext(S, Ctx); 583} 584 585// Update local variable definitions in variable map 586void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) { 587 if (!BO->isAssignmentOp()) 588 return; 589 590 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts(); 591 592 // Update the variable map and current context. 593 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) { 594 ValueDecl *VDec = DRE->getDecl(); 595 if (Ctx.lookup(VDec)) { 596 if (BO->getOpcode() == BO_Assign) 597 Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx); 598 else 599 // FIXME -- handle compound assignment operators 600 Ctx = VMap->clearDefinition(VDec, Ctx); 601 VMap->saveContext(BO, Ctx); 602 } 603 } 604} 605 606 607// Computes the intersection of two contexts. 
The intersection is the 608// set of variables which have the same definition in both contexts; 609// variables with different definitions are discarded. 610LocalVariableMap::Context 611LocalVariableMap::intersectContexts(Context C1, Context C2) { 612 Context Result = C1; 613 for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) { 614 NamedDecl *Dec = I.getKey(); 615 unsigned i1 = I.getData(); 616 const unsigned *i2 = C2.lookup(Dec); 617 if (!i2) // variable doesn't exist on second path 618 Result = removeDefinition(Dec, Result); 619 else if (*i2 != i1) // variable exists, but has different definition 620 Result = clearDefinition(Dec, Result); 621 } 622 return Result; 623} 624 625// For every variable in C, create a new variable that refers to the 626// definition in C. Return a new context that contains these new variables. 627// (We use this for a naive implementation of SSA on loop back-edges.) 628LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) { 629 Context Result = getEmptyContext(); 630 for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) { 631 NamedDecl *Dec = I.getKey(); 632 unsigned i = I.getData(); 633 Result = addReference(Dec, i, Result); 634 } 635 return Result; 636} 637 638// This routine also takes the intersection of C1 and C2, but it does so by 639// altering the VarDefinitions. C1 must be the result of an earlier call to 640// createReferenceContext. 
641void LocalVariableMap::intersectBackEdge(Context C1, Context C2) { 642 for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) { 643 NamedDecl *Dec = I.getKey(); 644 unsigned i1 = I.getData(); 645 VarDefinition *VDef = &VarDefinitions[i1]; 646 assert(VDef->isReference()); 647 648 const unsigned *i2 = C2.lookup(Dec); 649 if (!i2 || (*i2 != i1)) 650 VDef->Ref = 0; // Mark this variable as undefined 651 } 652} 653 654 655// Traverse the CFG in topological order, so all predecessors of a block 656// (excluding back-edges) are visited before the block itself. At 657// each point in the code, we calculate a Context, which holds the set of 658// variable definitions which are visible at that point in execution. 659// Visible variables are mapped to their definitions using an array that 660// contains all definitions. 661// 662// At join points in the CFG, the set is computed as the intersection of 663// the incoming sets along each edge, E.g. 664// 665// { Context | VarDefinitions } 666// int x = 0; { x -> x1 | x1 = 0 } 667// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 } 668// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... } 669// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... } 670// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... } 671// 672// This is essentially a simpler and more naive version of the standard SSA 673// algorithm. Those definitions that remain in the intersection are from blocks 674// that strictly dominate the current block. We do not bother to insert proper 675// phi nodes, because they are not used in our analysis; instead, wherever 676// a phi node would be required, we simply remove that definition from the 677// context (E.g. x above). 678// 679// The initial traversal does not capture back-edges, so those need to be 680// handled on a separate pass. Whenever the first pass encounters an 681// incoming back edge, it duplicates the context, creating new definitions 682// that refer back to the originals. 
// (These correspond to places where SSA
// might have to insert a phi node.)  On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.)  E.g.
//
//                       { Context           | VarDefinitions }
// int x = 0, y = 0;     { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
// while (b)             { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//   x = x+1;            { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
//
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I!= E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge, skip it.  A null predecessor is
      // conservatively treated like a back edge as well.
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        // The first visited predecessor seeds the entry context...
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        // ...and each subsequent predecessor is intersected into it.
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(0, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          const CFGStmt *CS = cast<CFGStmt>(&*BI);
          VMapBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd   = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(0, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I!= E; ++I) {
    const CFGBlock *CurrBlock = *I;
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminator()) {
      // A terminator covers both entry and exit locations.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
    } else {
      // No terminator: scan backwards for the last statement element.
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    }

    if (!CurrBlockInfo->ExitLoc.isInvalid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (CFGBlock::const_iterator BI = CurrBlock->begin(),
           BE = CurrBlock->end(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
        BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    }
  }
}

/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;

  ThreadSafetyHandler &Handler;    // Diagnostic callbacks; owned by the caller.
  Lockset::Factory LocksetFactory; // Factory for the immutable lockset maps.
  LocalVariableMap LocalVarMap;    // SSA-style map of local variable values.

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}

  /// Intersect the locksets of two block sides and warn about any lock that
  /// appears on only one side (the symmetric difference).
  Lockset intersectAndWarn(const CFGBlockInfo &Block1, CFGBlockSide Side1,
                           const CFGBlockInfo &Block2, CFGBlockSide Side2,
                           LockErrorKind LEK);

  /// Add the mutex named by MutexExp (resolved relative to declaration D) to
  /// LSet with lock kind LK; Loc is used for diagnostics.
  Lockset addLock(Lockset &LSet, Expr *MutexExp, const NamedDecl *D,
                  LockKind LK, SourceLocation Loc);

  /// Entry point: run the thread-safety analysis over one function's CFG.
  void runAnalysis(AnalysisDeclContext &AC);
};


/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  // Shared with (and borrowed from) the ThreadSafetyAnalyzer that constructs
  // this visitor; see the BuildLockset constructor below.
  ThreadSafetyHandler &Handler;
  Lockset::Factory &LocksetFactory;
  LocalVariableMap &LocalVarMap;

  Lockset LSet;                      // Lockset at the current program point.
  LocalVariableMap::Context LVarCtx; // Current local-variable SSA context.
  unsigned CtxIndex;                 // Position within the block's context list.

  // Helper functions
  void addLock(const MutexID &Mutex, const LockData &LDat);
  void removeLock(const MutexID &Mutex, SourceLocation UnlockLoc);

  template <class AttrType>
  void addLocksToSet(LockKind LK, AttrType *Attr,
                     Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
  void removeLocksFromSet(UnlockFunctionAttr *Attr,
                          Expr *Exp, NamedDecl* FunDecl);

  const ValueDecl *getValueDecl(Expr *Exp);
  void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK,
                           Expr *MutexExp, ProtectedOperationKind POK);
  void checkAccess(Expr *Exp, AccessKind AK);
  void checkDereference(Expr *Exp, AccessKind AK);
  void handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD = 0);

  template <class AttrType>
  void addTrylock(LockKind LK,
                  AttrType *Attr, Expr *Exp, NamedDecl *FunDecl,
                  const CFGBlock* PredBlock, const CFGBlock *CurrBlock,
                  Expr *BrE, bool Neg);
  CallExpr* getTrylockCallExpr(Stmt *Cond, LocalVariableMap::Context C,
                               bool &Negate);
  void handleTrylock(Stmt *Cond, const CFGBlock* PredBlock,
                     const CFGBlock *CurrBlock);

  /// \brief Returns true if the lockset contains a lock, regardless of whether
  /// the lock is held exclusively or shared.
  bool locksetContains(const MutexID &Lock) const {
    return LSet.lookup(Lock);
  }

  /// \brief Returns true if the lockset contains a lock with the passed in
  /// locktype.
  bool locksetContains(const MutexID &Lock, LockKind KindRequested) const {
    const LockData *LockHeld = LSet.lookup(Lock);
    return (LockHeld && KindRequested == LockHeld->LKind);
  }

  /// \brief Returns true if the lockset contains a lock with at least the
  /// passed in locktype. So for example, if we pass in LK_Shared, this function
  /// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
  /// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
  bool locksetContainsAtLeast(const MutexID &Lock,
                              LockKind KindRequested) const {
    switch (KindRequested) {
      case LK_Shared:
        // Any lock (shared or exclusive) satisfies a shared requirement.
        return locksetContains(Lock);
      case LK_Exclusive:
        return locksetContains(Lock, KindRequested);
    }
    llvm_unreachable("Unknown LockKind");
  }

public:
  /// Snapshot the analyzer's shared state plus the block's entry lockset and
  /// local-variable context; visiting statements then mutates LSet/LVarCtx.
  BuildLockset(ThreadSafetyAnalyzer *analyzer, CFGBlockInfo &Info)
    : StmtVisitor<BuildLockset>(),
      Handler(analyzer->Handler),
      LocksetFactory(analyzer->LocksetFactory),
      LocalVarMap(analyzer->LocalVarMap),
      LSet(Info.EntrySet),
      LVarCtx(Info.EntryContext),
      CtxIndex(Info.EntryIndex)
  {}

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCallExpr(CallExpr *Exp);
  void VisitCXXConstructExpr(CXXConstructExpr *Exp);
  void VisitDeclStmt(DeclStmt *S);
};

/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param Mutex -- the Mutex expression for the lock
/// \param LDat -- the LockData for the lock
void BuildLockset::addLock(const MutexID &Mutex, const LockData& LDat) {
  // FIXME: deal with acquired before/after annotations.
  // FIXME: Don't always warn when we have support for reentrant locks.
  if (locksetContains(Mutex))
    Handler.handleDoubleLock(Mutex.getName(), LDat.AcquireLoc);
  else
    LSet = LocksetFactory.add(LSet, Mutex, LDat);
}

/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param Mutex The mutex corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void BuildLockset::removeLock(const MutexID &Mutex, SourceLocation UnlockLoc) {
  const LockData *LDat = LSet.lookup(Mutex);
  if (!LDat)
    Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
  else {
    // For scoped-lockable vars, remove the mutex associated with this var.
    // (Recursive call; UnderlyingMutex itself has no underlying mutex, so the
    // recursion is at most one level deep.)
    if (LDat->UnderlyingMutex.isValid())
      removeLock(LDat->UnderlyingMutex, UnlockLoc);
    LSet = LocksetFactory.remove(LSet, Mutex);
  }
}

/// \brief This function, parameterized by an attribute type, is used to add a
/// set of locks specified as attribute arguments to the lockset.
///
/// \param LK      Kind (shared/exclusive) with which each lock is acquired.
/// \param Attr    The lock-function attribute listing the mutex arguments.
/// \param Exp     The call (or construct) expression that acquires the locks.
/// \param FunDecl The callee declaration, used to resolve mutex expressions.
/// \param VD      For constructor calls, the variable being constructed (used
///                to detect scoped-lockable objects); null otherwise.
template <typename AttrType>
void BuildLockset::addLocksToSet(LockKind LK, AttrType *Attr,
                                 Expr *Exp, NamedDecl* FunDecl, VarDecl *VD) {
  typedef typename AttrType::args_iterator iterator_type;

  SourceLocation ExpLocation = Exp->getExprLoc();

  // Figure out if we're calling the constructor of scoped lockable class
  bool isScopedVar = false;
  if (VD) {
    if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FunDecl)) {
      CXXRecordDecl* PD = CD->getParent();
      if (PD && PD->getAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    MutexID Mutex(0, Exp, FunDecl);
    if (!Mutex.isValid())
      MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
    else
      addLock(Mutex, LockData(ExpLocation, LK));
    return;
  }

  for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
    MutexID Mutex(*I, Exp, FunDecl);
    if (!Mutex.isValid())
      MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
    else {
      addLock(Mutex, LockData(ExpLocation, LK));
      if (isScopedVar) {
        // For scoped lockable vars, map this var to its underlying mutex.
        // NOTE: the DeclRefExpr is a stack temporary; MutexID only uses it as
        // a key to identify the variable, it does not retain the pointer.
        DeclRefExpr DRE(VD, VD->getType(), VK_LValue, VD->getLocation());
        MutexID SMutex(&DRE, 0, 0);
        addLock(SMutex, LockData(VD->getLocation(), LK, Mutex));
      }
    }
  }
}

/// \brief This function removes a set of locks specified as attribute
/// arguments from the lockset.
///
/// \param Attr    The unlock-function attribute listing the mutex arguments.
/// \param Exp     The call expression performing the unlock (may be null for
///                implicit destructor calls; then the location is invalid).
/// \param FunDecl The callee declaration, used to resolve mutex expressions.
void BuildLockset::removeLocksFromSet(UnlockFunctionAttr *Attr,
                                      Expr *Exp, NamedDecl* FunDecl) {
  SourceLocation ExpLocation;
  if (Exp) ExpLocation = Exp->getExprLoc();

  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    MutexID Mu(0, Exp, FunDecl);
    if (!Mu.isValid())
      MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
    else
      removeLock(Mu, ExpLocation);
    return;
  }

  for (UnlockFunctionAttr::args_iterator I = Attr->args_begin(),
       E = Attr->args_end(); I != E; ++I) {
    MutexID Mutex(*I, Exp, FunDecl);
    if (!Mutex.isValid())
      MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
    else
      removeLock(Mutex, ExpLocation);
  }
}

/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
/// Returns null for any other expression form.
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return 0;
}

/// \brief Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
/// \param D        The declaration being accessed (variable or function).
/// \param Exp      The accessing expression, used for diagnostic locations.
/// \param AK       Whether the access is a read or a write; determines the
///                 minimum lock kind required (shared vs. exclusive).
/// \param MutexExp The mutex expression from the guarded_by/pt_guarded_by
///                 attribute argument.
/// \param POK      What kind of protected operation this is (for diagnostics).
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK) {
  LockKind LK = getLockKindFromAccessKind(AK);

  MutexID Mutex(MutexExp, Exp, D);
  if (!Mutex.isValid())
    MutexID::warnInvalidLock(Handler, MutexExp, Exp, D);
  else if (!locksetContainsAtLeast(Mutex, LK))
    Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
}

/// \brief This method identifies variable dereferences and checks pt_guarded_by
/// and pt_guarded_var annotations. Note that we only check these annotations
/// at the time a pointer is dereferenced.
/// FIXME: We need to check for other types of pointer dereferences
/// (e.g. [], ->) and deal with them here.
/// \param Exp An expression that has been read or written.
/// \param AK  Whether the access is a read or a write.
void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
  // Only unary '*' dereferences are recognized here (see FIXME above).
  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
  if (!UO || UO->getOpcode() != clang::UO_Deref)
    return;
  Exp = UO->getSubExpr()->IgnoreParenCasts();

  const ValueDecl *D = getValueDecl(Exp);
  if(!D || !D->hasAttrs())
    return;

  // pt_guarded_var: some lock (any lock) must be held when dereferencing.
  if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());

  // pt_guarded_by: the specific named mutex must be held.
  const AttrVec &ArgAttrs = D->getAttrs();
  for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
}

/// \brief Checks guarded_by and guarded_var attributes.
/// Whenever we identify an access (read or write) of a DeclRefExpr or
/// MemberExpr, we need to check whether there are any guarded_by or
/// guarded_var attributes, and make sure we hold the appropriate mutexes.
void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
  const ValueDecl *D = getValueDecl(Exp);
  if(!D || !D->hasAttrs())
    return;

  // guarded_var: some lock (any lock) must be held when accessing.
  if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());

  // guarded_by: the specific named mutex must be held.
  const AttrVec &ArgAttrs = D->getAttrs();
  for(unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}

/// \brief Process a function call, method call, constructor call,
/// or destructor call.  This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
/// FIXME: We need to also visit CallExprs to catch/check global functions.
///
/// FIXME: Do not flag an error for member variables accessed in constructors/
/// destructors
///
/// \param Exp The call expression (or dummy DeclRefExpr for destructor calls).
/// \param D   The callee declaration whose attributes are processed.
/// \param VD  For constructor calls, the variable being constructed; else null.
void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
  AttrVec &ArgAttrs = D->getAttrs();
  for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      // When we encounter an exclusive lock function, we need to add the lock
      // to our lockset with kind exclusive.
      case attr::ExclusiveLockFunction: {
        ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(Attr);
        addLocksToSet(LK_Exclusive, A, Exp, D, VD);
        break;
      }

      // When we encounter a shared lock function, we need to add the lock
      // to our lockset with kind shared.
      case attr::SharedLockFunction: {
        SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(Attr);
        addLocksToSet(LK_Shared, A, Exp, D, VD);
        break;
      }

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::UnlockFunction: {
        UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
        removeLocksFromSet(UFAttr, Exp, D);
        break;
      }

      // Caller must already hold each listed mutex exclusively.
      case attr::ExclusiveLocksRequired: {
        ExclusiveLocksRequiredAttr *ELRAttr =
            cast<ExclusiveLocksRequiredAttr>(Attr);

        for (ExclusiveLocksRequiredAttr::args_iterator
             I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
        break;
      }

      // Caller must already hold each listed mutex (shared is enough).
      case attr::SharedLocksRequired: {
        SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);

        for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
             E = SLRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
        break;
      }

      // Caller must NOT hold any of the listed mutexes.
      case attr::LocksExcluded: {
        LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
        for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
            E = LEAttr->args_end(); I != E; ++I) {
          MutexID Mutex(*I, Exp, D);
          if (!Mutex.isValid())
            MutexID::warnInvalidLock(Handler, *I, Exp, D);
          else if (locksetContains(Mutex))
            Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
                                          Exp->getExprLoc());
        }
        break;
      }

      // Ignore other (non thread-safety) attributes
      default:
        break;
    }
  }
}


/// \brief Add lock to set, if the current block is in the taken branch of a
/// trylock.
///
/// \param BrE The trylock's success value (bool or integer literal from the
///            attribute); determines which successor edge means "acquired".
/// \param Neg True if the branch condition was logically negated.
template <class AttrType>
void BuildLockset::addTrylock(LockKind LK, AttrType *Attr, Expr *Exp,
                              NamedDecl *FunDecl, const CFGBlock *PredBlock,
                              const CFGBlock *CurrBlock, Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = 0;
  if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
    branch = BLE->getValue();
  }
  else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
    branch = ILE->getValue().getBoolValue();
  }
  // Successor 0 is the "condition true" edge; a success value of true means
  // the lock is held on that edge.  Negation flips which edge holds the lock.
  int branchnum = branch ? 0 : 1;
  if (Neg) branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum) {
      addLocksToSet(LK, Attr, Exp, FunDecl, 0);
    }
  }
}


// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g.
// if (!mu.tryLock(...))
CallExpr* BuildLockset::getTrylockCallExpr(Stmt *Cond,
                                           LocalVariableMap::Context C,
                                           bool &Negate) {
  if (!Cond)
    return 0;

  if (CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
    return CallExp;
  }
  else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
    // Look through implicit casts (e.g. bool conversions).
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  }
  else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    // A local variable: look up its defining expression in the SSA map and
    // keep tracing, e.g. "bool b = mu.tryLock(); if (b) ...".
    Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      // Each '!' flips the polarity of the success branch.
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
  }
  // FIXME -- handle && and || as well.
  return NULL;
}


/// \brief Process a conditional branch from a previous block to the current
/// block, looking for trylock calls.
void BuildLockset::handleTrylock(Stmt *Cond, const CFGBlock *PredBlock,
                                 const CFGBlock *CurrBlock) {
  bool Negate = false;
  CallExpr *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if(!FunDecl || !FunDecl->hasAttrs())
    return;

  // If the condition is a call to a Trylock function, then grab the attributes
  AttrVec &ArgAttrs = FunDecl->getAttrs();
  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      case attr::ExclusiveTrylockFunction: {
        ExclusiveTrylockFunctionAttr *A =
          cast<ExclusiveTrylockFunctionAttr>(Attr);
        addTrylock(LK_Exclusive, A, Exp, FunDecl, PredBlock, CurrBlock,
                   A->getSuccessValue(), Negate);
        break;
      }
      case attr::SharedTrylockFunction: {
        SharedTrylockFunctionAttr *A =
          cast<SharedTrylockFunctionAttr>(Attr);
        addTrylock(LK_Shared, A, Exp, FunDecl, PredBlock, CurrBlock,
                   A->getSuccessValue(), Negate);
        break;
      }
      default:
        break;
    }
  }
}


/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    // ++ and -- both read and write; treat as a write (the stronger access).
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
      checkAccess(SubExp, AK_Written);
      checkDereference(SubExp, AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  // adjust the context
  LVarCtx = LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
  checkAccess(LHSExp, AK_Written);
  checkDereference(LHSExp, AK_Written);
}

/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
1320void BuildLockset::VisitCastExpr(CastExpr *CE) { 1321 if (CE->getCastKind() != CK_LValueToRValue) 1322 return; 1323 Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts(); 1324 checkAccess(SubExp, AK_Read); 1325 checkDereference(SubExp, AK_Read); 1326} 1327 1328 1329void BuildLockset::VisitCallExpr(CallExpr *Exp) { 1330 NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); 1331 if(!D || !D->hasAttrs()) 1332 return; 1333 handleCall(Exp, D); 1334} 1335 1336void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) { 1337 // FIXME -- only handles constructors in DeclStmt below. 1338} 1339 1340void BuildLockset::VisitDeclStmt(DeclStmt *S) { 1341 // adjust the context 1342 LVarCtx = LocalVarMap.getNextContext(CtxIndex, S, LVarCtx); 1343 1344 DeclGroupRef DGrp = S->getDeclGroup(); 1345 for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) { 1346 Decl *D = *I; 1347 if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) { 1348 Expr *E = VD->getInit(); 1349 if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) { 1350 NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor()); 1351 if (!CtorD || !CtorD->hasAttrs()) 1352 return; 1353 handleCall(CE, CtorD, VD); 1354 } 1355 } 1356 } 1357} 1358 1359 1360/// \brief Compute the intersection of two locksets and issue warnings for any 1361/// locks in the symmetric difference. 1362/// 1363/// This function is used at a merge point in the CFG when comparing the lockset 1364/// of each branch being merged. For example, given the following sequence: 1365/// A; if () then B; else C; D; we need to check that the lockset after B and C 1366/// are the same. In the event of a difference, we use the intersection of these 1367/// two locksets at the start of D. 
/// \param Block1 First block to compare; its location anchors diagnostics for
///               locks held only in Block2.
/// \param Side1  Which side of Block1 (entry or exit lockset) to use.
/// \param Block2 Second block to compare.
/// \param Side2  Which side of Block2 to use.
/// \param LEK    The kind of lockset-mismatch error to report.
/// \returns The intersection of the two locksets.
Lockset ThreadSafetyAnalyzer::intersectAndWarn(const CFGBlockInfo &Block1,
                                               CFGBlockSide Side1,
                                               const CFGBlockInfo &Block2,
                                               CFGBlockSide Side2,
                                               LockErrorKind LEK) {
  Lockset LSet1 = Block1.getSet(Side1);
  Lockset LSet2 = Block2.getSet(Side2);

  Lockset Intersection = LSet1;
  for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
    const MutexID &LSet2Mutex = I.getKey();
    const LockData &LSet2LockData = I.getData();
    if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
      // Held in both sets, but possibly with conflicting lock kinds.
      if (LD->LKind != LSet2LockData.LKind) {
        Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
                                         LSet2LockData.AcquireLoc,
                                         LD->AcquireLoc);
        // Keep the exclusive entry: if LSet1's copy is not exclusive, the
        // (conflicting) LSet2 copy must be, so overwrite with it.
        if (LD->LKind != LK_Exclusive)
          Intersection = LocksetFactory.add(Intersection, LSet2Mutex,
                                            LSet2LockData);
      }
    } else {
      // Held only in LSet2: warn, and leave it out of the intersection.
      Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
                                        LSet2LockData.AcquireLoc,
                                        Block1.getLocation(Side1), LEK);
    }
  }

  // Locks held only in LSet1: warn, and remove them from the intersection.
  for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
    if (!LSet2.contains(I.getKey())) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();
      Handler.handleMutexHeldEndOfScope(Mutex.getName(),
                                        MissingLock.AcquireLoc,
                                        Block2.getLocation(Side2), LEK);
      Intersection = LocksetFactory.remove(Intersection, Mutex);
    }
  }
  return Intersection;
}

/// \brief Resolve MutexExp (relative to declaration D) and add it to LSet with
/// lock kind LK.  Returns the updated lockset, or LSet unchanged (after
/// warning) if the mutex expression cannot be resolved.
Lockset ThreadSafetyAnalyzer::addLock(Lockset &LSet, Expr *MutexExp,
                                      const NamedDecl *D,
                                      LockKind LK, SourceLocation Loc) {
  MutexID Mutex(MutexExp, 0, D);
  if (!Mutex.isValid()) {
    MutexID::warnInvalidLock(Handler, MutexExp, 0, D);
    return LSet;
  }
  LockData NewLock(Loc, LK);
  return LocksetFactory.add(LSet, Mutex, NewLock);
}

/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());

  if (!D)
    return;  // Ignore anonymous functions for now.
  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
    return;  // Analysis explicitly disabled for this function.
  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code.  Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access is disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.

  // One info record per CFG block, indexed by block ID.
  std::vector<CFGBlockInfo> BlockInfo(CFGraph->getNumBlockIDs(),
    CFGBlockInfo::getEmptyBlockInfo(LocksetFactory, LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    Lockset &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
    const AttrVec &ArgAttrs = D->getAttrs();
    for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
      Attr *Attr = ArgAttrs[i];
      SourceLocation AttrLoc = Attr->getLocation();
      if (SharedLocksRequiredAttr *SLRAttr
            = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
        for (SharedLocksRequiredAttr::args_iterator
             SLRIter = SLRAttr->args_begin(),
             SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
          InitialLockset = addLock(InitialLockset,
                                   *SLRIter, D, LK_Shared,
                                   AttrLoc);
      } else if (ExclusiveLocksRequiredAttr *ELRAttr
                   = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
        for (ExclusiveLocksRequiredAttr::args_iterator
             ELRIter = ELRAttr->args_begin(),
             ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
          InitialLockset = addLock(InitialLockset,
                                   *ELRIter, D, LK_Exclusive,
                                   AttrLoc);
      } else if (isa<UnlockFunctionAttr>(Attr)) {
        // Don't try to check unlock functions for now
        return;
      } else if (isa<ExclusiveLockFunctionAttr>(Attr)) {
        // Don't try to check lock functions for now
        return;
      } else if (isa<SharedLockFunctionAttr>(Attr)) {
        // Don't try to check lock functions for now
        return;
      }
    }
  }

  // Main traversal: each block is processed once, in topological order.
  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I!= E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    llvm::SmallVector<CFGBlock*, 8> SpecialBlocks;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {

      // if *PI -> CurrBlock is a back edge
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      // Ignore edges from blocks that can't return.
      if ((*PI)->hasNoReturnElement())
        continue;

      // If the previous block ended in a 'continue' or 'break' statement, then
      // a difference in locksets is probably due to a bug in that block, rather
      // than in some other predecessor. In that case, keep the other
      // predecessor's lockset.
      if (const Stmt *Terminator = (*PI)->getTerminator()) {
        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
          SpecialBlocks.push_back(*PI);
          continue;
        }
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (!LocksetInitialized) {
        // First (non-special) predecessor: start from its exit lockset.
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Further predecessors: intersect, warning on any discrepancy.
        CurrBlockInfo->EntrySet =
          intersectAndWarn(*CurrBlockInfo, CBS_Entry,
                           *PrevBlockInfo, CBS_Exit,
                           LEK_LockedSomePredecessors);
      }
    }

    // Process continue and break blocks. Assume that the lockset for the
    // resulting block is unaffected by any discrepancies in them.
    for (unsigned SpecialI = 0, SpecialN = SpecialBlocks.size();
         SpecialI < SpecialN; ++SpecialI) {
      CFGBlock *PrevBlock = SpecialBlocks[SpecialI];
      int PrevBlockID = PrevBlock->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Determine whether this edge is a loop terminator for diagnostic
        // purposes. FIXME: A 'break' statement might be a loop terminator, but
        // it might also be part of a switch. Also, a subsequent destructor
        // might add to the lockset, in which case the real issue might be a
        // double lock on the other path.
        const Stmt *Terminator = PrevBlock->getTerminator();
        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);

        // Do not update EntrySet.
        intersectAndWarn(*CurrBlockInfo, CBS_Entry, *PrevBlockInfo, CBS_Exit,
                         IsLoop ? LEK_LockedSomeLoopIterations
                                : LEK_LockedSomePredecessors);
      }
    }

    BuildLockset LocksetBuilder(this, *CurrBlockInfo);
    CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
                                  PE = CurrBlock->pred_end();
    if (PI != PE) {
      // If the predecessor ended in a branch, then process any trylocks.
      // FIXME -- check to make sure there's only one predecessor.
      if (Stmt *TCE = (*PI)->getTerminatorCondition()) {
        LocksetBuilder.handleTrylock(TCE, *PI, CurrBlock);
      }
    }

    // Visit all the statements in the basic block.
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          const CFGStmt *CS = cast<CFGStmt>(&*BI);
          LocksetBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          // Implicit destructor calls (e.g. scoped lock guards going out of
          // scope) may release locks; model them as a call on the variable.
          const CFGAutomaticObjDtor *AD = cast<CFGAutomaticObjDtor>(&*BI);
          CXXDestructorDecl *DD = const_cast<CXXDestructorDecl*>(
            AD->getDestructorDecl(AC.getASTContext()));
          if (!DD->hasAttrs())
            break;

          // Create a dummy expression,
          VarDecl *VD = const_cast<VarDecl*>(AD->getVarDecl());
          DeclRefExpr DRE(VD, VD->getType(), VK_LValue,
                          AD->getTriggerStmt()->getLocEnd());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.LSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
    // the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {

      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo &PreLoop = BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo &LoopEnd = BlockInfo[CurrBlockID];
      intersectAndWarn(LoopEnd, CBS_Exit, PreLoop, CBS_Entry,
                       LEK_LockedSomeLoopIterations);
    }
  }

  // Finally, check that no locks are held when the function exits.
  CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(Initial, CBS_Entry, Final, CBS_Exit,
                   LEK_LockedAtEndOfFunction);
}

} // end anonymous namespace


namespace clang {
namespace thread_safety {

/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                             ThreadSafetyHandler &Handler) {
  ThreadSafetyAnalyzer Analyzer(Handler);
  Analyzer.runAnalysis(AC);
}

/// \brief Helper function that returns a LockKind required for the given level
/// of access.
LockKind getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read :
      return LK_Shared;
    case AK_Written :
      return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}

}} // end namespace clang::thread_safety