ThreadSafety.cpp revision 2a237e03c407ff55bd8639b18658a8751955f1db
//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more
// information.
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/OperatorKinds.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <utility>
#include <vector>

using namespace clang;
using namespace thread_safety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() {}

namespace {

/// SExpr implements a simple expression language that is used to store,
/// compare, and pretty-print C++ expressions.  Unlike a clang Expr, a SExpr
/// does not capture surface syntax, and it does not distinguish between
/// C++ concepts, like pointers and references, that have no real semantic
/// differences.  This simplicity allows SExprs to be meaningfully compared,
/// e.g.
///   (x)          =  x
///   (*this).foo  =  this->foo
///   *&a          =  a
///
/// Thread-safety analysis works by comparing lock expressions.  Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
/// a particular mutex object at run-time.  Subsequent occurrences of the same
/// expression (where "same" means syntactic equality) will refer to the same
/// run-time object if three conditions hold:
/// (1) Local variables in the expression, such as "x", have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
///
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
class SExpr {
private:
  enum ExprOp {
    EOP_Nop,       ///< No-op
    EOP_Wildcard,  ///< Matches anything.
    EOP_Universal, ///< Universal lock.
    EOP_This,      ///< This keyword.
    EOP_NVar,      ///< Named variable.
    EOP_LVar,      ///< Local variable.
    EOP_Dot,       ///< Field access
    EOP_Call,      ///< Function call
    EOP_MCall,     ///< Method call
    EOP_Index,     ///< Array index
    EOP_Unary,     ///< Unary operation
    EOP_Binary,    ///< Binary operation
    EOP_Unknown    ///< Catchall for everything else
  };


  class SExprNode {
  private:
    unsigned char  Op;     ///< Opcode of the root node
    unsigned char  Flags;  ///< Additional opcode-specific data
    unsigned short Sz;     ///< Number of child nodes
    const void*    Data;   ///< Additional opcode-specific data

  public:
    SExprNode(ExprOp O, unsigned F, const void* D)
      : Op(static_cast<unsigned char>(O)),
        Flags(static_cast<unsigned char>(F)), Sz(1), Data(D)
    { }

    unsigned size() const        { return Sz; }
    void     setSize(unsigned S) { Sz = S; }

    ExprOp   kind() const { return static_cast<ExprOp>(Op); }

    const NamedDecl* getNamedDecl() const {
      assert(Op == EOP_NVar || Op == EOP_LVar || Op == EOP_Dot);
      return reinterpret_cast<const NamedDecl*>(Data);
    }

    const NamedDecl* getFunctionDecl() const {
      assert(Op == EOP_Call || Op == EOP_MCall);
      return reinterpret_cast<const NamedDecl*>(Data);
    }

    bool isArrow() const { return Op == EOP_Dot && Flags == 1; }
    void setArrow(bool A) { Flags = A ? 1 : 0; }

    unsigned arity() const {
      switch (Op) {
        case EOP_Nop:       return 0;
        case EOP_Wildcard:  return 0;
        case EOP_Universal: return 0;
        case EOP_NVar:      return 0;
        case EOP_LVar:      return 0;
        case EOP_This:      return 0;
        case EOP_Dot:       return 1;
        case EOP_Call:      return Flags+1;  // First arg is function.
        case EOP_MCall:     return Flags+1;  // First arg is implicit obj.
        case EOP_Index:     return 2;
        case EOP_Unary:     return 1;
        case EOP_Binary:    return 2;
        case EOP_Unknown:   return Flags;
      }
      return 0;
    }

    bool operator==(const SExprNode& Other) const {
      // Ignore flags and size -- they don't matter.
      return (Op == Other.Op &&
              Data == Other.Data);
    }

    bool operator!=(const SExprNode& Other) const {
      return !(*this == Other);
    }

    bool matches(const SExprNode& Other) const {
      return (*this == Other) ||
             (Op == EOP_Wildcard) ||
             (Other.Op == EOP_Wildcard);
    }
  };


  /// \brief Encapsulates the lexical context of a function call.  The lexical
  /// context includes the arguments to the call, including the implicit object
  /// argument.  When an attribute containing a mutex expression is attached to
  /// a method, the expression may refer to formal parameters of the method.
  /// Actual arguments must be substituted for formal parameters to derive
  /// the appropriate mutex expression in the lexical context where the
  /// function is called.  PrevCtx holds the context in which the arguments
  /// themselves should be evaluated; multiple calling contexts can be chained
  /// together by the lock_returned attribute.
  struct CallingContext {
    const NamedDecl* AttrDecl;  // The decl to which the attribute is attached.
    Expr*            SelfArg;   // Implicit object argument -- e.g. 'this'
    bool             SelfArrow; // is Self referred to with -> or .?
    unsigned         NumArgs;   // Number of funArgs
    Expr**           FunArgs;   // Function arguments
    CallingContext*  PrevCtx;   // The previous context; or 0 if none.

    CallingContext(const NamedDecl *D = 0, Expr *S = 0,
                   unsigned N = 0, Expr **A = 0, CallingContext *P = 0)
      : AttrDecl(D), SelfArg(S), SelfArrow(false),
        NumArgs(N), FunArgs(A), PrevCtx(P)
    { }
  };
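
  // For example (hypothetical user code): given
  //   Mutex* getMu() LOCK_RETURNED(mu);
  //   void   f()     EXCLUSIVE_LOCKS_REQUIRED(getMu());
  // checking a call "p->f()" evaluates the attribute expression "getMu()"
  // in the context of that call, and then "mu" in a second context chained
  // through PrevCtx, so the required mutex resolves to "p->mu".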

  typedef SmallVector<SExprNode, 4> NodeVector;

private:
  // A SExpr is a list of SExprNodes in prefix order.  The Size field allows
  // the list to be traversed as a tree.
  NodeVector NodeVec;

private:
  unsigned makeNop() {
    NodeVec.push_back(SExprNode(EOP_Nop, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeWildcard() {
    NodeVec.push_back(SExprNode(EOP_Wildcard, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeUniversal() {
    NodeVec.push_back(SExprNode(EOP_Universal, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeNamedVar(const NamedDecl *D) {
    NodeVec.push_back(SExprNode(EOP_NVar, 0, D));
    return NodeVec.size()-1;
  }

  unsigned makeLocalVar(const NamedDecl *D) {
    NodeVec.push_back(SExprNode(EOP_LVar, 0, D));
    return NodeVec.size()-1;
  }

  unsigned makeThis() {
    NodeVec.push_back(SExprNode(EOP_This, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeDot(const NamedDecl *D, bool Arrow) {
    NodeVec.push_back(SExprNode(EOP_Dot, Arrow ? 1 : 0, D));
    return NodeVec.size()-1;
  }

  unsigned makeCall(unsigned NumArgs, const NamedDecl *D) {
    NodeVec.push_back(SExprNode(EOP_Call, NumArgs, D));
    return NodeVec.size()-1;
  }

  unsigned makeMCall(unsigned NumArgs, const NamedDecl *D) {
    NodeVec.push_back(SExprNode(EOP_MCall, NumArgs, D));
    return NodeVec.size()-1;
  }

  unsigned makeIndex() {
    NodeVec.push_back(SExprNode(EOP_Index, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeUnary() {
    NodeVec.push_back(SExprNode(EOP_Unary, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeBinary() {
    NodeVec.push_back(SExprNode(EOP_Binary, 0, 0));
    return NodeVec.size()-1;
  }

  unsigned makeUnknown(unsigned Arity) {
    NodeVec.push_back(SExprNode(EOP_Unknown, Arity, 0));
    return NodeVec.size()-1;
  }
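
  // For illustration: buildSExpr (below) encodes "x->foo->bar.mu" in prefix
  // order as the node vector
  //   [ Dot(mu), Dot(bar), Dot(foo), NVar(x) ]
  // with sizes 4, 3, 2, and 1 respectively, so each node's subtree can be
  // skipped over with getNextSibling().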

  /// Build an SExpr from the given C++ expression.
  /// Recursive function that terminates on DeclRefExpr.
  /// Note: this function merely creates a SExpr; it does not check to
  /// ensure that the original expression is a valid mutex expression.
  ///
  /// NDeref returns the number of Dereference and AddressOf operations
  /// preceding the Expr; this is used to decide whether to pretty-print
  /// SExprs with . or ->.
  unsigned buildSExpr(Expr *Exp, CallingContext* CallCtx, int* NDeref = 0) {
    if (!Exp)
      return 0;

    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
      NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
      ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
      if (PV) {
        FunctionDecl *FD =
          cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
        unsigned i = PV->getFunctionScopeIndex();

        if (CallCtx && CallCtx->FunArgs &&
            FD == CallCtx->AttrDecl->getCanonicalDecl()) {
          // Substitute call arguments for references to function parameters
          assert(i < CallCtx->NumArgs);
          return buildSExpr(CallCtx->FunArgs[i], CallCtx->PrevCtx, NDeref);
        }
        // Map the param back to the param of the original function
        // declaration.
        makeNamedVar(FD->getParamDecl(i));
        return 1;
      }
      // Not a function parameter -- just store the reference.
      makeNamedVar(ND);
      return 1;
    } else if (isa<CXXThisExpr>(Exp)) {
      // Substitute parent for 'this'
      if (CallCtx && CallCtx->SelfArg) {
        if (!CallCtx->SelfArrow && NDeref)
          // 'this' is a pointer, but self is not, so need to take address.
          --(*NDeref);
        return buildSExpr(CallCtx->SelfArg, CallCtx->PrevCtx, NDeref);
      }
      else {
        makeThis();
        return 1;
      }
    } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
      NamedDecl *ND = ME->getMemberDecl();
      int ImplicitDeref = ME->isArrow() ? 1 : 0;
      unsigned Root = makeDot(ND, false);
      unsigned Sz = buildSExpr(ME->getBase(), CallCtx, &ImplicitDeref);
      NodeVec[Root].setArrow(ImplicitDeref > 0);
      NodeVec[Root].setSize(Sz + 1);
      return Sz + 1;
    } else if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
      // When calling a function with a lock_returned attribute, replace
      // the function call with the expression in lock_returned.
      CXXMethodDecl* MD =
        cast<CXXMethodDecl>(CMCE->getMethodDecl()->getMostRecentDecl());
      if (LockReturnedAttr* At = MD->getAttr<LockReturnedAttr>()) {
        CallingContext LRCallCtx(CMCE->getMethodDecl());
        LRCallCtx.SelfArg = CMCE->getImplicitObjectArgument();
        LRCallCtx.SelfArrow =
          dyn_cast<MemberExpr>(CMCE->getCallee())->isArrow();
        LRCallCtx.NumArgs = CMCE->getNumArgs();
        LRCallCtx.FunArgs = CMCE->getArgs();
        LRCallCtx.PrevCtx = CallCtx;
        return buildSExpr(At->getArg(), &LRCallCtx);
      }
      // Hack to treat smart pointers and iterators as pointers;
      // ignore any method named get().
      if (CMCE->getMethodDecl()->getNameAsString() == "get" &&
          CMCE->getNumArgs() == 0) {
        if (NDeref && dyn_cast<MemberExpr>(CMCE->getCallee())->isArrow())
          ++(*NDeref);
        return buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx, NDeref);
      }
      unsigned NumCallArgs = CMCE->getNumArgs();
      unsigned Root =
        makeMCall(NumCallArgs, CMCE->getMethodDecl()->getCanonicalDecl());
      unsigned Sz = buildSExpr(CMCE->getImplicitObjectArgument(), CallCtx);
      Expr** CallArgs = CMCE->getArgs();
      for (unsigned i = 0; i < NumCallArgs; ++i) {
        Sz += buildSExpr(CallArgs[i], CallCtx);
      }
      NodeVec[Root].setSize(Sz + 1);
      return Sz + 1;
    } else if (CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
      FunctionDecl* FD =
        cast<FunctionDecl>(CE->getDirectCallee()->getMostRecentDecl());
      if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
        CallingContext LRCallCtx(CE->getDirectCallee());
        LRCallCtx.NumArgs = CE->getNumArgs();
        LRCallCtx.FunArgs = CE->getArgs();
        LRCallCtx.PrevCtx = CallCtx;
        return buildSExpr(At->getArg(), &LRCallCtx);
      }
      // Treat smart pointers and iterators as pointers;
      // ignore the * and -> operators.
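      // For example, for a hypothetical smart pointer "sp" that guards a
      // struct with a mutex member "mu", the lock expressions "sp->mu",
      // "(*sp).mu", and "sp.get()->mu" all build the same SExpr, because
      // operator*, operator->, and get() are all stripped away.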
      if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(CE)) {
        OverloadedOperatorKind k = OE->getOperator();
        if (k == OO_Star) {
          if (NDeref) ++(*NDeref);
          return buildSExpr(OE->getArg(0), CallCtx, NDeref);
        }
        else if (k == OO_Arrow) {
          return buildSExpr(OE->getArg(0), CallCtx, NDeref);
        }
      }
      unsigned NumCallArgs = CE->getNumArgs();
      unsigned Root = makeCall(NumCallArgs, 0);
      unsigned Sz = buildSExpr(CE->getCallee(), CallCtx);
      Expr** CallArgs = CE->getArgs();
      for (unsigned i = 0; i < NumCallArgs; ++i) {
        Sz += buildSExpr(CallArgs[i], CallCtx);
      }
      NodeVec[Root].setSize(Sz+1);
      return Sz+1;
    } else if (BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
      unsigned Root = makeBinary();
      unsigned Sz = buildSExpr(BOE->getLHS(), CallCtx);
      Sz += buildSExpr(BOE->getRHS(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
      // Ignore & and * operators -- they're no-ops.
      // However, we try to figure out whether the expression is a pointer,
      // so we can use . and -> appropriately in error messages.
      if (UOE->getOpcode() == UO_Deref) {
        if (NDeref) ++(*NDeref);
        return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
      }
      if (UOE->getOpcode() == UO_AddrOf) {
        if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UOE->getSubExpr())) {
          if (DRE->getDecl()->isCXXInstanceMember()) {
            // This is a pointer-to-member expression, e.g. &MyClass::mu_.
            // We interpret this syntax specially, as a wildcard.
            unsigned Root = makeDot(DRE->getDecl(), false);
            makeWildcard();
            NodeVec[Root].setSize(2);
            return 2;
          }
        }
        if (NDeref) --(*NDeref);
        return buildSExpr(UOE->getSubExpr(), CallCtx, NDeref);
      }
      unsigned Root = makeUnary();
      unsigned Sz = buildSExpr(UOE->getSubExpr(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(Exp)) {
      unsigned Root = makeIndex();
      unsigned Sz = buildSExpr(ASE->getBase(), CallCtx);
      Sz += buildSExpr(ASE->getIdx(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (AbstractConditionalOperator *CE =
                 dyn_cast<AbstractConditionalOperator>(Exp)) {
      unsigned Root = makeUnknown(3);
      unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
      Sz += buildSExpr(CE->getTrueExpr(), CallCtx);
      Sz += buildSExpr(CE->getFalseExpr(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
      unsigned Root = makeUnknown(3);
      unsigned Sz = buildSExpr(CE->getCond(), CallCtx);
      Sz += buildSExpr(CE->getLHS(), CallCtx);
      Sz += buildSExpr(CE->getRHS(), CallCtx);
      NodeVec[Root].setSize(Sz);
      return Sz;
    } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
      return buildSExpr(CE->getSubExpr(), CallCtx, NDeref);
    } else if (ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
      return buildSExpr(PE->getSubExpr(), CallCtx, NDeref);
    } else if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Exp)) {
      return buildSExpr(EWC->getSubExpr(), CallCtx, NDeref);
    } else if (CXXBindTemporaryExpr *E = dyn_cast<CXXBindTemporaryExpr>(Exp)) {
      return buildSExpr(E->getSubExpr(), CallCtx, NDeref);
    } else if (isa<CharacterLiteral>(Exp) ||
               isa<CXXNullPtrLiteralExpr>(Exp) ||
               isa<GNUNullExpr>(Exp) ||
               isa<CXXBoolLiteralExpr>(Exp) ||
               isa<FloatingLiteral>(Exp) ||
               isa<ImaginaryLiteral>(Exp) ||
               isa<IntegerLiteral>(Exp) ||
               isa<StringLiteral>(Exp) ||
               isa<ObjCStringLiteral>(Exp)) {
      makeNop();
      return 1;  // FIXME: Ignore literals for now
    } else {
      makeNop();
      return 1;  // Ignore.  FIXME: mark as invalid expression?
    }
  }

  /// \brief Construct a SExpr from an expression.
  /// \param MutexExp The original mutex expression within an attribute
  /// \param DeclExp An expression involving the Decl on which the attribute
  ///        occurs.
  /// \param D The declaration to which the lock/unlock attribute is attached.
  void buildSExprFromExpr(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
    CallingContext CallCtx(D);

    if (MutexExp) {
      if (StringLiteral* SLit = dyn_cast<StringLiteral>(MutexExp)) {
        if (SLit->getString() == StringRef("*"))
          // The "*" expr is a universal lock, which essentially turns off
          // checks until it is removed from the lockset.
          makeUniversal();
        else
          // Ignore other string literals for now.
          makeNop();
        return;
      }
    }

    // If we are processing a raw attribute expression, with no substitutions.
    if (DeclExp == 0) {
      buildSExpr(MutexExp, 0);
      return;
    }

    // Examine DeclExp to find SelfArg and FunArgs, which are used to
    // substitute for formal parameters when we call buildMutexID later.
    if (MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
      CallCtx.SelfArg   = ME->getBase();
      CallCtx.SelfArrow = ME->isArrow();
    } else if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
      CallCtx.SelfArg   = CE->getImplicitObjectArgument();
      CallCtx.SelfArrow = dyn_cast<MemberExpr>(CE->getCallee())->isArrow();
      CallCtx.NumArgs   = CE->getNumArgs();
      CallCtx.FunArgs   = CE->getArgs();
    } else if (CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
      CallCtx.NumArgs = CE->getNumArgs();
      CallCtx.FunArgs = CE->getArgs();
    } else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
      CallCtx.SelfArg = 0;  // FIXME -- get the parent from DeclStmt
      CallCtx.NumArgs = CE->getNumArgs();
      CallCtx.FunArgs = CE->getArgs();
    } else if (D && isa<CXXDestructorDecl>(D)) {
      // There's no such thing as a "destructor call" in the AST.
      CallCtx.SelfArg = DeclExp;
    }

    // If the attribute has no arguments, then assume the argument is "this".
    if (MutexExp == 0) {
      buildSExpr(CallCtx.SelfArg, 0);
      return;
    }

    // For most attributes.
    buildSExpr(MutexExp, &CallCtx);
  }
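
  // For example (hypothetical user code): for a call "c->set(5)" to a method
  //   void set(int v) EXCLUSIVE_LOCKS_REQUIRED(mu_);
  // MutexExp is the attribute argument "mu_", DeclExp is the call expression,
  // and D is the declaration of set(); the resulting SExpr corresponds to
  // "c->mu_".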

  /// \brief Get index of next sibling of node i.
  unsigned getNextSibling(unsigned i) const {
    return i + NodeVec[i].size();
  }

public:
  explicit SExpr(clang::Decl::EmptyShell e) { NodeVec.clear(); }

  /// \param MutexExp The original mutex expression within an attribute
  /// \param DeclExp An expression involving the Decl on which the attribute
  ///        occurs.
  /// \param D The declaration to which the lock/unlock attribute is attached.
  /// Caller must check isValid() after construction.
  SExpr(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
    buildSExprFromExpr(MutexExp, DeclExp, D);
  }

  /// Return true if this is a valid decl sequence.
  /// Caller must call this by hand after construction to handle errors.
  bool isValid() const {
    return !NodeVec.empty();
  }

  bool shouldIgnore() const {
    // Nop is a mutex that we have decided to deliberately ignore.
    assert(NodeVec.size() > 0 && "Invalid Mutex");
    return NodeVec[0].kind() == EOP_Nop;
  }

  bool isUniversal() const {
    assert(NodeVec.size() > 0 && "Invalid Mutex");
    return NodeVec[0].kind() == EOP_Universal;
  }

  /// Issue a warning about an invalid lock expression
  static void warnInvalidLock(ThreadSafetyHandler &Handler, Expr* MutexExp,
                              Expr *DeclExp, const NamedDecl* D) {
    SourceLocation Loc;
    if (DeclExp)
      Loc = DeclExp->getExprLoc();

    // FIXME: add a note about the attribute location in MutexExp or D
    if (Loc.isValid())
      Handler.handleInvalidLockExp(Loc);
  }

  bool operator==(const SExpr &other) const {
    return NodeVec == other.NodeVec;
  }

  bool operator!=(const SExpr &other) const {
    return !(*this == other);
  }

  bool matches(const SExpr &Other, unsigned i = 0, unsigned j = 0) const {
    if (NodeVec[i].matches(Other.NodeVec[j])) {
      unsigned ni = NodeVec[i].arity();
      unsigned nj = Other.NodeVec[j].arity();
      unsigned n  = (ni < nj) ? ni : nj;
      bool Result = true;
      unsigned ci = i+1;  // first child of i
      unsigned cj = j+1;  // first child of j
      for (unsigned k = 0; k < n;
           ++k, ci=getNextSibling(ci), cj = Other.getNextSibling(cj)) {
        Result = Result && matches(Other, ci, cj);
      }
      return Result;
    }
    return false;
  }
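
  // For example, an attribute argument such as "&MyClass::mu" produces a
  // field-access node over a wildcard, so it matches both "a.mu" and "b.mu"
  // for any objects a and b of type MyClass.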

  // A partial match between a.mu and b.mu returns true if a and b have the
  // same type (and thus mu refers to the same mutex declaration), regardless
  // of whether a and b are different objects or not.
  bool partiallyMatches(const SExpr &Other) const {
    if (NodeVec[0].kind() == EOP_Dot)
      return NodeVec[0].matches(Other.NodeVec[0]);
    return false;
  }

  /// \brief Pretty print a lock expression for use in error messages.
  std::string toString(unsigned i = 0) const {
    assert(isValid());
    if (i >= NodeVec.size())
      return "";

    const SExprNode* N = &NodeVec[i];
    switch (N->kind()) {
      case EOP_Nop:
        return "_";
      case EOP_Wildcard:
        return "(?)";
      case EOP_Universal:
        return "*";
      case EOP_This:
        return "this";
      case EOP_NVar:
      case EOP_LVar: {
        return N->getNamedDecl()->getNameAsString();
      }
      case EOP_Dot: {
        if (NodeVec[i+1].kind() == EOP_Wildcard) {
          std::string S = "&";
          S += N->getNamedDecl()->getQualifiedNameAsString();
          return S;
        }
        std::string FieldName = N->getNamedDecl()->getNameAsString();
        if (NodeVec[i+1].kind() == EOP_This)
          return FieldName;

        std::string S = toString(i+1);
        if (N->isArrow())
          return S + "->" + FieldName;
        else
          return S + "." + FieldName;
      }
      case EOP_Call: {
        std::string S = toString(i+1) + "(";
        unsigned NumArgs = N->arity()-1;
        unsigned ci = getNextSibling(i+1);
        for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
          S += toString(ci);
          if (k+1 < NumArgs) S += ",";
        }
        S += ")";
        return S;
      }
      case EOP_MCall: {
        std::string S = "";
        if (NodeVec[i+1].kind() != EOP_This)
          S = toString(i+1) + ".";
        if (const NamedDecl *D = N->getFunctionDecl())
          S += D->getNameAsString() + "(";
        else
          S += "#(";
        unsigned NumArgs = N->arity()-1;
        unsigned ci = getNextSibling(i+1);
        for (unsigned k=0; k<NumArgs; ++k, ci = getNextSibling(ci)) {
          S += toString(ci);
          if (k+1 < NumArgs) S += ",";
        }
        S += ")";
        return S;
      }
      case EOP_Index: {
        std::string S1 = toString(i+1);
        std::string S2 = toString(i+1 + NodeVec[i+1].size());
        return S1 + "[" + S2 + "]";
      }
      case EOP_Unary: {
        std::string S = toString(i+1);
        return "#" + S;
      }
      case EOP_Binary: {
        std::string S1 = toString(i+1);
        std::string S2 = toString(i+1 + NodeVec[i+1].size());
        return "(" + S1 + "#" + S2 + ")";
      }
      case EOP_Unknown: {
        unsigned NumChildren = N->arity();
        if (NumChildren == 0)
          return "(...)";
        std::string S = "(";
        unsigned ci = i+1;
        for (unsigned j = 0; j < NumChildren; ++j, ci = getNextSibling(ci)) {
          S += toString(ci);
          if (j+1 < NumChildren) S += "#";
        }
        S += ")";
        return S;
      }
    }
    return "";
  }
};



/// \brief A short list of SExprs
class MutexIDList : public SmallVector<SExpr, 3> {
public:
  /// \brief Return true if the list contains the specified SExpr
  /// Performs a linear search, because these lists are almost always very
  /// small.
  bool contains(const SExpr& M) {
    for (iterator I=begin(),E=end(); I != E; ++I)
      if ((*I) == M) return true;
    return false;
  }

  /// \brief Push M onto list, but discard duplicates
  void push_back_nodup(const SExpr& M) {
    if (!contains(M)) push_back(M);
  }
};



/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
/// The main body of the analysis maps MutexIDs to LockDatas.
struct LockData {
  SourceLocation AcquireLoc;

  /// \brief LKind stores whether a lock is held shared or exclusively.
  /// Note that this analysis does not currently support either re-entrant
  /// locking or lock "upgrading" and "downgrading" between exclusive and
  /// shared.
  ///
  /// FIXME: add support for re-entrant locking and lock up/downgrading
  LockKind LKind;
  bool     Managed;          // for ScopedLockable objects
  SExpr    UnderlyingMutex;  // for ScopedLockable objects

  LockData(SourceLocation AcquireLoc, LockKind LKind, bool M = false)
    : AcquireLoc(AcquireLoc), LKind(LKind), Managed(M),
      UnderlyingMutex(Decl::EmptyShell())
  {}

  LockData(SourceLocation AcquireLoc, LockKind LKind, const SExpr &Mu)
    : AcquireLoc(AcquireLoc), LKind(LKind), Managed(false),
      UnderlyingMutex(Mu)
  {}

  bool operator==(const LockData &other) const {
    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
  }

  bool operator!=(const LockData &other) const {
    return !(*this == other);
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(AcquireLoc.getRawEncoding());
    ID.AddInteger(LKind);
  }

  bool isAtLeast(LockKind LK) {
    return (LK == LK_Shared) || (LKind == LK_Exclusive);
  }
};


/// \brief A FactEntry stores a single fact that is known at a particular point
/// in the program execution.  Currently, this is information regarding a lock
/// that is held at that point.
struct FactEntry {
  SExpr    MutID;
  LockData LDat;

  FactEntry(const SExpr& M, const LockData& L)
    : MutID(M), LDat(L)
  { }
};


typedef unsigned short FactID;

/// \brief FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  std::vector<FactEntry> Facts;

public:
  FactID newLock(const SExpr& M, const LockData& L) {
    Facts.push_back(FactEntry(M,L));
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry& operator[](FactID F) const { return Facts[F]; }
  FactEntry&       operator[](FactID F)       { return Facts[F]; }
};


/// \brief A FactSet is the set of facts that are known to be true at a
/// particular program point.  FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager.  A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup.  Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
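/// For example, a lookup for "x->mu" may need to be answered by a fact for
/// the universal lock "*" (findLockUniv), or related to a fact for "y->mu"
/// that names the same mutex field (findPartialMatch); neither lookup is
/// possible with a map keyed on exact expressions.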
class FactSet {
private:
  typedef SmallVector<FactID, 4> FactVec;

  FactVec FactIDs;

public:
  typedef FactVec::iterator       iterator;
  typedef FactVec::const_iterator const_iterator;

  iterator       begin()       { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator       end()       { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  FactID addLock(FactManager& FM, const SExpr& M, const LockData& L) {
    FactID F = FM.newLock(M, L);
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager& FM, const SExpr& M) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].MutID.matches(M)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].MutID.matches(M)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  LockData* findLock(FactManager &FM, const SExpr &M) const {
    for (const_iterator I = begin(), E = end(); I != E; ++I) {
      const SExpr &Exp = FM[*I].MutID;
      if (Exp.matches(M))
        return &FM[*I].LDat;
    }
    return 0;
  }

  LockData* findLockUniv(FactManager &FM, const SExpr &M) const {
    for (const_iterator I = begin(), E = end(); I != E; ++I) {
      const SExpr &Exp = FM[*I].MutID;
      if (Exp.matches(M) || Exp.isUniversal())
        return &FM[*I].LDat;
    }
    return 0;
  }

  FactEntry* findPartialMatch(FactManager &FM, const SExpr &M) const {
    for (const_iterator I=begin(), E=end(); I != E; ++I) {
      const SExpr& Exp = FM[*I].MutID;
      if (Exp.partiallyMatches(M)) return &FM[*I];
    }
    return 0;
  }
};
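
// Usage sketch (illustrative; Mu and Loc stand for an SExpr and a
// SourceLocation already in scope):
//   FactManager FM;
//   FactSet     FS;
//   FS.addLock(FM, Mu, LockData(Loc, LK_Exclusive));
//   if (LockData *LD = FS.findLock(FM, Mu))
//     ...  // Mu (or a matching expression) is held here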


/// A Lockset maps each SExpr (defined above) to information about how it has
/// been locked.
typedef llvm::ImmutableMap<SExpr, LockData> Lockset;
typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;

class LocalVariableMap;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG.  See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  FactSet EntrySet;             // Lockset held at entry to block
  FactSet ExitSet;              // Lockset held at exit from block
  LocalVarContext EntryContext; // Context held at entry to block
  LocalVarContext ExitContext;  // Context held at exit from block
  SourceLocation EntryLoc;      // Location of first statement in block
  SourceLocation ExitLoc;       // Location of last statement in block.
  unsigned EntryIndex;          // Used to replay contexts later

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }
  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
    : EntryContext(EmptyCtx), ExitContext(EmptyCtx)
  { }

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};



// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions.  It provides SSA-like functionality when traversing the
// CFG.  Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace references to local
// variables with their SSA names.  Instead, we compute a Context for each
// point in the code, which maps local variables to the appropriate SSA name.
// This map changes with each assignment.
//
// The map is computed in a single pass over the CFG.  Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  typedef LocalVarContext Context;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted.  A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    const NamedDecl *Dec;  // The original declaration for this variable.
    const Expr *Exp;       // The expression for this variable, OR
    unsigned Ref;          // Reference to another VarDefinition
    Context Ctx;           // The map with which Exp should be interpreted.

    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
      : Dec(D), Exp(E), Ref(0), Ctx(C)
    { }

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
      : Dec(D), Exp(0), Ref(R), Ctx(C)
    { }
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<unsigned> CtxIndices;
  std::vector<std::pair<Stmt*, Context> > SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(0, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return 0;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context.  Returns
  /// NULL if the expression is not statically known.  If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return 0;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return 0;
  }
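
  // For illustration: after "int x = 0;" the context maps x to a definition
  // whose Exp is the literal 0.  On a back edge, x may instead map to a
  // reference definition (Exp == 0, Ref != 0); lookupExpr then follows the
  // Ref chain until it finds a concrete Expr, or reaches definition 0,
  // which means the value is not statically known.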

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S.  This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG.  It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S,C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable.  The index 0 is a placeholder for cleared
  // definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);

  friend class VarMapBuilder;
};


// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}


/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
    : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(DeclStmt *S);
  void VisitBinaryOperator(BinaryOperator *BO);
};


// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
  bool modifiedCtx = false;
  DeclGroupRef DGrp = S->getDeclGroup();
  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
      Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}


// Computes the intersection of two contexts.  The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
    const NamedDecl *Dec = I.getKey();
    unsigned i1 = I.getData();
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)             // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != i1)  // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}
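
// For example, joining { x -> x2, y -> y1 } with { x -> x3, y -> y1 }
// yields { x -> 0, y -> y1 }: y survives intact, while x is cleared to the
// placeholder definition 0 because the two paths disagree.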

// For every variable in C, create a new variable that refers to the
// definition in C.  Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
    const NamedDecl *Dec = I.getKey();
    unsigned i = I.getData();
    Result = addReference(Dec, i, Result);
  }
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions.  C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
    const NamedDecl *Dec = I.getKey();
    unsigned i1 = I.getData();
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(Dec);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0;  // Mark this variable as undefined
  }
}


// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself.  At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, e.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1 (x is unknown)  | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm.  Those definitions that remain in the intersection are from
// blocks that strictly dominate the current block.  We do not bother to
// insert proper phi nodes, because they are not used in our analysis;
// instead, wherever a phi node would be required, we simply remove that
// definition from the context (e.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass.  Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals.  (These correspond to places where SSA
// might have to insert a phi node.)  On the second pass, these definitions
// are set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.)  E.g.
//
//                       { Context           | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
//
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge, skip it
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(0, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          const CFGStmt *CS = cast<CFGStmt>(&*BI);
          VMapBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge, skip it
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd   = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(0, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminator()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement.  Find the source location
      // of the first statement in the block.
      for (CFGBlock::const_iterator BI = CurrBlock->begin(),
           BE = CurrBlock->end(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor.  Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    }
  }
}

/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;

  ThreadSafetyHandler       &Handler;
  LocalVariableMap          LocalVarMap;
  FactManager               FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}

  void addLock(FactSet &FSet, const SExpr &Mutex, const LockData &LDat);
  void removeLock(FactSet &FSet, const SExpr &Mutex,
                  SourceLocation UnlockLoc, bool FullyRemove=false);

  template <typename AttrType>
  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
                   const NamedDecl *D);

  template <class AttrType>
  void getMutexIDs(MutexIDList &Mtxs, AttrType *Attr, Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc,
                        LockErrorKind LEK1, LockErrorKind LEK2,
                        bool Modify=true);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc, LockErrorKind LEK1,
                        bool Modify=true) {
    intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
  }

  void runAnalysis(AnalysisDeclContext &AC);
};


/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param Mutex -- the Mutex expression for the lock
/// \param LDat  -- the LockData for the lock
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const SExpr &Mutex,
                                   const LockData &LDat) {
  // FIXME: deal with acquired before/after annotations.
  // FIXME: Don't always warn when we have support for reentrant locks.
  if (Mutex.shouldIgnore())
    return;

  if (FSet.findLock(FactMan, Mutex)) {
    Handler.handleDoubleLock(Mutex.toString(), LDat.AcquireLoc);
  } else {
    FSet.addLock(FactMan, Mutex, LDat);
  }
}


/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param Mutex The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet,
                                      const SExpr &Mutex,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove) {
  if (Mutex.shouldIgnore())
    return;

  const LockData *LDat = FSet.findLock(FactMan, Mutex);
  if (!LDat) {
    Handler.handleUnmatchedUnlock(Mutex.toString(), UnlockLoc);
    return;
  }

  if (LDat->UnderlyingMutex.isValid()) {
    // This is a scoped lockable object, which manages the real mutex.
    if (FullyRemove) {
      // We're destroying the managing object.
      // Remove the underlying mutex if it exists; but don't warn.
      if (FSet.findLock(FactMan, LDat->UnderlyingMutex))
        FSet.removeLock(FactMan, LDat->UnderlyingMutex);
    } else {
      // We're releasing the underlying mutex, but not destroying the
      // managing object.  Warn on dual release.
      if (!FSet.findLock(FactMan, LDat->UnderlyingMutex)) {
        Handler.handleUnmatchedUnlock(LDat->UnderlyingMutex.toString(),
                                      UnlockLoc);
      }
      FSet.removeLock(FactMan, LDat->UnderlyingMutex);
      return;
    }
  }
  FSet.removeLock(FactMan, Mutex);
}
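
// For example (hypothetical user code): for a scoped lock
//   MutexLockGuard g(&mu);
// the fact for "g" records "mu" as its UnderlyingMutex.  An explicit
// "g.Unlock()" removes "mu" but deliberately keeps the fact for "g", so
// that the eventual destruction of "g" (FullyRemove == true) is matched;
// the destructor then removes "mu" silently if it is still held.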


/// \brief Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
                                       Expr *Exp, const NamedDecl *D) {
  typedef typename AttrType::args_iterator iterator_type;

  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    SExpr Mu(0, Exp, D);
    if (!Mu.isValid())
      SExpr::warnInvalidLock(Handler, 0, Exp, D);
    else
      Mtxs.push_back_nodup(Mu);
    return;
  }

  for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
    SExpr Mu(*I, Exp, D);
    if (!Mu.isValid())
      SExpr::warnInvalidLock(Handler, *I, Exp, D);
    else
      Mtxs.push_back_nodup(Mu);
  }
}


/// \brief Extract the list of mutexIDs from a trylock attribute.  If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(MutexIDList &Mtxs, AttrType *Attr,
                                       Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
    branch = BLE->getValue();
  }
  else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
    branch = ILE->getValue().getBoolValue();
  }
  int branchnum = branch ? 0 : 1;
  if (Neg) branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum) {
      getMutexIDs(Mtxs, Attr, Exp, D);
    }
  }
}


bool getStaticBooleanValue(Expr* E, bool& TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  }
  return false;
}


// If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false by the caller, and will be set to
// true if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return 0;

  if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
    return CallExp;
  }
  else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  }
  else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  }
  else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
    return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
  }
  else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return 0;
  }
  else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      else if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return 0;
    }
    return 0;
  }
  // FIXME -- handle && and || as well.
  return 0;
}
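
// For example, the following conditions (using a hypothetical mu.tryLock())
// all trace back to the same trylock call expression:
//   if (mu.tryLock()) ...                    // Negate == false
//   bool b = mu.tryLock(); if (b) ...        // Negate == false
//   if (!mu.tryLock()) ...                   // Negate == true
//   if (mu.tryLock() == false) ...           // Negate == true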
/// \brief Find the lockset that holds on the edge between PredBlock
/// and CurrBlock.  The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  if (!PredBlock->getTerminatorCondition())
    return;

  bool Negate = false;
  const Stmt *Cond = PredBlock->getTerminatorCondition();
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  CallExpr *Exp =
    const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
  if (!Exp)
    return;

  NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttrs())
    return;

  MutexIDList ExclusiveLocksToAdd;
  MutexIDList SharedLocksToAdd;

  // If the condition is a call to a trylock function, then grab the attributes
  AttrVec &ArgAttrs = FunDecl->getAttrs();
  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      case attr::ExclusiveTrylockFunction: {
        ExclusiveTrylockFunctionAttr *A =
          cast<ExclusiveTrylockFunctionAttr>(Attr);
        getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
        break;
      }
      case attr::SharedTrylockFunction: {
        SharedTrylockFunctionAttr *A =
          cast<SharedTrylockFunctionAttr>(Attr);
        getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
        break;
      }
      default:
        break;
    }
  }

  // Add the locks to the edge lockset.
  SourceLocation Loc = Exp->getExprLoc();
  for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
    addLock(Result, ExclusiveLocksToAdd[i],
            LockData(Loc, LK_Exclusive));
  }
  for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
    addLock(Result, SharedLocksToAdd[i],
            LockData(Loc, LK_Shared));
  }
}
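
// Illustrative sketch (hypothetical user code): given
//   bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true, mu);
// the edge from the condition to the "then" block below carries 'mu',
// while the edge to the code after the 'if' does not:
//
//   if (mu.TryLock()) {
//     a = 1;        // ok: 'mu' is in the edge lockset
//     mu.Unlock();
//   }
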
/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  FactSet FSet;
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // Helper functions
  const ValueDecl *getValueDecl(Expr *Exp);

  void warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK);
  void warnIfMutexHeld(const NamedDecl *D, Expr *Exp, Expr *MutexExp);

  void checkAccess(Expr *Exp, AccessKind AK);
  void checkDereference(Expr *Exp, AccessKind AK);
  void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = 0);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
    : StmtVisitor<BuildLockset>(),
      Analyzer(Anlzr),
      FSet(Info.EntrySet),
      LVarCtx(Info.EntryContext),
      CtxIndex(Info.EntryIndex)
  {}

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCallExpr(CallExpr *Exp);
  void VisitCXXConstructExpr(CXXConstructExpr *Exp);
  void VisitDeclStmt(DeclStmt *S);
};


/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return 0;
}

/// \brief Warn if FSet does not contain a lock sufficient to protect an
/// access of at least the given AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK) {
  LockKind LK = getLockKindFromAccessKind(AK);

  SExpr Mutex(MutexExp, Exp, D);
  if (!Mutex.isValid()) {
    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
    return;
  } else if (Mutex.shouldIgnore()) {
    return;
  }

  LockData* LDat = FSet.findLockUniv(Analyzer->FactMan, Mutex);
  bool NoError = true;
  if (!LDat) {
    // No exact match found.  Look for a partial match.
    FactEntry* FEntry = FSet.findPartialMatch(Analyzer->FactMan, Mutex);
    if (FEntry) {
      // Warn that there's no precise match.
      LDat = &FEntry->LDat;
      std::string PartMatchStr = FEntry->MutID.toString();
      StringRef PartMatchName(PartMatchStr);
      Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
                                           Exp->getExprLoc(), &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
                                           Exp->getExprLoc());
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK))
    Analyzer->Handler.handleMutexNotHeld(D, POK, Mutex.toString(), LK,
                                         Exp->getExprLoc());
}
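
// Illustrative sketch of the two diagnostics issued above (hypothetical
// user code, using the documented annotation macros):
//
//   Mutex mu;
//   int a GUARDED_BY(mu);
//
//   void f() {
//     a = 1;    // warning: no lock held at all
//   }
//   void g() SHARED_LOCKS_REQUIRED(mu) {
//     a = 1;    // warning: 'mu' is held, but writing requires an
//   }           // exclusive lock (LDat->isAtLeast(LK) fails)
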
/// \brief Warn if FSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, Expr* Exp,
                                   Expr *MutexExp) {
  SExpr Mutex(MutexExp, Exp, D);
  if (!Mutex.isValid()) {
    SExpr::warnInvalidLock(Analyzer->Handler, MutexExp, Exp, D);
    return;
  }

  LockData* LDat = FSet.findLock(Analyzer->FactMan, Mutex);
  if (LDat) {
    std::string DeclName = D->getNameAsString();
    StringRef DeclNameSR(DeclName);
    Analyzer->Handler.handleFunExcludesLock(DeclNameSR, Mutex.toString(),
                                            Exp->getExprLoc());
  }
}


/// \brief This method identifies variable dereferences and checks pt_guarded_by
/// and pt_guarded_var annotations.  Note that we only check these annotations
/// at the time a pointer is dereferenced.
/// FIXME: We need to check for other types of pointer dereferences
/// (e.g. [], ->) and deal with them here.
/// \param Exp An expression that has been read or written.
void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
  if (!UO || UO->getOpcode() != clang::UO_Deref)
    return;
  Exp = UO->getSubExpr()->IgnoreParenCasts();

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<PtGuardedVarAttr>() && FSet.isEmpty())
    Analyzer->Handler.handleNoMutexHeld(D, POK_VarDereference, AK,
                                        Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
}

/// \brief Checks guarded_by and guarded_var attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr or
/// MemberExpr, we need to check whether there are any guarded_by or
/// guarded_var attributes, and make sure we hold the appropriate mutexes.
void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<GuardedVarAttr>() && FSet.isEmpty())
    Analyzer->Handler.handleNoMutexHeld(D, POK_VarAccess, AK,
                                        Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}
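
// Illustrative sketch of the difference between the two checks above
// (hypothetical user code): guarded_by protects the pointer variable
// itself, while pt_guarded_by protects the memory it points to.
//
//   Mutex mu;
//   int x;
//   int *p PT_GUARDED_BY(mu);
//
//   void f() {
//     p = &x;    // ok: only the pointer is written (checkAccess)
//     *p = 42;   // warning from checkDereference: '*p' requires 'mu'
//   }
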
/// \brief Process a function call, method call, constructor call,
/// or destructor call.  This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held.  Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
  const AttrVec &ArgAttrs = D->getAttrs();
  MutexIDList ExclusiveLocksToAdd;
  MutexIDList SharedLocksToAdd;
  MutexIDList LocksToRemove;

  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *At = const_cast<Attr*>(ArgAttrs[i]);
    switch (At->getKind()) {
      // When we encounter an exclusive lock function, we need to add the lock
      // to our lockset with kind exclusive.
      case attr::ExclusiveLockFunction: {
        ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(At);
        Analyzer->getMutexIDs(ExclusiveLocksToAdd, A, Exp, D);
        break;
      }

      // When we encounter a shared lock function, we need to add the lock
      // to our lockset with kind shared.
      case attr::SharedLockFunction: {
        SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(At);
        Analyzer->getMutexIDs(SharedLocksToAdd, A, Exp, D);
        break;
      }

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::UnlockFunction: {
        UnlockFunctionAttr *A = cast<UnlockFunctionAttr>(At);
        Analyzer->getMutexIDs(LocksToRemove, A, Exp, D);
        break;
      }

      case attr::ExclusiveLocksRequired: {
        ExclusiveLocksRequiredAttr *A = cast<ExclusiveLocksRequiredAttr>(At);

        for (ExclusiveLocksRequiredAttr::args_iterator
             I = A->args_begin(), E = A->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
        break;
      }

      case attr::SharedLocksRequired: {
        SharedLocksRequiredAttr *A = cast<SharedLocksRequiredAttr>(At);

        for (SharedLocksRequiredAttr::args_iterator I = A->args_begin(),
             E = A->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
        break;
      }

      case attr::LocksExcluded: {
        LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);

        for (LocksExcludedAttr::args_iterator I = A->args_begin(),
             E = A->args_end(); I != E; ++I) {
          warnIfMutexHeld(D, Exp, *I);
        }
        break;
      }

      // Ignore other (non thread-safety) attributes
      default:
        break;
    }
  }
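
  // Illustrative sketch of the attribute kinds dispatched above, as they
  // would appear on hypothetical user declarations (documented macros):
  //
  //   void init()   EXCLUSIVE_LOCK_FUNCTION(mu);  // adds 'mu' (exclusive)
  //   void done()   UNLOCK_FUNCTION(mu);          // removes 'mu'
  //   void check()  SHARED_LOCKS_REQUIRED(mu);    // warns unless 'mu' held
  //   void reload() LOCKS_EXCLUDED(mu);           // warns if 'mu' is held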

  // Figure out if we're calling the constructor of a scoped lockable class
  bool isScopedVar = false;
  if (VD) {
    if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
      const CXXRecordDecl* PD = CD->getParent();
      if (PD && PD->getAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }

  // Add locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
    Analyzer->addLock(FSet, ExclusiveLocksToAdd[i],
                      LockData(Loc, LK_Exclusive, isScopedVar));
  }
  for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
    Analyzer->addLock(FSet, SharedLocksToAdd[i],
                      LockData(Loc, LK_Shared, isScopedVar));
  }

  // Add the managing object as a dummy mutex, mapped to the underlying mutex.
  // FIXME -- this doesn't work if we acquire multiple locks.
  if (isScopedVar) {
    SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
    SExpr SMutex(&DRE, 0, 0);

    for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Exclusive,
                                               ExclusiveLocksToAdd[i]));
    }
    for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
      Analyzer->addLock(FSet, SMutex, LockData(MLoc, LK_Shared,
                                               SharedLocksToAdd[i]));
    }
  }

  // Remove locks.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (unsigned i=0,n=LocksToRemove.size(); i<n; ++i) {
    Analyzer->removeLock(FSet, LocksToRemove[i], Loc, Dtor);
  }
}


/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes.  Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
      checkAccess(SubExp, AK_Written);
      checkDereference(SubExp, AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
  checkAccess(LHSExp, AK_Written);
  checkDereference(LHSExp, AK_Written);
}

/// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
  checkAccess(SubExp, AK_Read);
  checkDereference(SubExp, AK_Read);
}


void BuildLockset::VisitCallExpr(CallExpr *Exp) {
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!D || !D->hasAttrs())
    return;
  handleCall(Exp, D);
}
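
// Illustrative sketch of how the visitors above classify accesses
// (hypothetical user code, with 'a' declared GUARDED_BY(mu)):
//
//   a++;       // VisitUnaryOperator: read-modify-write => AK_Written
//   a = b;     // VisitBinaryOperator: assignment to 'a' => AK_Written
//   c = a;     // VisitCastExpr: lvalue-to-rvalue load of 'a' => AK_Read
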
void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
  // FIXME -- only handles constructors in DeclStmt below.
}

void BuildLockset::VisitDeclStmt(DeclStmt *S) {
  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);

  DeclGroupRef DGrp = S->getDeclGroup();
  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
    Decl *D = *I;
    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
      Expr *E = VD->getInit();
      // handle constructors that involve temporaries
      if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
        E = EWC->getSubExpr();

      if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
        NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(CE, CtorD, VD);
      }
    }
  }
}
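
// Illustrative sketch of the scoped-lockable pattern handled by
// VisitDeclStmt together with handleCall (hypothetical user code):
//
//   class SCOPED_LOCKABLE MutexLock {
//    public:
//     MutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
//     ~MutexLock() UNLOCK_FUNCTION();
//   };
//
//   void f() {
//     MutexLock guard(&mu); // adds 'mu' and the dummy fact for 'guard'
//     a = 1;                // ok: 'mu' is held
//   }                       // destructor fully removes both facts
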
/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged.  For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the locksets after B and
/// C are the same.  In the event of a difference, we use the intersection of
/// these two locksets at the start of D.
///
/// \param FSet1 The first lockset.
/// \param FSet2 The second lockset.
/// \param JoinLoc The location of the join point for error reporting
/// \param LEK1 The error message to report if a mutex is missing from FSet1
/// \param LEK2 The error message to report if a mutex is missing from FSet2
/// \param Modify True if FSet1 should be updated to the intersection.
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
                                            const FactSet &FSet2,
                                            SourceLocation JoinLoc,
                                            LockErrorKind LEK1,
                                            LockErrorKind LEK2,
                                            bool Modify) {
  FactSet FSet1Orig = FSet1;

  // Find locks in FSet2 that are not in FSet1, and warn about them.
  for (FactSet::const_iterator I = FSet2.begin(), E = FSet2.end();
       I != E; ++I) {
    const SExpr &FSet2Mutex = FactMan[*I].MutID;
    const LockData &LDat2 = FactMan[*I].LDat;

    if (const LockData *LDat1 = FSet1.findLock(FactMan, FSet2Mutex)) {
      if (LDat1->LKind != LDat2.LKind) {
        Handler.handleExclusiveAndShared(FSet2Mutex.toString(),
                                         LDat2.AcquireLoc,
                                         LDat1->AcquireLoc);
        if (Modify && LDat1->LKind != LK_Exclusive) {
          FSet1.removeLock(FactMan, FSet2Mutex);
          FSet1.addLock(FactMan, FSet2Mutex, LDat2);
        }
      }
    } else {
      if (LDat2.UnderlyingMutex.isValid()) {
        if (FSet2.findLock(FactMan, LDat2.UnderlyingMutex)) {
          // If this is a scoped lock that manages another mutex, and if the
          // underlying mutex is still held, then warn about the underlying
          // mutex.
          Handler.handleMutexHeldEndOfScope(LDat2.UnderlyingMutex.toString(),
                                            LDat2.AcquireLoc,
                                            JoinLoc, LEK1);
        }
      }
      else if (!LDat2.Managed && !FSet2Mutex.isUniversal())
        Handler.handleMutexHeldEndOfScope(FSet2Mutex.toString(),
                                          LDat2.AcquireLoc,
                                          JoinLoc, LEK1);
    }
  }

  // Find locks in FSet1 that are not in FSet2, and warn about them.
  for (FactSet::const_iterator I = FSet1.begin(), E = FSet1.end();
       I != E; ++I) {
    const SExpr &FSet1Mutex = FactMan[*I].MutID;
    const LockData &LDat1 = FactMan[*I].LDat;

    if (!FSet2.findLock(FactMan, FSet1Mutex)) {
      if (LDat1.UnderlyingMutex.isValid()) {
        if (FSet1Orig.findLock(FactMan, LDat1.UnderlyingMutex)) {
          // If this is a scoped lock that manages another mutex, and if the
          // underlying mutex is still held, then warn about the underlying
          // mutex.
          Handler.handleMutexHeldEndOfScope(LDat1.UnderlyingMutex.toString(),
                                            LDat1.AcquireLoc,
                                            JoinLoc, LEK1);
        }
      }
      else if (!LDat1.Managed && !FSet1Mutex.isUniversal())
        Handler.handleMutexHeldEndOfScope(FSet1Mutex.toString(),
                                          LDat1.AcquireLoc,
                                          JoinLoc, LEK2);
      if (Modify)
        FSet1.removeLock(FactMan, FSet1Mutex);
    }
  }
}
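
// Illustrative sketch of the merge handled by intersectAndWarn
// (hypothetical user code):
//
//   if (c)
//     mu.Lock();  // 'mu' is held in only one predecessor's exit set
//   a = 1;        // join point: warn (LEK_LockedSomePredecessors) and
//                 // drop 'mu' from the intersected entry lockset
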
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());

  // AC.dumpCFG(true);

  if (!D)
    return;  // Ignore anonymous functions for now.
  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
    return;
  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code.  Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access are disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.

  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);

  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);

  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset.  Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
    const AttrVec &ArgAttrs = D->getAttrs();

    MutexIDList ExclusiveLocksToAdd;
    MutexIDList SharedLocksToAdd;

    SourceLocation Loc = D->getLocation();
    for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
      Attr *Attr = ArgAttrs[i];
      Loc = Attr->getLocation();
      if (ExclusiveLocksRequiredAttr *A
            = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
        getMutexIDs(ExclusiveLocksToAdd, A, (Expr*) 0, D);
      } else if (SharedLocksRequiredAttr *A
                   = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
        getMutexIDs(SharedLocksToAdd, A, (Expr*) 0, D);
      } else if (isa<UnlockFunctionAttr>(Attr)) {
        // Don't try to check unlock functions for now
        return;
      } else if (isa<ExclusiveLockFunctionAttr>(Attr)) {
        // Don't try to check lock functions for now
        return;
      } else if (isa<SharedLockFunctionAttr>(Attr)) {
        // Don't try to check lock functions for now
        return;
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now
        return;
      }
    }

    // FIXME -- Loc can be wrong here.
    for (unsigned i=0,n=ExclusiveLocksToAdd.size(); i<n; ++i) {
      addLock(InitialLockset, ExclusiveLocksToAdd[i],
              LockData(Loc, LK_Exclusive));
    }
    for (unsigned i=0,n=SharedLocksToAdd.size(); i<n; ++i) {
      addLock(InitialLockset, SharedLocksToAdd[i],
              LockData(Loc, LK_Shared));
    }
  }
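
  // Illustrative sketch: for a hypothetical function such as
  //
  //   void f() EXCLUSIVE_LOCKS_REQUIRED(mu) { a = 1; }
  //
  // the code above seeds the entry lockset of the first CFG block with
  // 'mu' held exclusively, so accesses guarded by 'mu' check out.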

  for (PostOrderCFGView::iterator I = SortedGraph->begin(),
       E = SortedGraph->end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same.  We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union.  We
    // may want to also keep the union in future.  As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M.  At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths.  Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    llvm::SmallVector<CFGBlock*, 8> SpecialBlocks;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {

      // Skip back edges: if *PI has not been visited yet, then
      // *PI -> CurrBlock must be a back edge.
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      // Ignore edges from blocks that can't return.
      if ((*PI)->hasNoReturnElement())
        continue;

      // If the previous block ended in a 'continue' or 'break' statement, then
      // a difference in locksets is probably due to a bug in that block, rather
      // than in some other predecessor.  In that case, keep the other
      // predecessor's lockset.
      if (const Stmt *Terminator = (*PI)->getTerminator()) {
        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
          SpecialBlocks.push_back(*PI);
          continue;
        }
      }

      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         CurrBlockInfo->EntryLoc,
                         LEK_LockedSomePredecessors);
      }
    }

    // Process continue and break blocks.  Assume that the lockset for the
    // resulting block is unaffected by any discrepancies in them.
    for (unsigned SpecialI = 0, SpecialN = SpecialBlocks.size();
         SpecialI < SpecialN; ++SpecialI) {
      CFGBlock *PrevBlock = SpecialBlocks[SpecialI];
      int PrevBlockID = PrevBlock->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Determine whether this edge is a loop terminator for diagnostic
        // purposes.  FIXME: A 'break' statement might be a loop terminator, but
        // it might also be part of a switch.  Also, a subsequent destructor
        // might add to the lockset, in which case the real issue might be a
        // double lock on the other path.
        const Stmt *Terminator = PrevBlock->getTerminator();
        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);

        FactSet PrevLockset;
        getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
                       PrevBlock, CurrBlock);

        // Do not update EntrySet.
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         PrevBlockInfo->ExitLoc,
                         IsLoop ? LEK_LockedSomeLoopIterations
                                : LEK_LockedSomePredecessors,
                         false);
      }
    }

    BuildLockset LocksetBuilder(this, *CurrBlockInfo);

    // Visit all the statements in the basic block.
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          const CFGStmt *CS = cast<CFGStmt>(&*BI);
          LocksetBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          const CFGAutomaticObjDtor *AD = cast<CFGAutomaticObjDtor>(&*BI);
          CXXDestructorDecl *DD = const_cast<CXXDestructorDecl*>(
            AD->getDestructorDecl(AC.getASTContext()));
          if (!DD->hasAttrs())
            break;

          // Create a dummy expression for the variable, so that the
          // destructor call can be processed like any other call.
          VarDecl *VD = const_cast<VarDecl*>(AD->getVarDecl());
          DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue,
                          AD->getTriggerStmt()->getLocEnd());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;

    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the lockset of CurrBlock is equal
    // to the one held at the beginning of FirstLoopBlock.  We can look up the
    // lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {

      // Skip if CurrBlock -> *SI is *not* a back edge.
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
                       PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations,
                       false);
    }
  }

  // Check to make sure that the exit block is reachable.
  bool ExitUnreachable = true;
  for (CFGBlock::const_pred_iterator PI = CFGraph->getExit().pred_begin(),
       PE = CFGraph->getExit().pred_end(); PI != PE; ++PI) {
    if (!(*PI)->hasNoReturnElement()) {
      ExitUnreachable = false;
      break;
    }
  }
  // Skip the final check if the exit block is unreachable.
  if (ExitUnreachable)
    return;

  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo *Final   = &BlockInfo[CFGraph->getExit().getBlockID()];

  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(Initial->EntrySet, Final->ExitSet,
                   Final->ExitLoc,
                   LEK_LockedAtEndOfFunction,
                   LEK_NotLockedAtEndOfFunction,
                   false);
}

} // end anonymous namespace


namespace clang {
namespace thread_safety {

/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                             ThreadSafetyHandler &Handler) {
  ThreadSafetyAnalyzer Analyzer(Handler);
  Analyzer.runAnalysis(AC);
}

/// \brief Helper function that returns a LockKind required for the given level
/// of access.
LockKind getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read :
      return LK_Shared;
    case AK_Written :
      return LK_Exclusive;
  }
  llvm_unreachable("Unknown AccessKind");
}

}} // end namespace clang::thread_safety