AnalysisBasedWarnings.cpp revision a49d1d8a34381802040c3d7fa218e93b457d2b1d
//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines analysis_warnings::[Policy,Executor].
// Together they are used by Sema to issue warnings based on inexpensive
// static analysis algorithms in libAnalysis.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <vector>

using namespace clang;

//===----------------------------------------------------------------------===//
// Unreachable code analysis.
//===----------------------------------------------------------------------===//

namespace {
  class UnreachableCodeHandler : public reachable_code::Callback {
    Sema &S;
  public:
    UnreachableCodeHandler(Sema &s) : S(s) {}

    void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
      S.Diag(L, diag::warn_unreachable) << R1 << R2;
    }
  };
}

/// CheckUnreachable - Check for unreachable code.
static void CheckUnreachable(Sema &S, AnalysisContext &AC) {
  UnreachableCodeHandler UC(S);
  reachable_code::FindUnreachableCode(AC, UC);
}

//===----------------------------------------------------------------------===//
// Check for missing return value.
//===----------------------------------------------------------------------===//

enum ControlFlowKind {
  UnknownFallThrough,
  NeverFallThrough,
  MaybeFallThrough,
  AlwaysFallThrough,
  NeverFallThroughOrReturn
};

/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
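///
/// Illustrative examples (not from the original source), for functions
/// declared to return 'int':
///
///   int f(bool b) { if (b) return 1; }  // MaybeFallThrough: falls off the
///                                       // end only when 'b' is false.
///   int g() {}                          // AlwaysFallThrough.
///   int h() { while (true) {} }         // NeverFallThroughOrReturn: the
///                                       // code after the loop is dead.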
static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(&b, live);
          continue;
        }
      }
    }

  // Now that we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
         I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Destructors can appear after the 'return' in the CFG.  This is
    // normal.  We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
    bool hasNoReturnDtor = false;

    for ( ; ri != re ; ++ri) {
      CFGElement CE = *ri;

      // FIXME: The right solution is to just sever the edges in the
      // CFG itself.
      if (const CFGImplicitDtor *iDtor = ri->getAs<CFGImplicitDtor>())
        if (iDtor->isNoReturn(AC.getASTContext())) {
          hasNoReturnDtor = true;
          HasFakeEdge = true;
          break;
        }

      if (isa<CFGStmt>(CE))
        break;
    }

    if (hasNoReturnDtor)
      continue;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = cast<CFGStmt>(*ri);
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (const CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
            == B.succ_end()) {
        HasAbnormalEdge = true;
        continue;
      }
      const Expr *CEE = C->getCallee()->IgnoreParenCasts();
      QualType calleeType = CEE->getType();
      if (calleeType == AC.getASTContext().BoundMemberTy) {
        calleeType = Expr::findBoundMemberType(CEE);
        assert(!calleeType.isNull() && "analyzing unresolved call?");
      }
      if (getFunctionExtInfo(calleeType).getNoReturn()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        const ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (!NoReturnEdge)
      HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}

namespace {

struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  bool funMode;
  SourceLocation FuncLoc;

  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    if (!isVirtualMethod)
      D.diag_NeverFallThroughOrReturn =
        diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = true;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn =
      diag::warn_suggest_noreturn_block;
    D.funMode = false;
    return D;
  }

  bool checkDiagnostics(Diagnostic &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode) {
      return (ReturnsVoid ||
              D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
                                   FuncLoc) == Diagnostic::Ignored)
          && (!HasNoReturn ||
              D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr,
                                   FuncLoc) == Diagnostic::Ignored)
          && (!ReturnsVoid ||
              D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
                == Diagnostic::Ignored);
    }

    // For blocks.
    return ReturnsVoid && !HasNoReturn
        && (!ReturnsVoid ||
            D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
              == Diagnostic::Ignored);
  }
};

}

/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value.  Check that we don't fall off the end
/// of a noreturn function.  We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    const BlockExpr *blkExpr,
                                    const CheckFallThroughDiagnostics &CD,
                                    AnalysisContext &AC) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    ReturnsVoid = FD->getResultType()->isVoidType();
    HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
                  FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
  }
  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getResultType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    QualType BlockTy = blkExpr->getType();
    if (const FunctionType *FT =
          BlockTy->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getResultType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  Diagnostic &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
    return;

  // FIXME: Function try block
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
      case UnknownFallThrough:
        break;

      case MaybeFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_ReturnsNonVoid);
        break;
      case AlwaysFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_ReturnsNonVoid);
        break;
      case NeverFallThroughOrReturn:
        if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
          if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << FD;
          } else {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
          }
        }
        break;
      case NeverFallThrough:
        break;
    }
  }
}

//===----------------------------------------------------------------------===//
// -Wuninitialized
//===----------------------------------------------------------------------===//

namespace {
/// ContainsReference - A visitor class to search for references to
/// a particular declaration (the needle) within any evaluated component of an
/// expression (recursively).
class ContainsReference : public EvaluatedExprVisitor<ContainsReference> {
  bool FoundReference;
  const DeclRefExpr *Needle;

public:
  ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
    : EvaluatedExprVisitor<ContainsReference>(Context),
      FoundReference(false), Needle(Needle) {}

  void VisitExpr(Expr *E) {
    // Stop evaluating if we already have a reference.
    if (FoundReference)
      return;

    EvaluatedExprVisitor<ContainsReference>::VisitExpr(E);
  }

  void VisitDeclRefExpr(DeclRefExpr *E) {
    if (E == Needle)
      FoundReference = true;
    else
      EvaluatedExprVisitor<ContainsReference>::VisitDeclRefExpr(E);
  }

  bool doesContainReference() const { return FoundReference; }
};
}

/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable.  This manages the different forms of diagnostic
/// emitted for particular types of uses.  Returns true if the use was
/// diagnosed as a warning.  If a particular use is one we omit warnings for,
/// returns false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const Expr *E, bool isAlwaysUninit) {
  bool isSelfInit = false;

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    if (isAlwaysUninit) {
      // Inspect the initializer of the variable declaration which is
      // being referenced prior to its initialization.  We emit
      // specialized diagnostics for self-initialization, and we
      // specifically avoid warning about self references which take the
      // form of:
      //
      //   int x = x;
      //
      // This is used to indicate to GCC that 'x' is intentionally left
      // uninitialized.  Proven code paths which access 'x' in
      // an uninitialized state after this will still warn.
      //
      // TODO: Should we suppress maybe-uninitialized warnings for
      // variables initialized in this way?
      if (const Expr *Initializer = VD->getInit()) {
        if (DRE == Initializer->IgnoreParenImpCasts())
          return false;

        ContainsReference CR(S.Context, DRE);
        CR.Visit(const_cast<Expr*>(Initializer));
        isSelfInit = CR.doesContainReference();
      }
      if (isSelfInit) {
        S.Diag(DRE->getLocStart(),
               diag::warn_uninit_self_reference_in_init)
          << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
      } else {
        S.Diag(DRE->getLocStart(), diag::warn_uninit_var)
          << VD->getDeclName() << DRE->getSourceRange();
      }
    } else {
      S.Diag(DRE->getLocStart(), diag::warn_maybe_uninit_var)
        << VD->getDeclName() << DRE->getSourceRange();
    }
  } else {
    const BlockExpr *BE = cast<BlockExpr>(E);
    S.Diag(BE->getLocStart(),
           isAlwaysUninit ? diag::warn_uninit_var_captured_by_block
                          : diag::warn_maybe_uninit_var_captured_by_block)
      << VD->getDeclName();
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration.
  if (!isSelfInit)
    S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
      << VD->getDeclName();

  return true;
}

static void SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
  // Don't issue a fixit if there is already an initializer.
  if (VD->getInit())
    return;

  // Suggest possible initialization (if any).
  const char *initialization = 0;
  QualType VariableTy = VD->getType().getCanonicalType();

  if (VariableTy->isObjCObjectPointerType() ||
      VariableTy->isBlockPointerType()) {
    // Check if 'nil' is defined.
    if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("nil")))
      initialization = " = nil";
    else
      initialization = " = 0";
  }
  else if (VariableTy->isRealFloatingType())
    initialization = " = 0.0";
  else if (VariableTy->isBooleanType() && S.Context.getLangOptions().CPlusPlus)
    initialization = " = false";
  else if (VariableTy->isEnumeralType())
    return;
  else if (VariableTy->isPointerType() || VariableTy->isMemberPointerType()) {
    if (S.Context.getLangOptions().CPlusPlus0x)
      initialization = " = nullptr";
    // Check if 'NULL' is defined.
    else if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("NULL")))
      initialization = " = NULL";
    else
      initialization = " = 0";
  }
  else if (VariableTy->isScalarType())
    initialization = " = 0";

  if (initialization) {
    SourceLocation loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
    S.Diag(loc, diag::note_var_fixit_add_initialization)
      << FixItHint::CreateInsertion(loc, initialization);
  }
}

typedef std::pair<const Expr*, bool> UninitUse;

namespace {
struct SLocSort {
  bool operator()(const UninitUse &a, const UninitUse &b) {
    SourceLocation aLoc = a.first->getLocStart();
    SourceLocation bLoc = b.first->getLocStart();
    return aLoc.getRawEncoding() < bLoc.getRawEncoding();
  }
};

class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  typedef llvm::DenseMap<const VarDecl *, UsesVec*> UsesMap;
  UsesMap *uses;

public:
  UninitValsDiagReporter(Sema &S) : S(S), uses(0) {}
  ~UninitValsDiagReporter() {
    flushDiagnostics();
  }

  void handleUseOfUninitVariable(const Expr *ex, const VarDecl *vd,
                                 bool isAlwaysUninit) {
    if (!uses)
      uses = new UsesMap();

    UsesVec *&vec = (*uses)[vd];
    if (!vec)
      vec = new UsesVec();

    vec->push_back(std::make_pair(ex, isAlwaysUninit));
  }

  void flushDiagnostics() {
    if (!uses)
      return;

    for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
      const VarDecl *vd = i->first;
      UsesVec *vec = i->second;

      // Sort the uses by their SourceLocations.  While not strictly
      // guaranteed to produce them in line/column order, this will provide
      // a stable ordering.
      std::sort(vec->begin(), vec->end(), SLocSort());

      for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
           ++vi) {
        if (!DiagnoseUninitializedUse(S, vd, vi->first,
                                      /*isAlwaysUninit=*/vi->second))
          continue;

        SuggestInitializationFixit(S, vd);

        // Skip further diagnostics for this variable.  We try to warn only on
        // the first point at which a variable is used uninitialized.
        break;
      }

      delete vec;
    }
    delete uses;
  }
};
}

//===----------------------------------------------------------------------===//
// -Wthread-safety
//===----------------------------------------------------------------------===//
namespace clang {
namespace thread_safety {
typedef std::pair<SourceLocation, PartialDiagnostic> DelayedDiag;
typedef llvm::SmallVector<DelayedDiag, 4> DiagList;

enum ProtectedOperationKind {
  POK_VarDereference,
  POK_VarAccess,
  POK_FunctionCall
};

enum LockKind {
  LK_Shared,
  LK_Exclusive
};

enum AccessKind {
  AK_Read,
  AK_Written
};

struct SortDiagBySourceLocation {
  Sema &S;
  SortDiagBySourceLocation(Sema &S) : S(S) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return S.getSourceManager().isBeforeInTranslationUnit(left.first,
                                                          right.first);
  }
};

/// \brief Helper function that returns a LockKind required for the given level
/// of access.
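/// For example (illustrative): a read of a guarded variable (AK_Read) is safe
/// if the protecting mutex is held in either shared or exclusive mode, so it
/// maps to LK_Shared; a write (AK_Written) maps to LK_Exclusive.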
LockKind getLockKindFromAccessKind(AccessKind AK) {
  switch (AK) {
    case AK_Read :
      return LK_Shared;
    case AK_Written :
      return LK_Exclusive;
  }
}

class ThreadSafetyHandler {
public:
  typedef llvm::StringRef Name;
  ThreadSafetyHandler() {}
  virtual ~ThreadSafetyHandler() {}
  virtual void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {}
  virtual void handleDoubleLock(Name LockName, SourceLocation Loc) {}
  virtual void handleMutexHeldEndOfScope(Name LockName, SourceLocation Loc) {}
  virtual void handleNoLockLoopEntry(Name LockName, SourceLocation Loc) {}
  virtual void handleNoUnlock(Name LockName, Name FunName,
                              SourceLocation Loc) {}
  virtual void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
                                        SourceLocation Loc2) {}
  virtual void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
                                 AccessKind AK, SourceLocation Loc) {}
  virtual void handleMutexNotHeld(const NamedDecl *D,
                                  ProtectedOperationKind POK, Name LockName,
                                  LockKind LK, SourceLocation Loc) {}
  virtual void handleFunExcludesLock(Name FunName, Name LockName,
                                     SourceLocation Loc) {}
};

class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;

  // Helper functions
  void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
    PartialDiagnostic Warning = S.PDiag(DiagID) << LockName;
    Warnings.push_back(DelayedDiag(Loc, Warning));
  }

public:
  ThreadSafetyReporter(Sema &S) : S(S) {}

  /// \brief Emit all buffered diagnostics in order of source location.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function orders diagnostics
  /// and outputs them.
  void emitDiagnostics() {
    SortDiagBySourceLocation SortDiagBySL(S);
    std::sort(Warnings.begin(), Warnings.end(), SortDiagBySL);
    for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
         I != E; ++I)
      S.Diag(I->first, I->second);
  }

  void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
  }

  void handleDoubleLock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_double_lock, LockName, Loc);
  }

  void handleMutexHeldEndOfScope(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_lock_at_end_of_scope, LockName, Loc);
  }

  void handleNoLockLoopEntry(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_expecting_lock_held_on_loop, LockName, Loc);
  }

  void handleNoUnlock(Name LockName, llvm::StringRef FunName,
                      SourceLocation Loc) {
    PartialDiagnostic Warning =
      S.PDiag(diag::warn_no_unlock) << LockName << FunName;
    Warnings.push_back(DelayedDiag(Loc, Warning));
  }

  void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
                                SourceLocation Loc2) {
    PartialDiagnostic Warning =
      S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName;
    PartialDiagnostic Note =
      S.PDiag(diag::note_lock_exclusive_and_shared) << LockName;
    Warnings.push_back(DelayedDiag(Loc1, Warning));
    Warnings.push_back(DelayedDiag(Loc2, Note));
  }

  void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
                         AccessKind AK, SourceLocation Loc) {
    // FIXME: It would be nice if this case printed without single quotes
    // around the phrase 'any mutex'.
    handleMutexNotHeld(D, POK, "any mutex", getLockKindFromAccessKind(AK), Loc);
  }

  void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
                          Name LockName, LockKind LK, SourceLocation Loc) {
    unsigned DiagID;
    switch (POK) {
      case POK_VarAccess:
        DiagID = diag::warn_variable_requires_lock;
        break;
      case POK_VarDereference:
        DiagID = diag::warn_var_deref_requires_lock;
        break;
      case POK_FunctionCall:
        DiagID = diag::warn_fun_requires_lock;
        break;
    }
    PartialDiagnostic Warning = S.PDiag(DiagID)
      << D->getName().str() << LockName << LK;
    Warnings.push_back(DelayedDiag(Loc, Warning));
  }

  void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
    PartialDiagnostic Warning =
      S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName;
    Warnings.push_back(DelayedDiag(Loc, Warning));
  }
};
}
}

using namespace thread_safety;

namespace {
/// \brief Implements a set of CFGBlocks using a BitVector.
///
/// This class contains a minimal interface, primarily dictated by the SetType
/// template parameter of the llvm::po_iterator template, as used with external
/// storage.  We also use this set to keep track of which CFGBlocks we visit
/// during the analysis.
class CFGBlockSet {
  llvm::BitVector VisitedBlockIDs;

public:
  // po_iterator requires this iterator, but the only interface needed is the
  // value_type typedef.
  struct iterator {
    typedef const CFGBlock *value_type;
  };

  CFGBlockSet() {}
  CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}

  /// \brief Set the bit associated with a particular CFGBlock.
  /// This is the important method for the SetType template parameter.
  bool insert(const CFGBlock *Block) {
    // Note that insert() is called by po_iterator, which doesn't check to make
    // sure that Block is non-null.  Moreover, the CFGBlock iterator will
    // occasionally hand out null pointers for pruned edges, so we catch those
    // here.
    if (Block == 0)
      return false;  // if an edge is trivially false.
    if (VisitedBlockIDs.test(Block->getBlockID()))
      return false;
    VisitedBlockIDs.set(Block->getBlockID());
    return true;
  }

  /// \brief Check if the bit for a CFGBlock has already been set.
  /// This method is for tracking visited blocks in the main thread-safety
  /// loop.  Block must not be null.
  bool alreadySet(const CFGBlock *Block) {
    return VisitedBlockIDs.test(Block->getBlockID());
  }
};

/// \brief A helper class used to iterate through CFGBlocks in topological
/// order.
class TopologicallySortedCFG {
  typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator;

  std::vector<const CFGBlock*> Blocks;

public:
  typedef std::vector<const CFGBlock*>::reverse_iterator iterator;

  TopologicallySortedCFG(const CFG *CFGraph) {
    Blocks.reserve(CFGraph->getNumBlockIDs());
    CFGBlockSet BSet(CFGraph);

    for (po_iterator I = po_iterator::begin(CFGraph, BSet),
         E = po_iterator::end(CFGraph, BSet); I != E; ++I) {
      Blocks.push_back(*I);
    }
  }

  iterator begin() {
    return Blocks.rbegin();
  }

  iterator end() {
    return Blocks.rend();
  }
};

/// \brief A MutexID object uniquely identifies a particular mutex, and
/// is built from an Expr* (i.e. calling a lock function).
///
/// Thread-safety analysis works by comparing lock expressions.  Within the
/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
/// a particular mutex object at run-time.  Subsequent occurrences of the same
/// expression (where "same" means syntactic equality) will refer to the same
/// run-time object if three conditions hold:
/// (1) Local variables in the expression, such as "x", have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
///
/// Clang introduces an additional wrinkle, which is that it is difficult to
/// derive canonical expressions, or compare expressions directly for equality.
/// Thus, we identify a mutex not by an Expr, but by the set of named
/// declarations that are referenced by the Expr.  In other words,
/// x->foo->bar.mu will be a four element vector with the Decls for
/// mu, bar, foo, and x.  The vector will uniquely identify the expression
/// for all practical purposes.
///
/// Note we will need to perform substitution on "this" and function parameter
/// names when constructing a lock expression.
///
/// For example:
/// class C { Mutex Mu; void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
/// void myFunc(C *X) { ... X->lock() ... }
/// The original expression for the mutex acquired by myFunc is "this->Mu",
/// but "X" is substituted for "this" so we get X->Mu();
///
/// For another example:
/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
/// MyList *MyL;
/// foo(MyL);  // requires lock MyL->Mu to be held
class MutexID {
  SmallVector<NamedDecl*, 2> DeclSeq;

  /// Build a Decl sequence representing the lock from the given expression.
  /// Recursive function that bottoms out when the final DeclRefExpr is
  /// reached.
  void buildMutexID(Expr *Exp) {
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
      NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
      DeclSeq.push_back(ND);
    } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
      NamedDecl *ND = ME->getMemberDecl();
      DeclSeq.push_back(ND);
      buildMutexID(ME->getBase());
    } else if (isa<CXXThisExpr>(Exp)) {
      return;
    } else {
      // FIXME: add diagnostic
      llvm::report_fatal_error("Expected lock expression!");
    }
  }

public:
  MutexID(Expr *LExpr) {
    buildMutexID(LExpr);
    assert(!DeclSeq.empty());
  }

  bool operator==(const MutexID &other) const {
    return DeclSeq == other.DeclSeq;
  }

  bool operator!=(const MutexID &other) const {
    return !(*this == other);
  }

  // SmallVector overloads operator< to do lexicographic ordering.  Note that
  // we use pointer equality (and <) to compare NamedDecls.  This means the
  // order of MutexIDs in a lockset is nondeterministic.  In order to output
  // diagnostics in a deterministic ordering, we must order all diagnostics to
  // output by SourceLocation when iterating through this lockset.
  bool operator<(const MutexID &other) const {
    return DeclSeq < other.DeclSeq;
  }

  /// \brief Returns the name of the first Decl in the list for a given
  /// MutexID; e.g. the lock expression foo.bar() has name "bar".
  /// The caret will point unambiguously to the lock expression, so using this
  /// name in diagnostics is a way to get simple, and consistent, mutex names.
  /// We do not want to output the entire expression text for security reasons.
  StringRef getName() const {
    return DeclSeq.front()->getName();
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
         E = DeclSeq.end(); I != E; ++I) {
      ID.AddPointer(*I);
    }
  }
};

/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
/// The main body of the analysis maps MutexIDs to LockDatas.
struct LockData {
  SourceLocation AcquireLoc;

  /// \brief LKind stores whether a lock is held shared or exclusively.
  /// Note that this analysis does not currently support either re-entrant
  /// locking or lock "upgrading" and "downgrading" between exclusive and
  /// shared.
  ///
  /// FIXME: add support for re-entrant locking and lock up/downgrading
  LockKind LKind;

  LockData(SourceLocation AcquireLoc, LockKind LKind)
    : AcquireLoc(AcquireLoc), LKind(LKind) {}

  bool operator==(const LockData &other) const {
    return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
  }

  bool operator!=(const LockData &other) const {
    return !(*this == other);
  }

  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(AcquireLoc.getRawEncoding());
    ID.AddInteger(LKind);
  }
};

/// A Lockset maps each MutexID (defined above) to information about how it
/// has been locked.
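/// For example (an illustration, not from the original source): after calls
/// to a method marked EXCLUSIVE_LOCK_FUNCTION for "mu1" and one marked
/// SHARED_LOCK_FUNCTION for "mu2", the lockset maps mu1 to
/// LockData(loc1, LK_Exclusive) and mu2 to LockData(loc2, LK_Shared).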
typedef llvm::ImmutableMap<MutexID, LockData> Lockset;

/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
  ThreadSafetyHandler &Handler;
  Lockset LSet;
  Lockset::Factory &LocksetFactory;

  // Helper functions
  void removeLock(SourceLocation UnlockLoc, Expr *LockExp);
  void addLock(SourceLocation LockLoc, Expr *LockExp, LockKind LK);
  const ValueDecl *getValueDecl(Expr *Exp);
  void warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK);
  void checkAccess(Expr *Exp, AccessKind AK);
  void checkDereference(Expr *Exp, AccessKind AK);

  template <class AttrType>
  void addLocksToSet(LockKind LK, Attr *Attr, CXXMemberCallExpr *Exp);

  /// \brief Returns true if the lockset contains a lock, regardless of whether
  /// the lock is held exclusively or shared.
  bool locksetContains(MutexID Lock) const {
    return LSet.lookup(Lock);
  }

  /// \brief Returns true if the lockset contains a lock with the passed in
  /// locktype.
  bool locksetContains(MutexID Lock, LockKind KindRequested) const {
    const LockData *LockHeld = LSet.lookup(Lock);
    return (LockHeld && KindRequested == LockHeld->LKind);
  }

  /// \brief Returns true if the lockset contains a lock with at least the
  /// passed in locktype.  So for example, if we pass in LK_Shared, this
  /// function returns true if the lock is held LK_Shared or LK_Exclusive.
  /// If we pass in LK_Exclusive, this function returns true if the lock is
  /// held LK_Exclusive.
  bool locksetContainsAtLeast(MutexID Lock, LockKind KindRequested) const {
    switch (KindRequested) {
      case LK_Shared:
        return locksetContains(Lock);
      case LK_Exclusive:
        return locksetContains(Lock, KindRequested);
    }
  }

public:
  BuildLockset(ThreadSafetyHandler &Handler, Lockset LS, Lockset::Factory &F)
    : StmtVisitor<BuildLockset>(), Handler(Handler), LSet(LS),
      LocksetFactory(F) {}

  Lockset getLockset() {
    return LSet;
  }

  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCastExpr(CastExpr *CE);
  void VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp);
};

/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param LockLoc The source location of the acquire
/// \param LockExp The lock expression corresponding to the lock to be added
void BuildLockset::addLock(SourceLocation LockLoc, Expr *LockExp,
                           LockKind LK) {
  // FIXME: deal with acquired before/after annotations
  MutexID Mutex(LockExp);
  LockData NewLock(LockLoc, LK);

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (locksetContains(Mutex))
    Handler.handleDoubleLock(Mutex.getName(), LockLoc);
  LSet = LocksetFactory.add(LSet, Mutex, NewLock);
}

/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
/// \param LockExp The lock expression corresponding to the lock to be removed
void BuildLockset::removeLock(SourceLocation UnlockLoc, Expr *LockExp) {
  MutexID Mutex(LockExp);

  Lockset NewLSet = LocksetFactory.remove(LSet, Mutex);
  if (NewLSet == LSet)
    Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);

  LSet = NewLSet;
}

/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs.
const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return 0;
}

/// \brief Warn if the LSet does not contain a lock sufficient to protect
/// access of at least the passed in AccessType.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK) {
  LockKind LK = getLockKindFromAccessKind(AK);
  MutexID Mutex(MutexExp);
  if (!locksetContainsAtLeast(Mutex, LK))
    Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
}

/// \brief This method identifies variable dereferences and checks
/// pt_guarded_by and pt_guarded_var annotations.  Note that we only check
/// these annotations at the time a pointer is dereferenced.
/// FIXME: We need to check for other types of pointer dereferences
/// (e.g. [], ->) and deal with them here.
/// \param Exp An expression that has been read or written.
void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
  UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
  if (!UO || UO->getOpcode() != clang::UO_Deref)
    return;
  Exp = UO->getSubExpr()->IgnoreParenCasts();

  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
}

/// \brief Checks guarded_by and guarded_var attributes.
/// Whenever we identify an access (read or write) of a DeclRefExpr or
/// MemberExpr, we need to check whether there are any guarded_by or
/// guarded_var attributes, and make sure we hold the appropriate mutexes.
void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;

  if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
    Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());

  const AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
    if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
      warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}

/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes.  Reads are checked in
/// VisitCastExpr.
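/// For example (an illustration, not from the original comment): for a
/// variable "int x GUARDED_BY(mu);", the expression "x++" is treated as a
/// write to 'x' (AK_Written), so 'mu' must be held exclusively.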
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  switch (UO->getOpcode()) {
    case clang::UO_PostDec:
    case clang::UO_PostInc:
    case clang::UO_PreDec:
    case clang::UO_PreInc: {
      Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
      checkAccess(SubExp, AK_Written);
      checkDereference(SubExp, AK_Written);
      break;
    }
    default:
      break;
  }
}

/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;
  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
  checkAccess(LHSExp, AK_Written);
  checkDereference(LHSExp, AK_Written);
}

/// Whenever we do an LValue to RValue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(CastExpr *CE) {
  if (CE->getCastKind() != CK_LValueToRValue)
    return;
  Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
  checkAccess(SubExp, AK_Read);
  checkDereference(SubExp, AK_Read);
}

/// \brief This function, parameterized by an attribute type, is used to add a
/// set of locks specified as attribute arguments to the lockset.
template <typename AttrType>
void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
                                 CXXMemberCallExpr *Exp) {
  typedef typename AttrType::args_iterator iterator_type;
  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();
  AttrType *SpecificAttr = cast<AttrType>(Attr);

  if (SpecificAttr->args_size() == 0) {
    // The mutex held is the "this" object.
    addLock(ExpLocation, Parent, LK);
    return;
  }

  for (iterator_type I = SpecificAttr->args_begin(),
       E = SpecificAttr->args_end(); I != E; ++I)
    addLock(ExpLocation, *I, LK);
}

/// \brief When visiting CXXMemberCallExprs we need to examine the attributes
/// on the method that is being called and add, remove or check locks in the
/// lockset accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held.  Non-const method calls with
/// the same signature as const method calls can also be treated as reads.
///
/// FIXME: We need to also visit CallExprs to catch/check global functions.
void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());

  SourceLocation ExpLocation = Exp->getExprLoc();
  Expr *Parent = Exp->getImplicitObjectArgument();

  if (!D || !D->hasAttrs())
    return;

  AttrVec &ArgAttrs = D->getAttrs();
  for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
    Attr *Attr = ArgAttrs[i];
    switch (Attr->getKind()) {
      // When we encounter an exclusive lock function, we need to add the lock
      // to our lockset with kind exclusive.
      case attr::ExclusiveLockFunction:
        addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp);
        break;

      // When we encounter a shared lock function, we need to add the lock
      // to our lockset with kind shared.
      case attr::SharedLockFunction:
        addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp);
        break;

      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::UnlockFunction: {
        UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);

        if (UFAttr->args_size() == 0) {  // The lock held is the "this" object.
          removeLock(ExpLocation, Parent);
          break;
        }

        for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(),
             E = UFAttr->args_end(); I != E; ++I)
          removeLock(ExpLocation, *I);
        break;
      }

      case attr::ExclusiveLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        ExclusiveLocksRequiredAttr *ELRAttr =
          cast<ExclusiveLocksRequiredAttr>(Attr);

        for (ExclusiveLocksRequiredAttr::args_iterator
             I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
        break;
      }

      case attr::SharedLocksRequired: {
        // FIXME: Also use this attribute to add required locks to the initial
        // lockset when processing a CFG for a function annotated with this
        // attribute.
        SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);

        for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
             E = SLRAttr->args_end(); I != E; ++I)
          warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
        break;
      }

      case attr::LocksExcluded: {
        LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
        for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
             E = LEAttr->args_end(); I != E; ++I) {
          MutexID Mutex(*I);
          if (locksetContains(Mutex))
            Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
                                          ExpLocation);
        }
        break;
      }

      case attr::LockReturned:
        // FIXME: Deal with this attribute.
        break;

      // Ignore other (non-thread-safety) attributes.
      default:
        break;
    }
  }
}

} // end anonymous namespace

/// \brief Flag a warning for each lock that is in LSet2 but not in LSet1, and
/// for each mutex that is held shared in one lockset and exclusive in the
/// other.
static Lockset warnIfNotInFirstSetOrNotSameKind(ThreadSafetyHandler &Handler,
                                                const Lockset LSet1,
                                                const Lockset LSet2,
                                                Lockset Intersection,
                                                Lockset::Factory &Fact) {
  for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
    const MutexID &LSet2Mutex = I.getKey();
    const LockData &LSet2LockData = I.getData();
    if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
      if (LD->LKind != LSet2LockData.LKind) {
        Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
                                         LSet2LockData.AcquireLoc,
                                         LD->AcquireLoc);
        if (LD->LKind != LK_Exclusive)
          Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData);
      }
    } else {
      Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
                                        LSet2LockData.AcquireLoc);
    }
  }
  return Intersection;
}

/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the
/// lockset of each branch being merged.  For example, given the sequence
/// "A; if () then B; else C; D;", we need to check that the locksets after
/// B and C are the same.  In the event of a difference, we use the
/// intersection of these two locksets at the start of D.
static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
                                const Lockset LSet1, const Lockset LSet2,
                                Lockset::Factory &Fact) {
  Lockset Intersection = LSet1;
  Intersection = warnIfNotInFirstSetOrNotSameKind(Handler, LSet1, LSet2,
                                                  Intersection, Fact);

  for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
    if (!LSet2.contains(I.getKey())) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();
      Handler.handleMutexHeldEndOfScope(Mutex.getName(),
                                        MissingLock.AcquireLoc);
      Intersection = Fact.remove(Intersection, Mutex);
    }
  }
  return Intersection;
}

/// \brief Returns the location of the first Stmt in a Block.
static SourceLocation getFirstStmtLocation(CFGBlock *Block) {
  SourceLocation Loc;
  for (CFGBlock::const_iterator BI = Block->begin(), BE = Block->end();
       BI != BE; ++BI) {
    if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&(*BI))) {
      Loc = CfgStmt->getStmt()->getLocStart();
      if (Loc.isValid()) return Loc;
    }
  }
  if (Stmt *S = Block->getTerminator().getStmt()) {
    Loc = S->getLocStart();
    if (Loc.isValid()) return Loc;
  }
  return Loc;
}

/// \brief Warn about different locksets along backedges of loops.
/// This function is called when we encounter a back edge.  At that point,
/// we need to verify that the lockset before taking the backedge is the
/// same as the lockset before entering the loop.
///
/// \param LoopEntrySet Locks before starting the loop
/// \param LoopReentrySet Locks in the last CFG block of the loop
static void warnBackEdgeUnequalLocksets(ThreadSafetyHandler &Handler,
                                        const Lockset LoopReentrySet,
                                        const Lockset LoopEntrySet,
                                        SourceLocation FirstLocInLoop,
                                        Lockset::Factory &Fact) {
  assert(FirstLocInLoop.isValid());
  // Warn for locks held at the start of the loop, but not at the end.
  for (Lockset::iterator I = LoopEntrySet.begin(), E = LoopEntrySet.end();
       I != E; ++I) {
    if (!LoopReentrySet.contains(I.getKey())) {
      // We report this error at the location of the first statement in the
      // loop.
      Handler.handleNoLockLoopEntry(I.getKey().getName(), FirstLocInLoop);
    }
  }

  // Warn for locks held at the end of the loop, but not at the start.
  warnIfNotInFirstSetOrNotSameKind(Handler, LoopEntrySet, LoopReentrySet,
                                   LoopReentrySet, Fact);
}

namespace clang { namespace thread_safety {
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
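///
/// A sketch of what the analysis flags (illustrative, not from the original
/// source; 'Mutex', 'Lock', 'Unlock', and GUARDED_BY stand for a suitably
/// annotated mutex API):
///
///   Mutex Mu;
///   int Data GUARDED_BY(Mu);
///
///   void f(bool b) {
///     Mu.Lock();
///     if (b) Mu.Unlock();
///     Data = 1;  // warned: the two branches reach here with different
///   }            // locksets, and the write may occur without 'Mu' held.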
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const Decl *D = AC.getDecl();
  if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector?  Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
       E = SortedGraph.end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();

    VisitedBlocks.insert(CurrBlock);

    // Use the default initial lockset in case there are no predecessors.
    Lockset &Entryset = EntryLocksets[CurrBlockID];
    Lockset &Exitset = ExitLocksets[CurrBlockID];

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same.  We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union.  We
    // may want to also keep the union in future.  As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M.  At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths.  Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {

      // If *PI -> CurrBlock is a back edge, skip it.
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      if (!LocksetInitialized) {
        Entryset = ExitLocksets[PrevBlockID];
        LocksetInitialized = true;
      } else {
        Entryset = intersectAndWarn(Handler, Entryset,
                                    ExitLocksets[PrevBlockID], LocksetFactory);
      }
    }

    BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
        LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
    }
    Exitset = LocksetBuilder.getLockset();

    // For every back edge from CurrBlock (the end of the loop) to another
    // block (FirstLoopBlock) we need to check that the Lockset of Block is
    // equal to the one held at the beginning of FirstLoopBlock.  We can look
    // up the Lockset held at the beginning of FirstLoopBlock in the
    // EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {

      // If CurrBlock -> *SI is *not* a back edge, skip it.
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      SourceLocation FirstLoopLocation = getFirstStmtLocation(FirstLoopBlock);

      assert(FirstLoopLocation.isValid());

      // Fail gracefully in release code.
      if (!FirstLoopLocation.isValid())
        continue;

      Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
      Lockset LoopEnd = ExitLocksets[CurrBlockID];
      warnBackEdgeUnequalLocksets(Handler, LoopEnd, PreLoop, FirstLoopLocation,
                                  LocksetFactory);
    }
  }

  Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];
  if (!FinalLockset.isEmpty()) {
    for (Lockset::iterator I = FinalLockset.begin(), E = FinalLockset.end();
         I != E; ++I) {
      const MutexID &Mutex = I.getKey();
      const LockData &MissingLock = I.getData();

      std::string FunName = "<unknown>";
      if (const NamedDecl *ContextDecl = dyn_cast<NamedDecl>(AC.getDecl())) {
        FunName = ContextDecl->getDeclName().getAsString();
      }

      Handler.handleNoUnlock(Mutex.getName(), FunName, MissingLock.AcquireLoc);
    }
  }
}

}} // end namespace clang::thread_safety

//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//

clang::sema::AnalysisBasedWarnings::Policy::Policy() {
  enableCheckFallThrough = 1;
  enableCheckUnreachable = 0;
  enableThreadSafetyAnalysis = 0;
}

clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
  : S(s),
    NumFunctionsAnalyzed(0),
    NumFunctionsWithBadCFGs(0),
    NumCFGBlocks(0),
    MaxCFGBlocksPerFunction(0),
    NumUninitAnalysisFunctions(0),
    NumUninitAnalysisVariables(0),
    MaxUninitAnalysisVariablesPerFunction(0),
    NumUninitAnalysisBlockVisits(0),
    MaxUninitAnalysisBlockVisitsPerFunction(0) {
  Diagnostic &D = S.getDiagnostics();
  DefaultPolicy.enableCheckUnreachable = (unsigned)
    (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
     Diagnostic::Ignored);
  DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
    (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
     Diagnostic::Ignored);
}

static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
  for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
         i = fscope->PossiblyUnreachableDiags.begin(),
         e = fscope->PossiblyUnreachableDiags.end();
       i != e; ++i) {
    const sema::PossiblyUnreachableDiag &D = *i;
    S.Diag(D.Loc, D.PD);
  }
}

void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
                                     sema::FunctionScopeInfo *fscope,
                                     const Decl *D, const BlockExpr *blkExpr) {

  // We avoid doing analysis-based warnings when there are errors for
  // two reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  Diagnostic &Diags = S.getDiagnostics();

  // Do not do any analysis for declarations in system headers if we are
  // going to just ignore them.
  if (Diags.getSuppressSystemWarnings() &&
      S.SourceMgr.isInSystemHeader(D->getLocation()))
    return;

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    return;

  if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.
    flushDiagnostics(S, fscope);
    return;
  }

  const Stmt *Body = D->getBody();
  assert(Body);

  AnalysisContext AC(D, 0);

  // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
  // explosion for destructors that can result and the compile time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;

  // Force that certain expressions appear as CFGElements in the CFG. This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring. This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately. This is essentially a layering violation.
  if (P.enableCheckUnreachable) {
    // Unreachable code analysis requires a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  } else {
    AC.getCFGBuildOptions()
      .setAlwaysAdd(Stmt::BinaryOperatorClass)
      .setAlwaysAdd(Stmt::BlockExprClass)
      .setAlwaysAdd(Stmt::CStyleCastExprClass)
      .setAlwaysAdd(Stmt::DeclRefExprClass)
      .setAlwaysAdd(Stmt::ImplicitCastExprClass)
      .setAlwaysAdd(Stmt::UnaryOperatorClass);
  }

  // The CFG itself is constructed lazily, on first access, using the build
  // options set above.

  // Emit delayed diagnostics.
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

    // Register the expressions with the CFGBuilder.
    for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
           i = fscope->PossiblyUnreachableDiags.begin(),
           e = fscope->PossiblyUnreachableDiags.end();
         i != e; ++i) {
      if (const Stmt *stmt = i->stmt)
        AC.registerForcedBlockExpression(stmt);
    }

    if (AC.getCFG()) {
      analyzed = true;
      for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
             i = fscope->PossiblyUnreachableDiags.begin(),
             e = fscope->PossiblyUnreachableDiags.end();
           i != e; ++i) {
        const sema::PossiblyUnreachableDiag &D = *i;
        bool processed = false;
        if (const Stmt *stmt = i->stmt) {
          const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
          assert(block);
          if (CFGReverseBlockReachabilityAnalysis *cra =
                AC.getCFGReachablityAnalysis()) {
            // Can this block be reached from the entrance?
            if (cra->isReachable(&AC.getCFG()->getEntry(), block))
              S.Diag(D.Loc, D.PD);
            processed = true;
          }
        }
        if (!processed) {
          // Emit the warning anyway if we cannot map to a basic block.
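          // (This happens when the diagnostic has no associated statement,
          // or when the reachability analysis could not be created.)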
          S.Diag(D.Loc, D.PD);
        }
      }
    }

    if (!analyzed)
      flushDiagnostics(S, fscope);
  }

  // Warning: check for missing 'return'
  if (P.enableCheckFallThrough) {
    const CheckFallThroughDiagnostics &CD =
      (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
                         : CheckFallThroughDiagnostics::MakeForFunction(D));
    CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
  }

  // Warning: check for unreachable code
  if (P.enableCheckUnreachable)
    CheckUnreachable(S, AC);

  // Check for thread safety violations
  if (P.enableThreadSafetyAnalysis) {
    thread_safety::ThreadSafetyReporter Reporter(S);
    thread_safety::runThreadSafetyAnalysis(AC, Reporter);
    Reporter.emitDiagnostics();
  }

  if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
        != Diagnostic::Ignored ||
      Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
        != Diagnostic::Ignored) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
                                        reporter, stats);

      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
          std::max(MaxUninitAnalysisVariablesPerFunction,
                   stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
          std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                   stats.NumBlockVisits);
      }
    }
  }

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detailed information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
    } else {
      ++NumFunctionsWithBadCFGs;
    }
  }
}

void clang::sema::AnalysisBasedWarnings::PrintStats() const {
  llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";

  unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
  unsigned AvgCFGBlocksPerFunction =
      !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
  llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
               << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
               << "  " << NumCFGBlocks << " CFG blocks built.\n"
               << "  " << AvgCFGBlocksPerFunction
               << " average CFG blocks per function.\n"
               << "  " << MaxCFGBlocksPerFunction
               << " max CFG blocks per function.\n";

  unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
      : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
  unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
      : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
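  // (The '!N ? 0 : Sum/N' pattern above keeps the averages well-defined when
  // no functions were analyzed; e.g. 100 block visits across 4 functions
  // reports an average of 100/4 = 25, using integer division.)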
  llvm::errs() << NumUninitAnalysisFunctions
               << " functions analyzed for uninitialized variables\n"
               << "  " << NumUninitAnalysisVariables
               << " variables analyzed.\n"
               << "  " << AvgUninitVariablesPerFunction
               << " average variables per function.\n"
               << "  " << MaxUninitAnalysisVariablesPerFunction
               << " max variables per function.\n"
               << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
               << "  " << AvgUninitBlockVisitsPerFunction
               << " average block visits per function.\n"
               << "  " << MaxUninitAnalysisBlockVisitsPerFunction
               << " max block visits per function.\n";
}
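
// Usage sketch (hypothetical driver code, not part of this file): Sema owns
// an AnalysisBasedWarnings worker and invokes it once per function, method,
// or block body, roughly as follows (assuming a Sema 'S', a function scope
// 'fscope', and a Decl 'D' whose body has been parsed):
//
//   sema::AnalysisBasedWarnings::Policy P =
//       S.AnalysisWarnings.getDefaultPolicy();
//   S.AnalysisWarnings.IssueWarnings(P, fscope, D, /*blkExpr=*/0);
//
// PrintStats() is only meaningful when stats collection is enabled
// (S.CollectStats).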