// AnalysisBasedWarnings.cpp, revision a189d8976f1193b788508a1a29b2e9d0aca06aca
//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines analysis_warnings::[Policy,Executor].
// Together they are used by Sema to issue warnings based on inexpensive
// static analysis algorithms in libAnalysis.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstring>
#include <iterator>
#include <list>
#include <vector>
#include <deque>

using namespace clang;

//===----------------------------------------------------------------------===//
// Unreachable code analysis.
//===----------------------------------------------------------------------===//

namespace {
  class UnreachableCodeHandler : public reachable_code::Callback {
    Sema &S;
  public:
    UnreachableCodeHandler(Sema &s) : S(s) {}

    void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
      S.Diag(L, diag::warn_unreachable) << R1 << R2;
    }
  };
}

/// CheckUnreachable - Check for unreachable code.
static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
  UnreachableCodeHandler UC(S);
  reachable_code::FindUnreachableCode(AC, UC);
}

//===----------------------------------------------------------------------===//
// Check for missing return value.
//===----------------------------------------------------------------------===//

enum ControlFlowKind {
  UnknownFallThrough,
  NeverFallThrough,
  MaybeFallThrough,
  AlwaysFallThrough,
  NeverFallThroughOrReturn
};

/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
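///
/// Illustrative examples (added annotation, not in the original source):
///   int f(bool b) { if (b) return 1; }   // may fall off the end
///   int g()       { }                    // always falls off the end
/// Under this classification, f's body is MaybeFallThrough and g's body is
/// AlwaysFallThrough.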
static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(&b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit block
  // and look for fall through paths, being careful to ignore normal returns,
  // and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
         I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock& B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return.  They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG.  This is
    // normal.  We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (isa<CFGStmt>(*ri))
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
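      // (Illustrative note, not in the original source: e.g. "end: ;"
      // right before a function's closing brace lands here.)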
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = cast<CFGStmt>(*ri);
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
        == B.succ_end()) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}

namespace {

struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  enum { Function, Block, Lambda } funMode;
  SourceLocation FuncLoc;

  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
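    // Illustrative example (not in the original source):
    //   struct Base           { virtual void f() { while (true) {} } };
    //   struct Derived : Base { virtual void f() { /* returns */ } };
    // Suggesting "noreturn" on Base::f would be wrong for virtual calls
    // that dispatch to Derived::f.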
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    // Don't suggest that template instantiations be marked "noreturn"
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
      isTemplateInstantiation = Function->isTemplateInstantiation();

    if (!isVirtualMethod && !isTemplateInstantiation)
      D.diag_NeverFallThroughOrReturn =
        diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = Function;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn =
      diag::warn_suggest_noreturn_block;
    D.funMode = Block;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForLambda() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_lambda_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_lambda;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_lambda_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_lambda;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Lambda;
    return D;
  }

  bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode == Function) {
      return (ReturnsVoid ||
              D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
                                   FuncLoc) == DiagnosticsEngine::Ignored)
        && (!HasNoReturn ||
            D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr,
                                 FuncLoc) == DiagnosticsEngine::Ignored)
        && (!ReturnsVoid ||
            D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
              == DiagnosticsEngine::Ignored);
    }

    // For blocks / lambdas.
    return ReturnsVoid && !HasNoReturn
        && ((funMode == Lambda) ||
            D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
              == DiagnosticsEngine::Ignored);
  }
};

}

/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value.  Check that we don't fall off the end
/// of a noreturn function.  We assume that functions and blocks not marked
/// noreturn will return.
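///
/// For instance (illustrative only), a non-void function body such as
///   int pick(int x) { if (x > 0) return 1; }
/// is classified MaybeFallThrough and diagnosed with
/// warn_maybe_falloff_nonvoid_function at its closing brace.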
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    const BlockExpr *blkExpr,
                                    const CheckFallThroughDiagnostics& CD,
                                    AnalysisDeclContext &AC) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    ReturnsVoid = FD->getResultType()->isVoidType();
    HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
       FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
  }
  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getResultType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    QualType BlockTy = blkExpr->getType();
    if (const FunctionType *FT =
          BlockTy->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getResultType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
    return;

  // FIXME: Function try block
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
      case UnknownFallThrough:
        break;

      case MaybeFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_ReturnsNonVoid);
        break;
      case AlwaysFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_ReturnsNonVoid);
        break;
      case NeverFallThroughOrReturn:
        if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
          if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << 0 << FD;
          } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << 1 << MD;
          } else {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
          }
        }
        break;
      case NeverFallThrough:
        break;
    }
  }
}

//===----------------------------------------------------------------------===//
// -Wuninitialized
//===----------------------------------------------------------------------===//

namespace {
/// ContainsReference - A visitor class to search for references to
/// a particular declaration (the needle) within any evaluated component of an
/// expression (recursively).
class ContainsReference : public EvaluatedExprVisitor<ContainsReference> {
  bool FoundReference;
  const DeclRefExpr *Needle;

public:
  ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
    : EvaluatedExprVisitor<ContainsReference>(Context),
      FoundReference(false), Needle(Needle) {}

  void VisitExpr(Expr *E) {
    // Stop evaluating if we already have a reference.
    if (FoundReference)
      return;

    EvaluatedExprVisitor<ContainsReference>::VisitExpr(E);
  }

  void VisitDeclRefExpr(DeclRefExpr *E) {
    if (E == Needle)
      FoundReference = true;
    else
      EvaluatedExprVisitor<ContainsReference>::VisitDeclRefExpr(E);
  }

  bool doesContainReference() const { return FoundReference; }
};
}

static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
  QualType VariableTy = VD->getType().getCanonicalType();
  if (VariableTy->isBlockPointerType() &&
      !VD->hasAttr<BlocksAttr>()) {
    S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization)
      << VD->getDeclName()
      << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
    return true;
  }

  // Don't issue a fixit if there is already an initializer.
  if (VD->getInit())
    return false;

  // Suggest possible initialization (if any).
  std::string Init = S.getFixItZeroInitializerForType(VariableTy);
  if (Init.empty())
    return false;

  // Don't suggest a fixit inside macros.
  if (VD->getLocEnd().isMacroID())
    return false;

  SourceLocation Loc = S.PP.getLocForEndOfToken(VD->getLocEnd());

  S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
    << FixItHint::CreateInsertion(Loc, Init);
  return true;
}

/// NoteUninitBranches -- Helper function to produce notes for branches which
/// inevitably lead to an uninitialized variable use.
static void NoteUninitBranches(Sema &S, const UninitUse &Use) {
  for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
       I != E; ++I) {
    const Stmt *Term = I->Terminator;
    unsigned DiagKind;
    SourceRange Range;
    const char *Str;
    switch (Term->getStmtClass()) {
    default:
      // Don't know how to report this.
      continue;

    // "condition is true / condition is false".
    case Stmt::IfStmtClass:
      DiagKind = 0;
      Str = "if";
      Range = cast<IfStmt>(Term)->getCond()->getSourceRange();
      break;
    case Stmt::ConditionalOperatorClass:
      DiagKind = 0;
      Str = "?:";
      Range = cast<ConditionalOperator>(Term)->getCond()->getSourceRange();
      break;
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Term);
      if (!BO->isLogicalOp())
        continue;
      DiagKind = 0;
      Str = BO->getOpcodeStr();
      Range = BO->getLHS()->getSourceRange();
      break;
    }

    // "loop is entered / loop is exited".
    case Stmt::WhileStmtClass:
      DiagKind = 1;
      Str = "while";
      Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
      break;
    case Stmt::ForStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
      break;
    case Stmt::CXXForRangeStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<CXXForRangeStmt>(Term)->getCond()->getSourceRange();
      break;

    // "condition is true / loop is exited".
    case Stmt::DoStmtClass:
      DiagKind = 2;
      Str = "do";
      Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
      break;

    // "switch case is taken".
    case Stmt::CaseStmtClass:
      DiagKind = 3;
      Str = "case";
      Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
      break;
    case Stmt::DefaultStmtClass:
      DiagKind = 3;
      Str = "default";
      Range = cast<DefaultStmt>(Term)->getDefaultLoc();
      break;
    }

    S.Diag(Range.getBegin(), diag::note_sometimes_uninit_var_branch)
      << DiagKind << Str << I->Output << Range;
  }
}

/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable.  This manages the different forms of diagnostic
/// emitted for particular types of uses.  Returns true if the use was
/// diagnosed as a warning.  If a particular use is one we omit warnings for,
/// returns false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const UninitUse &Use,
                                     bool alwaysReportSelfInit = false) {

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
    // Inspect the initializer of the variable declaration which is
    // being referenced prior to its initialization.  We emit
    // specialized diagnostics for self-initialization, and we
    // specifically avoid warning about self references which take the
    // form of:
    //
    //   int x = x;
    //
    // This is used to indicate to GCC that 'x' is intentionally left
    // uninitialized.  Proven code paths which access 'x' in
    // an uninitialized state after this will still warn.
    if (const Expr *Initializer = VD->getInit()) {
      if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
        return false;

      ContainsReference CR(S.Context, DRE);
      CR.Visit(const_cast<Expr*>(Initializer));
      if (CR.doesContainReference()) {
        S.Diag(DRE->getLocStart(),
               diag::warn_uninit_self_reference_in_init)
          << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
        return true;
      }
    }

    unsigned DiagID = 0;
    switch (Use.getKind()) {
    case UninitUse::Always: DiagID = diag::warn_uninit_var; break;
    case UninitUse::Sometimes: DiagID = diag::warn_sometimes_uninit_var; break;
    case UninitUse::Maybe: DiagID = diag::warn_maybe_uninit_var; break;
    }
    S.Diag(DRE->getLocStart(), DiagID)
      << VD->getDeclName() << DRE->getSourceRange();
    NoteUninitBranches(S, Use);
  } else {
    const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
    if (VD->getType()->isBlockPointerType() &&
        !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getLocStart(),
             diag::warn_uninit_byref_blockvar_captured_by_block)
        << VD->getDeclName();
    else {
      unsigned DiagID = 0;
      switch (Use.getKind()) {
      case UninitUse::Always:
        DiagID = diag::warn_uninit_var_captured_by_block;
        break;
      case UninitUse::Sometimes:
        DiagID = diag::warn_sometimes_uninit_var_captured_by_block;
        break;
      case UninitUse::Maybe:
        DiagID = diag::warn_maybe_uninit_var_captured_by_block;
        break;
      }
      S.Diag(BE->getLocStart(), DiagID) << VD->getDeclName();
      NoteUninitBranches(S, Use);
    }
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
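  // Illustrative example (not in the original source): for
  //   int x; if (flag) use(x);
  // the warning on 'use(x)' is followed either by a fixit note suggesting
  // "int x = 0;" or, failing that, by a note pointing at the declaration.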
  if (!SuggestInitializationFixit(S, VD))
    S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
      << VD->getDeclName();

  return true;
}

namespace {
  class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
  public:
    FallthroughMapper(Sema &S)
      : FoundSwitchStatements(false),
        S(S) {
    }

    bool foundSwitchStatements() const { return FoundSwitchStatements; }

    void markFallthroughVisited(const AttributedStmt *Stmt) {
      bool Found = FallthroughStmts.erase(Stmt);
      assert(Found);
      (void)Found;
    }

    typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;

    const AttrStmts &getFallthroughStmts() const {
      return FallthroughStmts;
    }

    bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt) {
      int UnannotatedCnt = 0;
      AnnotatedCnt = 0;

      std::deque<const CFGBlock*> BlockQueue;

      std::copy(B.pred_begin(), B.pred_end(), std::back_inserter(BlockQueue));

      while (!BlockQueue.empty()) {
        const CFGBlock *P = BlockQueue.front();
        BlockQueue.pop_front();

        const Stmt *Term = P->getTerminator();
        if (Term && isa<SwitchStmt>(Term))
          continue; // Switch statement, good.

        const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
        if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
          continue; // Previous case label has no statements, good.

        if (P->pred_begin() == P->pred_end()) {  // The block is unreachable.
          // This only catches trivially unreachable blocks.
          for (CFGBlock::const_iterator ElIt = P->begin(), ElEnd = P->end();
               ElIt != ElEnd; ++ElIt) {
            if (const CFGStmt *CS = ElIt->getAs<CFGStmt>()) {
              if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
                S.Diag(AS->getLocStart(),
                       diag::warn_fallthrough_attr_unreachable);
                markFallthroughVisited(AS);
                ++AnnotatedCnt;
              }
              // Don't care about other unreachable statements.
            }
          }
          // If there are no unreachable statements, this may be a special
          // case in CFG:
          // case X: {
          //    A a;  // A has a destructor.
          //    break;
          // }
          // // <<<< This place is represented by a 'hanging' CFG block.
          // case Y:
          continue;
        }

        const Stmt *LastStmt = getLastStmt(*P);
        if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
          markFallthroughVisited(AS);
          ++AnnotatedCnt;
          continue; // Fallthrough annotation, good.
        }

        if (!LastStmt) { // This block contains no executable statements.
          // Traverse its predecessors.
          std::copy(P->pred_begin(), P->pred_end(),
                    std::back_inserter(BlockQueue));
          continue;
        }

        ++UnannotatedCnt;
      }
      return !!UnannotatedCnt;
    }

    // RecursiveASTVisitor setup.
    bool shouldWalkTypesOfTypeLocs() const { return false; }

    bool VisitAttributedStmt(AttributedStmt *S) {
      if (asFallThroughAttr(S))
        FallthroughStmts.insert(S);
      return true;
    }

    bool VisitSwitchStmt(SwitchStmt *S) {
      FoundSwitchStatements = true;
      return true;
    }

  private:

    static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
      if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
        if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
          return AS;
      }
      return 0;
    }

    static const Stmt *getLastStmt(const CFGBlock &B) {
      if (const Stmt *Term = B.getTerminator())
        return Term;
      for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
                                            ElemEnd = B.rend();
           ElemIt != ElemEnd; ++ElemIt) {
        if (const CFGStmt *CS = ElemIt->getAs<CFGStmt>())
          return CS->getStmt();
      }
      // Workaround to detect a statement thrown out by CFGBuilder:
      //   case X: {} case Y:
      //   case X: ; case Y:
      if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
        if (!isa<SwitchCase>(SW->getSubStmt()))
          return SW->getSubStmt();

      return 0;
    }

    bool FoundSwitchStatements;
    AttrStmts FallthroughStmts;
    Sema &S;
  };
}

static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC) {
  FallthroughMapper FM(S);
  FM.TraverseStmt(AC.getBody());

  if (!FM.foundSwitchStatements())
    return;

  CFG *Cfg = AC.getCFG();

  if (!Cfg)
    return;

  int AnnotatedCnt;

  for (CFG::reverse_iterator I = Cfg->rbegin(), E = Cfg->rend(); I != E; ++I) {
    const CFGBlock &B = **I;
    const Stmt *Label = B.getLabel();

    if (!Label || !isa<SwitchCase>(Label))
      continue;

    if (!FM.checkFallThroughIntoBlock(B, AnnotatedCnt))
      continue;

    S.Diag(Label->getLocStart(), diag::warn_unannotated_fallthrough);

    if (!AnnotatedCnt) {
      SourceLocation L = Label->getLocStart();
      if (L.isMacroID())
        continue;
      if (S.getLangOpts().CPlusPlus0x) {
        const Stmt *Term = B.getTerminator();
        if (!(B.empty() && Term && isa<BreakStmt>(Term))) {
          S.Diag(L, diag::note_insert_fallthrough_fixit) <<
            FixItHint::CreateInsertion(L, "[[clang::fallthrough]]; ");
        }
      }
      S.Diag(L, diag::note_insert_break_fixit) <<
        FixItHint::CreateInsertion(L, "break; ");
    }
  }

  const FallthroughMapper::AttrStmts &Fallthroughs = FM.getFallthroughStmts();
  for (FallthroughMapper::AttrStmts::const_iterator I = Fallthroughs.begin(),
                                                    E = Fallthroughs.end();
       I != E; ++I) {
    S.Diag((*I)->getLocStart(), diag::warn_fallthrough_attr_invalid_placement);
  }
}

namespace {
struct SLocSort {
  bool operator()(const UninitUse &a, const UninitUse &b) {
    // Prefer a more confident report over a less confident one.
    if (a.getKind() != b.getKind())
      return a.getKind() > b.getKind();
    SourceLocation aLoc = a.getUser()->getLocStart();
    SourceLocation bLoc = b.getUser()->getLocStart();
    return aLoc.getRawEncoding() < bLoc.getRawEncoding();
  }
};

class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  typedef llvm::DenseMap<const VarDecl *, std::pair<UsesVec*, bool> > UsesMap;
  UsesMap *uses;

public:
  UninitValsDiagReporter(Sema &S) : S(S), uses(0) {}
  ~UninitValsDiagReporter() {
    flushDiagnostics();
  }

  std::pair<UsesVec*, bool> &getUses(const VarDecl *vd) {
    if (!uses)
      uses = new UsesMap();

    UsesMap::mapped_type &V = (*uses)[vd];
    UsesVec *&vec = V.first;
    if (!vec)
      vec = new UsesVec();

    return V;
  }

  void handleUseOfUninitVariable(const VarDecl *vd, const UninitUse &use) {
    getUses(vd).first->push_back(use);
  }

  void handleSelfInit(const VarDecl *vd) {
    getUses(vd).second = true;
  }

  void flushDiagnostics() {
    if (!uses)
      return;

    // FIXME: This iteration order, and thus the resulting diagnostic order,
    // is nondeterministic.
    for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
      const VarDecl *vd = i->first;
      const UsesMap::mapped_type &V = i->second;

      UsesVec *vec = V.first;
      bool hasSelfInit = V.second;

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init.  We want
      // to report the diagnostic at the self-init since that is the root
      // cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations.  While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
        std::sort(vec->begin(), vec->end(), SLocSort());

        for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
             ++vi) {
          // If we have self-init, downgrade all uses to 'may be
          // uninitialized'.
          UninitUse Use = hasSelfInit ? UninitUse(vi->getUser(), false) : *vi;

          if (DiagnoseUninitializedUse(S, vd, Use))
            // Skip further diagnostics for this variable.  We try to warn
            // only on the first point at which a variable is used
            // uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }
    delete uses;
  }

private:
  static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
    for (UsesVec::const_iterator i = vec->begin(), e = vec->end(); i != e;
         ++i) {
      if (i->getKind() == UninitUse::Always) {
        return true;
      }
    }
    return false;
  }
};
}

//===----------------------------------------------------------------------===//
// -Wthread-safety
//===----------------------------------------------------------------------===//
namespace clang {
namespace thread_safety {
typedef llvm::SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;

struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};

namespace {
class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  SourceLocation FunLocation, FunEndLocation;

  // Helper functions
  void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
    // Gracefully handle rare cases when the analysis can't get a more
    // precise source location.
    if (!Loc.isValid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }

 public:
  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
    : S(S), FunLocation(FL), FunEndLocation(FEL) {}

  /// \brief Emit all buffered diagnostics in order of source location.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function orders diagnostics
  /// and outputs them.
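  /// For example (illustrative only): warnings buffered for lines 42 and 7
  /// of the same file are emitted in source order (7 before 42), regardless
  /// of the order in which the lockset analysis produced them.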
  void emitDiagnostics() {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
         I != E; ++I) {
      S.Diag(I->first.first, I->first.second);
      const OptionalNotes &Notes = I->second;
      for (unsigned NoteI = 0, NoteN = Notes.size(); NoteI != NoteN; ++NoteI)
        S.Diag(Notes[NoteI].first, Notes[NoteI].second);
    }
  }

  void handleInvalidLockExp(SourceLocation Loc) {
    PartialDiagnosticAt Warning(Loc,
                                S.PDiag(diag::warn_cannot_resolve_lock) << Loc);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }
  void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
  }

  void handleDoubleLock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_double_lock, LockName, Loc);
  }

  void handleMutexHeldEndOfScope(Name LockName, SourceLocation LocLocked,
                                 SourceLocation LocEndOfScope,
                                 LockErrorKind LEK) {
    unsigned DiagID = 0;
    switch (LEK) {
      case LEK_LockedSomePredecessors:
        DiagID = diag::warn_lock_some_predecessors;
        break;
      case LEK_LockedSomeLoopIterations:
        DiagID = diag::warn_expecting_lock_held_on_loop;
        break;
      case LEK_LockedAtEndOfFunction:
        DiagID = diag::warn_no_unlock;
        break;
    }
    if (LocEndOfScope.isInvalid())
      LocEndOfScope = FunEndLocation;

    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << LockName);
    PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here));
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
  }

  void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
                                SourceLocation Loc2) {
    PartialDiagnosticAt Warning(
      Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName);
    PartialDiagnosticAt Note(
      Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
  }

  void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
                         AccessKind AK, SourceLocation Loc) {
    assert((POK == POK_VarAccess || POK == POK_VarDereference)
             && "Only works for variables");
    unsigned DiagID = POK == POK_VarAccess ?
                        diag::warn_variable_requires_any_lock :
                        diag::warn_var_deref_requires_any_lock;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
      << D->getName() << getLockKindFromAccessKind(AK));
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }

  void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
                          Name LockName, LockKind LK, SourceLocation Loc) {
    unsigned DiagID = 0;
    switch (POK) {
      case POK_VarAccess:
        DiagID = diag::warn_variable_requires_lock;
        break;
      case POK_VarDereference:
        DiagID = diag::warn_var_deref_requires_lock;
        break;
      case POK_FunctionCall:
        DiagID = diag::warn_fun_requires_lock;
        break;
    }
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
      << D->getName() << LockName << LK);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }

  void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
    PartialDiagnosticAt Warning(Loc,
      S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }
};
}
}
}

//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//

clang::sema::AnalysisBasedWarnings::Policy::Policy() {
  enableCheckFallThrough = 1;
  enableCheckUnreachable = 0;
  enableThreadSafetyAnalysis = 0;
}

clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
  : S(s),
    NumFunctionsAnalyzed(0),
    NumFunctionsWithBadCFGs(0),
    NumCFGBlocks(0),
    MaxCFGBlocksPerFunction(0),
    NumUninitAnalysisFunctions(0),
    NumUninitAnalysisVariables(0),
    MaxUninitAnalysisVariablesPerFunction(0),
    NumUninitAnalysisBlockVisits(0),
    MaxUninitAnalysisBlockVisitsPerFunction(0) {
  DiagnosticsEngine &D = S.getDiagnostics();
  DefaultPolicy.enableCheckUnreachable = (unsigned)
    (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
     DiagnosticsEngine::Ignored);
  DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
    (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
     DiagnosticsEngine::Ignored);
}

static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
  for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
       i = fscope->PossiblyUnreachableDiags.begin(),
       e = fscope->PossiblyUnreachableDiags.end();
       i != e; ++i) {
    const sema::PossiblyUnreachableDiag &D = *i;
    S.Diag(D.Loc, D.PD);
  }
}

void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
                                     sema::FunctionScopeInfo *fscope,
                                     const Decl *D, const BlockExpr *blkExpr) {

  // We avoid doing analysis-based warnings when there are errors for
  // two reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Do not do any analysis for declarations in system headers if we are
  // going to just ignore them.
  if (Diags.getSuppressSystemWarnings() &&
      S.SourceMgr.isInSystemHeader(D->getLocation()))
    return;

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    return;

  if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.
    flushDiagnostics(S, fscope);
    return;
  }

  const Stmt *Body = D->getBody();
  assert(Body);

  AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0, D);

  // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
  // explosion for destructors that can result and the compile time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;

  // Force that certain expressions appear as CFGElements in the CFG.  This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring.  This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately.  This is essentially a layering violation.
  if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis) {
    // Unreachable code analysis and thread safety require a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  }
  else {
    AC.getCFGBuildOptions()
      .setAlwaysAdd(Stmt::BinaryOperatorClass)
      .setAlwaysAdd(Stmt::BlockExprClass)
      .setAlwaysAdd(Stmt::CStyleCastExprClass)
      .setAlwaysAdd(Stmt::DeclRefExprClass)
      .setAlwaysAdd(Stmt::ImplicitCastExprClass)
      .setAlwaysAdd(Stmt::UnaryOperatorClass)
      .setAlwaysAdd(Stmt::AttributedStmtClass);
  }

  // Construct the analysis context with the specified CFG build options.

  // Emit delayed diagnostics.
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

    // Register the expressions with the CFGBuilder.
    for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
         i = fscope->PossiblyUnreachableDiags.begin(),
         e = fscope->PossiblyUnreachableDiags.end();
         i != e; ++i) {
      if (const Stmt *stmt = i->stmt)
        AC.registerForcedBlockExpression(stmt);
    }

    if (AC.getCFG()) {
      analyzed = true;
      for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
            i = fscope->PossiblyUnreachableDiags.begin(),
            e = fscope->PossiblyUnreachableDiags.end();
            i != e; ++i)
      {
        const sema::PossiblyUnreachableDiag &D = *i;
        bool processed = false;
        if (const Stmt *stmt = i->stmt) {
          const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
          CFGReverseBlockReachabilityAnalysis *cra =
              AC.getCFGReachablityAnalysis();
          // FIXME: We should be able to assert that block is non-null, but
          // the CFG analysis can skip potentially-evaluated expressions in
          // edge cases; see test/Sema/vla-2.c.
          if (block && cra) {
            // Can this block be reached from the entrance?
            if (cra->isReachable(&AC.getCFG()->getEntry(), block))
              S.Diag(D.Loc, D.PD);
            processed = true;
          }
        }
        if (!processed) {
          // Emit the warning anyway if we cannot map to a basic block.
          S.Diag(D.Loc, D.PD);
        }
      }
    }

    if (!analyzed)
      flushDiagnostics(S, fscope);
  }

  // Warning: check missing 'return'
  if (P.enableCheckFallThrough) {
    const CheckFallThroughDiagnostics &CD =
      (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
       : (isa<CXXMethodDecl>(D) &&
          cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
          cast<CXXMethodDecl>(D)->getParent()->isLambda())
            ? CheckFallThroughDiagnostics::MakeForLambda()
            : CheckFallThroughDiagnostics::MakeForFunction(D));
    CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
  }

  // Warning: check for unreachable code
  if (P.enableCheckUnreachable) {
    // Only check for unreachable code on non-template instantiations.
    // Different template instantiations can effectively change the
    // control-flow and it is very difficult to prove that a snippet of code
    // in a template is unreachable for all instantiations.
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
      isTemplateInstantiation = Function->isTemplateInstantiation();
    if (!isTemplateInstantiation)
      CheckUnreachable(S, AC);
  }

  // Check for thread safety violations
  if (P.enableThreadSafetyAnalysis) {
    SourceLocation FL = AC.getDecl()->getLocation();
    SourceLocation FEL = AC.getDecl()->getLocEnd();
    thread_safety::ThreadSafetyReporter Reporter(S, FL, FEL);
    thread_safety::runThreadSafetyAnalysis(AC, Reporter);
    Reporter.emitDiagnostics();
  }

  if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
      != DiagnosticsEngine::Ignored ||
      Diags.getDiagnosticLevel(diag::warn_sometimes_uninit_var,
                               D->getLocStart())
      != DiagnosticsEngine::Ignored ||
      Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
      != DiagnosticsEngine::Ignored) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
                                        reporter, stats);

      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
            std::max(MaxUninitAnalysisVariablesPerFunction,
                     stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
            std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                     stats.NumBlockVisits);
      }
    }
  }

  if (Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough,
                               D->getLocStart())
      != DiagnosticsEngine::Ignored) {
    DiagnoseSwitchLabelsFallthrough(S, AC);
  }

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detailed information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
    } else {
      ++NumFunctionsWithBadCFGs;
    }
  }
}

void clang::sema::AnalysisBasedWarnings::PrintStats() const {
  llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";

  unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
  unsigned AvgCFGBlocksPerFunction =
      !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
  llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
               << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
               << "  " << NumCFGBlocks << " CFG blocks built.\n"
               << "  " << AvgCFGBlocksPerFunction
               << " average CFG blocks per function.\n"
               << "  " << MaxCFGBlocksPerFunction
               << " max CFG blocks per function.\n";

  unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
      : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
  unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
      : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
  llvm::errs() << NumUninitAnalysisFunctions
               << " functions analyzed for uninitialized variables\n"
               << "  " << NumUninitAnalysisVariables
               << " variables analyzed.\n"
               << "  " << AvgUninitVariablesPerFunction
               << " average variables per function.\n"
               << "  " << MaxUninitAnalysisVariablesPerFunction
               << " max variables per function.\n"
               << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
               << "  " << AvgUninitBlockVisitsPerFunction
               << " average block visits per function.\n"
               << "  " << MaxUninitAnalysisBlockVisitsPerFunction
               << " max block visits per function.\n";
}
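
// Usage sketch (illustrative; the call site below is an assumption, not part
// of this file): Sema owns an AnalysisBasedWarnings instance and, once a
// function body has been parsed, drives it roughly as follows:
//
//   AnalysisBasedWarnings::Policy P = AnalysisWarnings.getDefaultPolicy();
//   AnalysisWarnings.IssueWarnings(P, FSI, D, blkExpr);
//
// PrintStats() is only meaningful when Sema's CollectStats flag is set.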