AnalysisBasedWarnings.cpp revision 8cd64b4c5553fa6284d248336cb7c82dc960a394
1//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines analysis_warnings::[Policy,Executor]. 11// Together they are used by Sema to issue warnings based on inexpensive 12// static analysis algorithms in libAnalysis. 13// 14//===----------------------------------------------------------------------===// 15 16#include "clang/Sema/AnalysisBasedWarnings.h" 17#include "clang/Sema/SemaInternal.h" 18#include "clang/Sema/ScopeInfo.h" 19#include "clang/Basic/SourceManager.h" 20#include "clang/Basic/SourceLocation.h" 21#include "clang/Lex/Preprocessor.h" 22#include "clang/Lex/Lexer.h" 23#include "clang/AST/DeclObjC.h" 24#include "clang/AST/DeclCXX.h" 25#include "clang/AST/ExprObjC.h" 26#include "clang/AST/ExprCXX.h" 27#include "clang/AST/StmtObjC.h" 28#include "clang/AST/StmtCXX.h" 29#include "clang/AST/EvaluatedExprVisitor.h" 30#include "clang/AST/StmtVisitor.h" 31#include "clang/AST/RecursiveASTVisitor.h" 32#include "clang/Analysis/AnalysisContext.h" 33#include "clang/Analysis/CFG.h" 34#include "clang/Analysis/Analyses/ReachableCode.h" 35#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h" 36#include "clang/Analysis/Analyses/ThreadSafety.h" 37#include "clang/Analysis/CFGStmtMap.h" 38#include "clang/Analysis/Analyses/UninitializedValues.h" 39#include "llvm/ADT/BitVector.h" 40#include "llvm/ADT/FoldingSet.h" 41#include "llvm/ADT/ImmutableMap.h" 42#include "llvm/ADT/PostOrderIterator.h" 43#include "llvm/ADT/SmallVector.h" 44#include "llvm/ADT/StringRef.h" 45#include "llvm/Support/Casting.h" 46#include <algorithm> 47#include <iterator> 48#include <vector> 49#include <deque> 50 51using namespace clang; 52 
//===----------------------------------------------------------------------===//
// Unreachable code analysis.
//===----------------------------------------------------------------------===//

namespace {
  // Adapts the libAnalysis reachable-code callback interface to Sema:
  // each unreachable region found is reported as -Wunreachable-code.
  class UnreachableCodeHandler : public reachable_code::Callback {
    Sema &S;
  public:
    UnreachableCodeHandler(Sema &s) : S(s) {}

    void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
      S.Diag(L, diag::warn_unreachable) << R1 << R2;
    }
  };
}

/// CheckUnreachable - Check for unreachable code.
static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
  UnreachableCodeHandler UC(S);
  reachable_code::FindUnreachableCode(AC, UC);
}

//===----------------------------------------------------------------------===//
// Check for missing return value.
//===----------------------------------------------------------------------===//

enum ControlFlowKind {
  UnknownFallThrough,        // No CFG available; nothing can be said.
  NeverFallThrough,          // Never falls off the end, but may return.
  MaybeFallThrough,          // Might or might not fall off the end.
  AlwaysFallThrough,         // Always falls off the end.
  NeverFallThroughOrReturn   // Neither falls off the end nor returns.
};

/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead. Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(&b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit block
  // and look for fall through paths, being careful to ignore normal returns,
  // and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
         I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock& B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return. They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG.  This is
    // normal.  We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (isa<CFGStmt>(*ri))
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    // Classify the last statement on the edge into the exit block.
    CFGStmt CS = cast<CFGStmt>(*ri);
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      // MS-style inline assembly may transfer control in ways the CFG
      // cannot model; treat it as both a fake edge and a possible return.
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<MSAsmStmt>(S)) {
      // TODO: Verify this is correct.
      HasFakeEdge = true;
      HasLiveReturn = true;
      continue;
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
        == B.succ_end()) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}

namespace {

// Bundles the diagnostic IDs to use for each fall-through outcome, chosen
// per declaration kind (function, block, or lambda).  A diagnostic ID of 0
// means "do not emit that diagnostic".
struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  enum { Function, Block, Lambda } funMode;
  SourceLocation FuncLoc;

  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    // Don't suggest that template instantiations be marked "noreturn"
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
      isTemplateInstantiation = Function->isTemplateInstantiation();

    if (!isVirtualMethod && !isTemplateInstantiation)
      D.diag_NeverFallThroughOrReturn =
        diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = Function;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn =
      diag::warn_suggest_noreturn_block;
    D.funMode = Block;
    return D;
  }

  static CheckFallThroughDiagnostics MakeForLambda() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_lambda_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_lambda;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_lambda_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_lambda;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Lambda;
    return D;
  }

  // Returns true when every diagnostic this configuration could emit is
  // disabled for the given declaration, letting the caller skip the
  // (comparatively expensive) CFG-based fall-through analysis entirely.
  bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode == Function) {
      // NOTE(review): the third clause consults warn_suggest_noreturn_block
      // even in Function mode -- confirm this is intended.
      return (ReturnsVoid ||
              D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
                                   FuncLoc) == DiagnosticsEngine::Ignored)
        && (!HasNoReturn ||
            D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr,
                                 FuncLoc) == DiagnosticsEngine::Ignored)
        && (!ReturnsVoid ||
            D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
              == DiagnosticsEngine::Ignored);
    }

    // For blocks / lambdas.
    return ReturnsVoid && !HasNoReturn
      && ((funMode == Lambda) ||
          D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
            == DiagnosticsEngine::Ignored);
  }
};

}

/// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a
/// function that should return a value.  Check that we don't fall off the end
/// of a noreturn function.  We assume that functions and blocks not marked
/// noreturn will return.
///
/// blkExpr is only consulted when D is a BlockDecl (to recover the block's
/// function type); it may be null otherwise.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    const BlockExpr *blkExpr,
                                    const CheckFallThroughDiagnostics& CD,
                                    AnalysisDeclContext &AC) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    ReturnsVoid = FD->getResultType()->isVoidType();
    HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
       FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
  }
  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getResultType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    QualType BlockTy = blkExpr->getType();
    if (const FunctionType *FT =
          BlockTy->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getResultType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
      return;

  // FIXME: Function try block
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
      case UnknownFallThrough:
        break;

      case MaybeFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_ReturnsNonVoid);
        break;
      case AlwaysFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_ReturnsNonVoid);
        break;
      case NeverFallThroughOrReturn:
        if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
          if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << 0 << FD;
          } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << 1 << MD;
          } else {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
          }
        }
        break;
      case NeverFallThrough:
        break;
    }
  }
}

//===----------------------------------------------------------------------===//
// -Wuninitialized
//===----------------------------------------------------------------------===//

namespace {
/// ContainsReference - A visitor class to search for references to
/// a particular declaration (the needle) within any evaluated component of an
/// expression (recursively).
409class ContainsReference : public EvaluatedExprVisitor<ContainsReference> { 410 bool FoundReference; 411 const DeclRefExpr *Needle; 412 413public: 414 ContainsReference(ASTContext &Context, const DeclRefExpr *Needle) 415 : EvaluatedExprVisitor<ContainsReference>(Context), 416 FoundReference(false), Needle(Needle) {} 417 418 void VisitExpr(Expr *E) { 419 // Stop evaluating if we already have a reference. 420 if (FoundReference) 421 return; 422 423 EvaluatedExprVisitor<ContainsReference>::VisitExpr(E); 424 } 425 426 void VisitDeclRefExpr(DeclRefExpr *E) { 427 if (E == Needle) 428 FoundReference = true; 429 else 430 EvaluatedExprVisitor<ContainsReference>::VisitDeclRefExpr(E); 431 } 432 433 bool doesContainReference() const { return FoundReference; } 434}; 435} 436 437static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) { 438 QualType VariableTy = VD->getType().getCanonicalType(); 439 if (VariableTy->isBlockPointerType() && 440 !VD->hasAttr<BlocksAttr>()) { 441 S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization) << VD->getDeclName() 442 << FixItHint::CreateInsertion(VD->getLocation(), "__block "); 443 return true; 444 } 445 446 // Don't issue a fixit if there is already an initializer. 447 if (VD->getInit()) 448 return false; 449 450 // Suggest possible initialization (if any). 451 std::string Init = S.getFixItZeroInitializerForType(VariableTy); 452 if (Init.empty()) 453 return false; 454 455 // Don't suggest a fixit inside macros. 456 if (VD->getLocEnd().isMacroID()) 457 return false; 458 459 SourceLocation Loc = S.PP.getLocForEndOfToken(VD->getLocEnd()); 460 461 S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName() 462 << FixItHint::CreateInsertion(Loc, Init); 463 return true; 464} 465 466/// Create a fixit to remove an if-like statement, on the assumption that its 467/// condition is CondVal. 
static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
                          const Stmt *Else, bool CondVal,
                          FixItHint &Fixit1, FixItHint &Fixit2) {
  if (CondVal) {
    // If condition is always true, remove all but the 'then'.
    Fixit1 = FixItHint::CreateRemoval(
        CharSourceRange::getCharRange(If->getLocStart(),
                                      Then->getLocStart()));
    if (Else) {
      // Remove from the end of the 'then' through the end of the 'else'.
      SourceLocation ElseKwLoc = Lexer::getLocForEndOfToken(
          Then->getLocEnd(), 0, S.getSourceManager(), S.getLangOpts());
      Fixit2 = FixItHint::CreateRemoval(
          SourceRange(ElseKwLoc, Else->getLocEnd()));
    }
  } else {
    // If condition is always false, remove all but the 'else'.
    if (Else)
      Fixit1 = FixItHint::CreateRemoval(
          CharSourceRange::getCharRange(If->getLocStart(),
                                        Else->getLocStart()));
    else
      Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
  }
}

/// DiagUninitUse -- Helper function to produce a diagnostic for an
/// uninitialized use of a variable.
static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
                          bool IsCapturedByBlock) {
  bool Diagnosed = false;

  // Diagnose each branch which leads to a sometimes-uninitialized use.
  for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
       I != E; ++I) {
    assert(Use.getKind() == UninitUse::Sometimes);

    const Expr *User = Use.getUser();
    const Stmt *Term = I->Terminator;

    // Information used when building the diagnostic.
    unsigned DiagKind;
    const char *Str;
    SourceRange Range;

    // FixIts to suppress the diagnostic by removing the dead condition.
    // For all binary terminators, branch 0 is taken if the condition is true,
    // and branch 1 is taken if the condition is false.
    int RemoveDiagKind = -1;
    const char *FixitStr =
        S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
                                  : (I->Output ? "1" : "0");
    FixItHint Fixit1, Fixit2;

    switch (Term->getStmtClass()) {
    default:
      // Don't know how to report this. Just fall back to 'may be used
      // uninitialized'. This happens for range-based for, which the user
      // can't explicitly fix.
      // FIXME: This also happens if the first use of a variable is always
      // uninitialized, eg "for (int n; n < 10; ++n)". We should report that
      // with the 'is uninitialized' diagnostic.
      continue;

    // "condition is true / condition is false".
    case Stmt::IfStmtClass: {
      const IfStmt *IS = cast<IfStmt>(Term);
      DiagKind = 0;
      Str = "if";
      Range = IS->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
      DiagKind = 0;
      Str = "?:";
      Range = CO->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Term);
      if (!BO->isLogicalOp())
        continue;
      DiagKind = 0;
      Str = BO->getOpcodeStr();
      Range = BO->getLHS()->getSourceRange();
      RemoveDiagKind = 0;
      if ((BO->getOpcode() == BO_LAnd && I->Output) ||
          (BO->getOpcode() == BO_LOr && !I->Output))
        // true && y -> y, false || y -> y.
        Fixit1 = FixItHint::CreateRemoval(SourceRange(BO->getLocStart(),
                                                      BO->getOperatorLoc()));
      else
        // false && y -> false, true || y -> true.
        Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
      break;
    }

    // "loop is entered / loop is exited".
    case Stmt::WhileStmtClass:
      DiagKind = 1;
      Str = "while";
      Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::ForStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      if (I->Output)
        // An empty for-condition is always "true": removing it enters the loop.
        Fixit1 = FixItHint::CreateRemoval(Range);
      else
        Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;

    // "condition is true / loop is exited".
    case Stmt::DoStmtClass:
      DiagKind = 2;
      Str = "do";
      Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;

    // "switch case is taken".
    case Stmt::CaseStmtClass:
      DiagKind = 3;
      Str = "case";
      Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
      break;
    case Stmt::DefaultStmtClass:
      DiagKind = 3;
      Str = "default";
      Range = cast<DefaultStmt>(Term)->getDefaultLoc();
      break;
    }

    S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
      << VD->getDeclName() << IsCapturedByBlock << DiagKind
      << Str << I->Output << Range;
    S.Diag(User->getLocStart(), diag::note_uninit_var_use)
      << IsCapturedByBlock << User->getSourceRange();
    if (RemoveDiagKind != -1)
      S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
        << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;

    Diagnosed = true;
  }

  // No branch was reportable; fall back to a plain always/maybe warning.
  if (!Diagnosed)
    S.Diag(Use.getUser()->getLocStart(),
           Use.getKind() == UninitUse::Always ? diag::warn_uninit_var
                                              : diag::warn_maybe_uninit_var)
      << VD->getDeclName() << IsCapturedByBlock
      << Use.getUser()->getSourceRange();
}

/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable.
/// This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
/// as a warning. If a particular use is one we omit warnings for, returns
/// false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const UninitUse &Use,
                                     bool alwaysReportSelfInit = false) {

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
    // Inspect the initializer of the variable declaration which is
    // being referenced prior to its initialization. We emit
    // specialized diagnostics for self-initialization, and we
    // specifically avoid warning about self references which take the
    // form of:
    //
    //   int x = x;
    //
    // This is used to indicate to GCC that 'x' is intentionally left
    // uninitialized. Proven code paths which access 'x' in
    // an uninitialized state after this will still warn.
    if (const Expr *Initializer = VD->getInit()) {
      if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
        return false;

      ContainsReference CR(S.Context, DRE);
      CR.Visit(const_cast<Expr*>(Initializer));
      if (CR.doesContainReference()) {
        S.Diag(DRE->getLocStart(),
               diag::warn_uninit_self_reference_in_init)
          << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
        return true;
      }
    }

    DiagUninitUse(S, VD, Use, false);
  } else {
    // The only other recorded user kind is a block capturing the variable.
    const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
    if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getLocStart(),
             diag::warn_uninit_byref_blockvar_captured_by_block)
        << VD->getDeclName();
    else
      DiagUninitUse(S, VD, Use, true);
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
  if (!SuggestInitializationFixit(S, VD))
    S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
      << VD->getDeclName();

  return true;
}

namespace {
  // Walks a function body looking for switch statements and
  // [[clang::fallthrough]] annotations, then checks each switch-case CFG
  // block for unannotated fall-through from the previous case.
  class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
  public:
    FallthroughMapper(Sema &S)
      : FoundSwitchStatements(false),
        S(S) {
    }

    bool foundSwitchStatements() const { return FoundSwitchStatements; }

    // Removes a fallthrough annotation from the pending set once it has
    // been matched to a real fall-through edge.
    void markFallthroughVisited(const AttributedStmt *Stmt) {
      bool Found = FallthroughStmts.erase(Stmt);
      assert(Found);
      (void)Found;
    }

    typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;

    // Annotations not yet matched to a fall-through edge; anything left
    // over at the end is an invalidly-placed annotation.
    const AttrStmts &getFallthroughStmts() const {
      return FallthroughStmts;
    }

    // Returns true if some predecessor of block B falls through into it
    // without a fallthrough annotation.  AnnotatedCnt receives the number
    // of annotated incoming edges.
    bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt) {
      int UnannotatedCnt = 0;
      AnnotatedCnt = 0;

      std::deque<const CFGBlock*> BlockQueue;

      std::copy(B.pred_begin(), B.pred_end(), std::back_inserter(BlockQueue));

      while (!BlockQueue.empty()) {
        const CFGBlock *P = BlockQueue.front();
        BlockQueue.pop_front();

        const Stmt *Term = P->getTerminator();
        if (Term && isa<SwitchStmt>(Term))
          continue; // Switch statement, good.

        const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
        if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
          continue; // Previous case label has no statements, good.

        if (P->pred_begin() == P->pred_end()) {  // The block is unreachable.
          // This only catches trivially unreachable blocks.
          for (CFGBlock::const_iterator ElIt = P->begin(), ElEnd = P->end();
               ElIt != ElEnd; ++ElIt) {
            if (const CFGStmt *CS = ElIt->getAs<CFGStmt>()){
              if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
                S.Diag(AS->getLocStart(),
                       diag::warn_fallthrough_attr_unreachable);
                markFallthroughVisited(AS);
                ++AnnotatedCnt;
              }
              // Don't care about other unreachable statements.
            }
          }
          // If there are no unreachable statements, this may be a special
          // case in CFG:
          // case X: {
          //    A a;  // A has a destructor.
          //    break;
          // }
          // // <<<< This place is represented by a 'hanging' CFG block.
          // case Y:
          continue;
        }

        const Stmt *LastStmt = getLastStmt(*P);
        if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
          markFallthroughVisited(AS);
          ++AnnotatedCnt;
          continue; // Fallthrough annotation, good.
        }

        if (!LastStmt) { // This block contains no executable statements.
          // Traverse its predecessors.
          std::copy(P->pred_begin(), P->pred_end(),
                    std::back_inserter(BlockQueue));
          continue;
        }

        ++UnannotatedCnt;
      }
      return !!UnannotatedCnt;
    }

    // RecursiveASTVisitor setup.
    bool shouldWalkTypesOfTypeLocs() const { return false; }

    bool VisitAttributedStmt(AttributedStmt *S) {
      if (asFallThroughAttr(S))
        FallthroughStmts.insert(S);
      return true;
    }

    bool VisitSwitchStmt(SwitchStmt *S) {
      FoundSwitchStatements = true;
      return true;
    }

  private:

    // Returns S as an AttributedStmt carrying FallThroughAttr, or null.
    static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
      if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
        if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
          return AS;
      }
      return 0;
    }

    // Returns the last executable statement in block B, or null if the
    // block has none.
    static const Stmt *getLastStmt(const CFGBlock &B) {
      if (const Stmt *Term = B.getTerminator())
        return Term;
      for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
                                            ElemEnd = B.rend();
           ElemIt != ElemEnd; ++ElemIt) {
        if (const CFGStmt *CS = ElemIt->getAs<CFGStmt>())
          return CS->getStmt();
      }
      // Workaround to detect a statement thrown out by CFGBuilder:
      //   case X: {} case Y:
      //   case X: ; case Y:
      if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
        if (!isa<SwitchCase>(SW->getSubStmt()))
          return SW->getSubStmt();

      return 0;
    }

    bool FoundSwitchStatements;
    AttrStmts FallthroughStmts;
    Sema &S;
  };
}

static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
                                            bool PerMethod) {
  FallthroughMapper FM(S);
  FM.TraverseStmt(AC.getBody());

  if (!FM.foundSwitchStatements())
    return;

  if (PerMethod && FM.getFallthroughStmts().empty())
    return;

  CFG *Cfg = AC.getCFG();

  if (!Cfg)
    return;

  int AnnotatedCnt;

  for (CFG::reverse_iterator I = Cfg->rbegin(), E = Cfg->rend(); I != E; ++I) {
    const CFGBlock &B = **I;
    const Stmt *Label = B.getLabel();

    if (!Label || !isa<SwitchCase>(Label))
      continue;

    if (!FM.checkFallThroughIntoBlock(B, AnnotatedCnt))
      continue;

    S.Diag(Label->getLocStart(),
           PerMethod ? diag::warn_unannotated_fallthrough_per_method
                     : diag::warn_unannotated_fallthrough);

    if (!AnnotatedCnt) {
      SourceLocation L = Label->getLocStart();
      // Fixits inside macros would likely be wrong; skip them.
      if (L.isMacroID())
        continue;
      if (S.getLangOpts().CPlusPlus0x) {
        const Stmt *Term = B.getTerminator();
        if (!(B.empty() && Term && isa<BreakStmt>(Term))) {
          S.Diag(L, diag::note_insert_fallthrough_fixit) <<
            FixItHint::CreateInsertion(L, "[[clang::fallthrough]]; ");
        }
      }
      S.Diag(L, diag::note_insert_break_fixit) <<
        FixItHint::CreateInsertion(L, "break; ");
    }
  }

  // Any annotations left unmatched were not placed on a fall-through edge.
  const FallthroughMapper::AttrStmts &Fallthroughs = FM.getFallthroughStmts();
  for (FallthroughMapper::AttrStmts::const_iterator I = Fallthroughs.begin(),
                                                    E = Fallthroughs.end();
       I != E; ++I) {
    S.Diag((*I)->getLocStart(), diag::warn_fallthrough_attr_invalid_placement);
  }

}

namespace {
// Orders UninitUse records for deterministic diagnostic output.
struct SLocSort {
  bool operator()(const UninitUse &a, const UninitUse &b) {
    // Prefer a more confident report over a less confident one.
if (a.getKind() != b.getKind())
      return a.getKind() > b.getKind();
    SourceLocation aLoc = a.getUser()->getLocStart();
    SourceLocation bLoc = b.getUser()->getLocStart();
    return aLoc.getRawEncoding() < bLoc.getRawEncoding();
  }
};

// Collects uninitialized-variable uses reported by the analysis and emits
// them (sorted, deduplicated per variable) when flushed or destroyed.
class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  // Maps each variable to its recorded uses plus a "has self-init" flag.
  typedef llvm::DenseMap<const VarDecl *, std::pair<UsesVec*, bool> > UsesMap;
  UsesMap *uses;

public:
  UninitValsDiagReporter(Sema &S) : S(S), uses(0) {}
  ~UninitValsDiagReporter() {
    flushDiagnostics();
  }

  // Returns (creating if needed) the uses entry for the given variable.
  std::pair<UsesVec*, bool> &getUses(const VarDecl *vd) {
    if (!uses)
      uses = new UsesMap();

    UsesMap::mapped_type &V = (*uses)[vd];
    UsesVec *&vec = V.first;
    if (!vec)
      vec = new UsesVec();

    return V;
  }

  void handleUseOfUninitVariable(const VarDecl *vd, const UninitUse &use) {
    getUses(vd).first->push_back(use);
  }

  void handleSelfInit(const VarDecl *vd) {
    getUses(vd).second = true;
  }

  // Emits all buffered diagnostics and releases the recorded uses.
  void flushDiagnostics() {
    if (!uses)
      return;

    // FIXME: This iteration order, and thus the resulting diagnostic order,
    // is nondeterministic.
    for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
      const VarDecl *vd = i->first;
      const UsesMap::mapped_type &V = i->second;

      UsesVec *vec = V.first;
      bool hasSelfInit = V.second;

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init.  We want
      // to report the diagnostic at the self-init since that is the root cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations.  While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
        std::sort(vec->begin(), vec->end(), SLocSort());

        for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
             ++vi) {
          // If we have self-init, downgrade all uses to 'may be uninitialized'.
          UninitUse Use = hasSelfInit ? UninitUse(vi->getUser(), false) : *vi;

          if (DiagnoseUninitializedUse(S, vd, Use))
            // Skip further diagnostics for this variable. We try to warn only
            // on the first point at which a variable is used uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }
    delete uses;
  }

private:
  // Returns true if any recorded use is definitely (not just maybe)
  // uninitialized.
  static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
    for (UsesVec::const_iterator i = vec->begin(), e = vec->end(); i != e; ++i) {
      if (i->getKind() == UninitUse::Always) {
        return true;
      }
    }
    return false;
  }
};
}


//===----------------------------------------------------------------------===//
// -Wthread-safety
//===----------------------------------------------------------------------===//
namespace clang {
namespace thread_safety {
typedef llvm::SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;

// Comparator for sorting buffered thread-safety diagnostics by their
// position in the translation unit.
struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};

namespace {
// Buffers diagnostics produced by the thread-safety analysis so they can be
// emitted in deterministic source order.
class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  SourceLocation FunLocation, FunEndLocation;

  // Helper functions
  void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
    // Gracefully handle rare cases when the analysis can't get a more
    // precise source location.
    if (!Loc.isValid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }

 public:
  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
    : S(S), FunLocation(FL), FunEndLocation(FEL) {}

  /// \brief Emit all buffered diagnostics in order of sourcelocation.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function orders diagnostics
  /// and outputs them.
  void emitDiagnostics() {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
         I != E; ++I) {
      S.Diag(I->first.first, I->first.second);
      const OptionalNotes &Notes = I->second;
      for (unsigned NoteI = 0, NoteN = Notes.size(); NoteI != NoteN; ++NoteI)
        S.Diag(Notes[NoteI].first, Notes[NoteI].second);
    }
  }

  void handleInvalidLockExp(SourceLocation Loc) {
    PartialDiagnosticAt Warning(Loc,
                                S.PDiag(diag::warn_cannot_resolve_lock) << Loc);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }
  void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
  }

  void handleDoubleLock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_double_lock, LockName, Loc);
  }

  void handleMutexHeldEndOfScope(Name LockName, SourceLocation LocLocked,
                                 SourceLocation LocEndOfScope,
                                 LockErrorKind LEK){
    unsigned DiagID = 0;
    switch (LEK) {
      case LEK_LockedSomePredecessors:
        DiagID = diag::warn_lock_some_predecessors;
        break;
      case LEK_LockedSomeLoopIterations:
        DiagID = diag::warn_expecting_lock_held_on_loop;
        break;
      case LEK_LockedAtEndOfFunction:
        DiagID = diag::warn_no_unlock;
        break;
    }
    if (LocEndOfScope.isInvalid())
      LocEndOfScope = FunEndLocation;

    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << LockName);
    PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here));
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
  }


  void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
                                SourceLocation Loc2) {
    PartialDiagnosticAt Warning(
        Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName);
    PartialDiagnosticAt Note(
        Loc2,
S.PDiag(diag::note_lock_exclusive_and_shared) << LockName); 1080 Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note))); 1081 } 1082 1083 void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK, 1084 AccessKind AK, SourceLocation Loc) { 1085 assert((POK == POK_VarAccess || POK == POK_VarDereference) 1086 && "Only works for variables"); 1087 unsigned DiagID = POK == POK_VarAccess? 1088 diag::warn_variable_requires_any_lock: 1089 diag::warn_var_deref_requires_any_lock; 1090 PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) 1091 << D->getName() << getLockKindFromAccessKind(AK)); 1092 Warnings.push_back(DelayedDiag(Warning, OptionalNotes())); 1093 } 1094 1095 void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK, 1096 Name LockName, LockKind LK, SourceLocation Loc) { 1097 unsigned DiagID = 0; 1098 switch (POK) { 1099 case POK_VarAccess: 1100 DiagID = diag::warn_variable_requires_lock; 1101 break; 1102 case POK_VarDereference: 1103 DiagID = diag::warn_var_deref_requires_lock; 1104 break; 1105 case POK_FunctionCall: 1106 DiagID = diag::warn_fun_requires_lock; 1107 break; 1108 } 1109 PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) 1110 << D->getName() << LockName << LK); 1111 Warnings.push_back(DelayedDiag(Warning, OptionalNotes())); 1112 } 1113 1114 void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) { 1115 PartialDiagnosticAt Warning(Loc, 1116 S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName); 1117 Warnings.push_back(DelayedDiag(Warning, OptionalNotes())); 1118 } 1119}; 1120} 1121} 1122} 1123 1124//===----------------------------------------------------------------------===// 1125// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based 1126// warnings on a function, method, or block. 
1127//===----------------------------------------------------------------------===// 1128 1129clang::sema::AnalysisBasedWarnings::Policy::Policy() { 1130 enableCheckFallThrough = 1; 1131 enableCheckUnreachable = 0; 1132 enableThreadSafetyAnalysis = 0; 1133} 1134 1135clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s) 1136 : S(s), 1137 NumFunctionsAnalyzed(0), 1138 NumFunctionsWithBadCFGs(0), 1139 NumCFGBlocks(0), 1140 MaxCFGBlocksPerFunction(0), 1141 NumUninitAnalysisFunctions(0), 1142 NumUninitAnalysisVariables(0), 1143 MaxUninitAnalysisVariablesPerFunction(0), 1144 NumUninitAnalysisBlockVisits(0), 1145 MaxUninitAnalysisBlockVisitsPerFunction(0) { 1146 DiagnosticsEngine &D = S.getDiagnostics(); 1147 DefaultPolicy.enableCheckUnreachable = (unsigned) 1148 (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) != 1149 DiagnosticsEngine::Ignored); 1150 DefaultPolicy.enableThreadSafetyAnalysis = (unsigned) 1151 (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) != 1152 DiagnosticsEngine::Ignored); 1153 1154} 1155 1156static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) { 1157 for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator 1158 i = fscope->PossiblyUnreachableDiags.begin(), 1159 e = fscope->PossiblyUnreachableDiags.end(); 1160 i != e; ++i) { 1161 const sema::PossiblyUnreachableDiag &D = *i; 1162 S.Diag(D.Loc, D.PD); 1163 } 1164} 1165 1166void clang::sema:: 1167AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P, 1168 sema::FunctionScopeInfo *fscope, 1169 const Decl *D, const BlockExpr *blkExpr) { 1170 1171 // We avoid doing analysis-based warnings when there are errors for 1172 // two reasons: 1173 // (1) The CFGs often can't be constructed (if the body is invalid), so 1174 // don't bother trying. 1175 // (2) The code already has problems; running the analysis just takes more 1176 // time. 
1177 DiagnosticsEngine &Diags = S.getDiagnostics(); 1178 1179 // Do not do any analysis for declarations in system headers if we are 1180 // going to just ignore them. 1181 if (Diags.getSuppressSystemWarnings() && 1182 S.SourceMgr.isInSystemHeader(D->getLocation())) 1183 return; 1184 1185 // For code in dependent contexts, we'll do this at instantiation time. 1186 if (cast<DeclContext>(D)->isDependentContext()) 1187 return; 1188 1189 if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred()) { 1190 // Flush out any possibly unreachable diagnostics. 1191 flushDiagnostics(S, fscope); 1192 return; 1193 } 1194 1195 const Stmt *Body = D->getBody(); 1196 assert(Body); 1197 1198 AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0, D); 1199 1200 // Don't generate EH edges for CallExprs as we'd like to avoid the n^2 1201 // explosion for destrutors that can result and the compile time hit. 1202 AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true; 1203 AC.getCFGBuildOptions().AddEHEdges = false; 1204 AC.getCFGBuildOptions().AddInitializers = true; 1205 AC.getCFGBuildOptions().AddImplicitDtors = true; 1206 1207 // Force that certain expressions appear as CFGElements in the CFG. This 1208 // is used to speed up various analyses. 1209 // FIXME: This isn't the right factoring. This is here for initial 1210 // prototyping, but we need a way for analyses to say what expressions they 1211 // expect to always be CFGElements and then fill in the BuildOptions 1212 // appropriately. This is essentially a layering violation. 1213 if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis) { 1214 // Unreachable code analysis and thread safety require a linearized CFG. 
1215 AC.getCFGBuildOptions().setAllAlwaysAdd(); 1216 } 1217 else { 1218 AC.getCFGBuildOptions() 1219 .setAlwaysAdd(Stmt::BinaryOperatorClass) 1220 .setAlwaysAdd(Stmt::BlockExprClass) 1221 .setAlwaysAdd(Stmt::CStyleCastExprClass) 1222 .setAlwaysAdd(Stmt::DeclRefExprClass) 1223 .setAlwaysAdd(Stmt::ImplicitCastExprClass) 1224 .setAlwaysAdd(Stmt::UnaryOperatorClass) 1225 .setAlwaysAdd(Stmt::AttributedStmtClass); 1226 } 1227 1228 // Construct the analysis context with the specified CFG build options. 1229 1230 // Emit delayed diagnostics. 1231 if (!fscope->PossiblyUnreachableDiags.empty()) { 1232 bool analyzed = false; 1233 1234 // Register the expressions with the CFGBuilder. 1235 for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator 1236 i = fscope->PossiblyUnreachableDiags.begin(), 1237 e = fscope->PossiblyUnreachableDiags.end(); 1238 i != e; ++i) { 1239 if (const Stmt *stmt = i->stmt) 1240 AC.registerForcedBlockExpression(stmt); 1241 } 1242 1243 if (AC.getCFG()) { 1244 analyzed = true; 1245 for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator 1246 i = fscope->PossiblyUnreachableDiags.begin(), 1247 e = fscope->PossiblyUnreachableDiags.end(); 1248 i != e; ++i) 1249 { 1250 const sema::PossiblyUnreachableDiag &D = *i; 1251 bool processed = false; 1252 if (const Stmt *stmt = i->stmt) { 1253 const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt); 1254 CFGReverseBlockReachabilityAnalysis *cra = 1255 AC.getCFGReachablityAnalysis(); 1256 // FIXME: We should be able to assert that block is non-null, but 1257 // the CFG analysis can skip potentially-evaluated expressions in 1258 // edge cases; see test/Sema/vla-2.c. 1259 if (block && cra) { 1260 // Can this block be reached from the entrance? 1261 if (cra->isReachable(&AC.getCFG()->getEntry(), block)) 1262 S.Diag(D.Loc, D.PD); 1263 processed = true; 1264 } 1265 } 1266 if (!processed) { 1267 // Emit the warning anyway if we cannot map to a basic block. 
1268 S.Diag(D.Loc, D.PD); 1269 } 1270 } 1271 } 1272 1273 if (!analyzed) 1274 flushDiagnostics(S, fscope); 1275 } 1276 1277 1278 // Warning: check missing 'return' 1279 if (P.enableCheckFallThrough) { 1280 const CheckFallThroughDiagnostics &CD = 1281 (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock() 1282 : (isa<CXXMethodDecl>(D) && 1283 cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call && 1284 cast<CXXMethodDecl>(D)->getParent()->isLambda()) 1285 ? CheckFallThroughDiagnostics::MakeForLambda() 1286 : CheckFallThroughDiagnostics::MakeForFunction(D)); 1287 CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC); 1288 } 1289 1290 // Warning: check for unreachable code 1291 if (P.enableCheckUnreachable) { 1292 // Only check for unreachable code on non-template instantiations. 1293 // Different template instantiations can effectively change the control-flow 1294 // and it is very difficult to prove that a snippet of code in a template 1295 // is unreachable for all instantiations. 
1296 bool isTemplateInstantiation = false; 1297 if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) 1298 isTemplateInstantiation = Function->isTemplateInstantiation(); 1299 if (!isTemplateInstantiation) 1300 CheckUnreachable(S, AC); 1301 } 1302 1303 // Check for thread safety violations 1304 if (P.enableThreadSafetyAnalysis) { 1305 SourceLocation FL = AC.getDecl()->getLocation(); 1306 SourceLocation FEL = AC.getDecl()->getLocEnd(); 1307 thread_safety::ThreadSafetyReporter Reporter(S, FL, FEL); 1308 thread_safety::runThreadSafetyAnalysis(AC, Reporter); 1309 Reporter.emitDiagnostics(); 1310 } 1311 1312 if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart()) 1313 != DiagnosticsEngine::Ignored || 1314 Diags.getDiagnosticLevel(diag::warn_sometimes_uninit_var,D->getLocStart()) 1315 != DiagnosticsEngine::Ignored || 1316 Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart()) 1317 != DiagnosticsEngine::Ignored) { 1318 if (CFG *cfg = AC.getCFG()) { 1319 UninitValsDiagReporter reporter(S); 1320 UninitVariablesAnalysisStats stats; 1321 std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats)); 1322 runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC, 1323 reporter, stats); 1324 1325 if (S.CollectStats && stats.NumVariablesAnalyzed > 0) { 1326 ++NumUninitAnalysisFunctions; 1327 NumUninitAnalysisVariables += stats.NumVariablesAnalyzed; 1328 NumUninitAnalysisBlockVisits += stats.NumBlockVisits; 1329 MaxUninitAnalysisVariablesPerFunction = 1330 std::max(MaxUninitAnalysisVariablesPerFunction, 1331 stats.NumVariablesAnalyzed); 1332 MaxUninitAnalysisBlockVisitsPerFunction = 1333 std::max(MaxUninitAnalysisBlockVisitsPerFunction, 1334 stats.NumBlockVisits); 1335 } 1336 } 1337 } 1338 1339 bool FallThroughDiagFull = 1340 Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough, 1341 D->getLocStart()) != DiagnosticsEngine::Ignored; 1342 bool FallThroughDiagPerMethod = 1343 
Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough_per_method, 1344 D->getLocStart()) != DiagnosticsEngine::Ignored; 1345 if (FallThroughDiagFull || FallThroughDiagPerMethod) { 1346 DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull); 1347 } 1348 1349 // Collect statistics about the CFG if it was built. 1350 if (S.CollectStats && AC.isCFGBuilt()) { 1351 ++NumFunctionsAnalyzed; 1352 if (CFG *cfg = AC.getCFG()) { 1353 // If we successfully built a CFG for this context, record some more 1354 // detail information about it. 1355 NumCFGBlocks += cfg->getNumBlockIDs(); 1356 MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction, 1357 cfg->getNumBlockIDs()); 1358 } else { 1359 ++NumFunctionsWithBadCFGs; 1360 } 1361 } 1362} 1363 1364void clang::sema::AnalysisBasedWarnings::PrintStats() const { 1365 llvm::errs() << "\n*** Analysis Based Warnings Stats:\n"; 1366 1367 unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs; 1368 unsigned AvgCFGBlocksPerFunction = 1369 !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt; 1370 llvm::errs() << NumFunctionsAnalyzed << " functions analyzed (" 1371 << NumFunctionsWithBadCFGs << " w/o CFGs).\n" 1372 << " " << NumCFGBlocks << " CFG blocks built.\n" 1373 << " " << AvgCFGBlocksPerFunction 1374 << " average CFG blocks per function.\n" 1375 << " " << MaxCFGBlocksPerFunction 1376 << " max CFG blocks per function.\n"; 1377 1378 unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0 1379 : NumUninitAnalysisVariables/NumUninitAnalysisFunctions; 1380 unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 
0 1381 : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions; 1382 llvm::errs() << NumUninitAnalysisFunctions 1383 << " functions analyzed for uninitialiazed variables\n" 1384 << " " << NumUninitAnalysisVariables << " variables analyzed.\n" 1385 << " " << AvgUninitVariablesPerFunction 1386 << " average variables per function.\n" 1387 << " " << MaxUninitAnalysisVariablesPerFunction 1388 << " max variables per function.\n" 1389 << " " << NumUninitAnalysisBlockVisits << " block visits.\n" 1390 << " " << AvgUninitBlockVisitsPerFunction 1391 << " average block visits per function.\n" 1392 << " " << MaxUninitAnalysisBlockVisitsPerFunction 1393 << " max block visits per function.\n"; 1394} 1395