AnalysisBasedWarnings.cpp revision c6dcea93b499b504da22f9921fc198423ad0b13b
1//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines analysis_warnings::[Policy,Executor].
11// Together they are used by Sema to issue warnings based on inexpensive
12// static analysis algorithms in libAnalysis.
13//
14//===----------------------------------------------------------------------===//
15
16#include "clang/Sema/AnalysisBasedWarnings.h"
17#include "clang/AST/DeclCXX.h"
18#include "clang/AST/DeclObjC.h"
19#include "clang/AST/EvaluatedExprVisitor.h"
20#include "clang/AST/ExprCXX.h"
21#include "clang/AST/ExprObjC.h"
22#include "clang/AST/ParentMap.h"
23#include "clang/AST/RecursiveASTVisitor.h"
24#include "clang/AST/StmtCXX.h"
25#include "clang/AST/StmtObjC.h"
26#include "clang/AST/StmtVisitor.h"
27#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
28#include "clang/Analysis/Analyses/ReachableCode.h"
29#include "clang/Analysis/Analyses/ThreadSafety.h"
30#include "clang/Analysis/Analyses/UninitializedValues.h"
31#include "clang/Analysis/AnalysisContext.h"
32#include "clang/Analysis/CFG.h"
33#include "clang/Analysis/CFGStmtMap.h"
34#include "clang/Basic/SourceLocation.h"
35#include "clang/Basic/SourceManager.h"
36#include "clang/Lex/Lexer.h"
37#include "clang/Lex/Preprocessor.h"
38#include "clang/Sema/ScopeInfo.h"
39#include "clang/Sema/SemaInternal.h"
40#include "llvm/ADT/ArrayRef.h"
41#include "llvm/ADT/BitVector.h"
42#include "llvm/ADT/FoldingSet.h"
43#include "llvm/ADT/ImmutableMap.h"
44#include "llvm/ADT/PostOrderIterator.h"
45#include "llvm/ADT/SmallString.h"
46#include "llvm/ADT/SmallVector.h"
47#include "llvm/ADT/StringRef.h"
48#include "llvm/Support/Casting.h"
49#include <algorithm>
50#include <deque>
51#include <iterator>
52#include <vector>
53
54using namespace clang;
55
56//===----------------------------------------------------------------------===//
57// Unreachable code analysis.
58//===----------------------------------------------------------------------===//
59
60namespace {
61  class UnreachableCodeHandler : public reachable_code::Callback {
62    Sema &S;
63  public:
64    UnreachableCodeHandler(Sema &s) : S(s) {}
65
66    void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
67      S.Diag(L, diag::warn_unreachable) << R1 << R2;
68    }
69  };
70}
71
72/// CheckUnreachable - Check for unreachable code.
73static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
74  UnreachableCodeHandler UC(S);
75  reachable_code::FindUnreachableCode(AC, UC);
76}
77
78//===----------------------------------------------------------------------===//
79// Check for missing return value.
80//===----------------------------------------------------------------------===//
81
/// ControlFlowKind - Classifies how control may leave a statement that should
/// return a value: whether execution can fall off the end, and whether any
/// return statement is ever executed.
enum ControlFlowKind {
  UnknownFallThrough,       // No CFG could be built; nothing is known.
  NeverFallThrough,         // Never falls off the end, but may return.
  MaybeFallThrough,         // Might or might not fall off the end.
  AlwaysFallThrough,        // Always falls off the end.
  NeverFallThroughOrReturn  // Never falls off the end and never returns.
};
89
/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(&b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;    // Some live block ends in a 'return'.
  bool HasFakeEdge = false;      // Edge into exit from a throw/MS-asm etc.
  bool HasPlainEdge = false;     // A genuine fall-through edge into the exit.
  bool HasAbnormalEdge = false;  // Edge that is not a viable execution path
                                 // (noreturn element, try blocks, ...).

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
         I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock& B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return. They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG.  This is
    // normal.  We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (isa<CFGStmt>(*ri))
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    // Classify the edge by the last executable statement in the block.
    CFGStmt CS = cast<CFGStmt>(*ri);
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<MSAsmStmt>(S)) {
      // TODO: Verify this is correct.
      HasFakeEdge = true;
      HasLiveReturn = true;
      continue;
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    // If this block has no direct successor edge to the exit block, the path
    // into the exit must be through some abnormal (e.g. EH) edge.
    if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
        == B.succ_end()) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
218
namespace {

/// CheckFallThroughDiagnostics - The set of diagnostic IDs to use when
/// reporting that control falls off the end of a body, specialized for the
/// kind of entity being checked (function, block, or lambda).  A diagnostic
/// ID of 0 means "emit nothing for that case".
struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  // Which kind of entity the diagnostics were built for.
  enum { Function, Block, Lambda } funMode;
  // Location of the entity; used when querying diagnostic mapping.
  SourceLocation FuncLoc;

  /// Diagnostics for an ordinary function or Objective-C method; falling off
  /// the end is a warning here (vs. an error for blocks).
  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    // Don't suggest that template instantiations be marked "noreturn"
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
      isTemplateInstantiation = Function->isTemplateInstantiation();

    if (!isVirtualMethod && !isTemplateInstantiation)
      D.diag_NeverFallThroughOrReturn =
        diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = Function;
    return D;
  }

  /// Diagnostics for a block; falling off the end of a non-void block is an
  /// error, not a warning.
  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn =
      diag::warn_suggest_noreturn_block;
    D.funMode = Block;
    return D;
  }

  /// Diagnostics for a lambda; no "suggest noreturn" note is emitted (ID 0).
  static CheckFallThroughDiagnostics MakeForLambda() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_lambda_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_lambda;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_lambda_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_lambda;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Lambda;
    return D;
  }

  /// checkDiagnostics - Return true when every diagnostic this entity could
  /// trigger is disabled, so the caller can skip the (relatively expensive)
  /// CFG-based analysis entirely.
  bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode == Function) {
      return (ReturnsVoid ||
              D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
                                   FuncLoc) == DiagnosticsEngine::Ignored)
        && (!HasNoReturn ||
            D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr,
                                 FuncLoc) == DiagnosticsEngine::Ignored)
        && (!ReturnsVoid ||
            // NOTE(review): this queries warn_suggest_noreturn_block even in
            // Function mode -- confirm it should not be
            // warn_suggest_noreturn_function.
            D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
              == DiagnosticsEngine::Ignored);
    }

    // For blocks / lambdas.
    return ReturnsVoid && !HasNoReturn
            && ((funMode == Lambda) ||
                D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
                  == DiagnosticsEngine::Ignored);
  }
};

}
317
/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value.  Check that we don't fall off the end
/// of a noreturn function.  We assume that functions and blocks not marked
/// noreturn will return.
///
/// \param D the function, method, block, or lambda declaration being checked.
/// \param Body the body of \p D.
/// \param blkExpr the defining expression when \p D is a BlockDecl; used to
///        recover the block's signature.  (Presumably non-null in that case
///        -- TODO confirm against callers.)
/// \param CD the diagnostic IDs appropriate for this kind of entity.
/// \param AC analysis context used to build the CFG.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    const BlockExpr *blkExpr,
                                    const CheckFallThroughDiagnostics& CD,
                                    AnalysisDeclContext &AC) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  // Determine the return type and noreturn-ness of the entity being checked.
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    ReturnsVoid = FD->getResultType()->isVoidType();
    HasNoReturn = FD->isNoReturn();
  }
  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getResultType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    // For a block, the signature is carried by the type of its defining
    // expression (a block-pointer type).
    QualType BlockTy = blkExpr->getType();
    if (const FunctionType *FT =
          BlockTy->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getResultType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
      return;

  // FIXME: Function try block
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
      case UnknownFallThrough:
        break;

      case MaybeFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_ReturnsNonVoid);
        break;
      case AlwaysFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_ReturnsNonVoid);
        break;
      case NeverFallThroughOrReturn:
        // Suggest marking the entity noreturn (when a diagnostic ID was
        // configured); select the wording for functions vs methods.
        if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
          if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << 0 << FD;
          } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << 1 << MD;
          } else {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
          }
        }
        break;
      case NeverFallThrough:
        break;
    }
  }
}
395
396//===----------------------------------------------------------------------===//
397// -Wuninitialized
398//===----------------------------------------------------------------------===//
399
400namespace {
401/// ContainsReference - A visitor class to search for references to
402/// a particular declaration (the needle) within any evaluated component of an
403/// expression (recursively).
404class ContainsReference : public EvaluatedExprVisitor<ContainsReference> {
405  bool FoundReference;
406  const DeclRefExpr *Needle;
407
408public:
409  ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
410    : EvaluatedExprVisitor<ContainsReference>(Context),
411      FoundReference(false), Needle(Needle) {}
412
413  void VisitExpr(Expr *E) {
414    // Stop evaluating if we already have a reference.
415    if (FoundReference)
416      return;
417
418    EvaluatedExprVisitor<ContainsReference>::VisitExpr(E);
419  }
420
421  void VisitDeclRefExpr(DeclRefExpr *E) {
422    if (E == Needle)
423      FoundReference = true;
424    else
425      EvaluatedExprVisitor<ContainsReference>::VisitDeclRefExpr(E);
426  }
427
428  bool doesContainReference() const { return FoundReference; }
429};
430}
431
432static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
433  QualType VariableTy = VD->getType().getCanonicalType();
434  if (VariableTy->isBlockPointerType() &&
435      !VD->hasAttr<BlocksAttr>()) {
436    S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization) << VD->getDeclName()
437    << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
438    return true;
439  }
440
441  // Don't issue a fixit if there is already an initializer.
442  if (VD->getInit())
443    return false;
444
445  // Suggest possible initialization (if any).
446  std::string Init = S.getFixItZeroInitializerForType(VariableTy);
447  if (Init.empty())
448    return false;
449
450  // Don't suggest a fixit inside macros.
451  if (VD->getLocEnd().isMacroID())
452    return false;
453
454  SourceLocation Loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
455
456  S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
457    << FixItHint::CreateInsertion(Loc, Init);
458  return true;
459}
460
461/// Create a fixit to remove an if-like statement, on the assumption that its
462/// condition is CondVal.
463static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
464                          const Stmt *Else, bool CondVal,
465                          FixItHint &Fixit1, FixItHint &Fixit2) {
466  if (CondVal) {
467    // If condition is always true, remove all but the 'then'.
468    Fixit1 = FixItHint::CreateRemoval(
469        CharSourceRange::getCharRange(If->getLocStart(),
470                                      Then->getLocStart()));
471    if (Else) {
472      SourceLocation ElseKwLoc = Lexer::getLocForEndOfToken(
473          Then->getLocEnd(), 0, S.getSourceManager(), S.getLangOpts());
474      Fixit2 = FixItHint::CreateRemoval(
475          SourceRange(ElseKwLoc, Else->getLocEnd()));
476    }
477  } else {
478    // If condition is always false, remove all but the 'else'.
479    if (Else)
480      Fixit1 = FixItHint::CreateRemoval(
481          CharSourceRange::getCharRange(If->getLocStart(),
482                                        Else->getLocStart()));
483    else
484      Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
485  }
486}
487
/// DiagUninitUse -- Helper function to produce a diagnostic for an
/// uninitialized use of a variable.
///
/// \param VD the variable that was used while uninitialized.
/// \param Use the use, including (for sometimes-uninitialized uses) the
///        branches through which the uninitialized value flows.
/// \param IsCapturedByBlock true when the use happens via a block capture;
///        selects the corresponding wording in the diagnostics.
static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
                          bool IsCapturedByBlock) {
  bool Diagnosed = false;

  // Diagnose each branch which leads to a sometimes-uninitialized use.
  for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
       I != E; ++I) {
    assert(Use.getKind() == UninitUse::Sometimes);

    const Expr *User = Use.getUser();
    const Stmt *Term = I->Terminator;

    // Information used when building the diagnostic.
    unsigned DiagKind;
    StringRef Str;
    SourceRange Range;

    // FixIts to suppress the diagnostic by removing the dead condition.
    // For all binary terminators, branch 0 is taken if the condition is true,
    // and branch 1 is taken if the condition is false.
    int RemoveDiagKind = -1;
    const char *FixitStr =
        S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
                                  : (I->Output ? "1" : "0");
    FixItHint Fixit1, Fixit2;

    // Dispatch on the terminator that selects the uninitialized branch.
    switch (Term->getStmtClass()) {
    default:
      // Don't know how to report this. Just fall back to 'may be used
      // uninitialized'. This happens for range-based for, which the user
      // can't explicitly fix.
      // FIXME: This also happens if the first use of a variable is always
      // uninitialized, eg "for (int n; n < 10; ++n)". We should report that
      // with the 'is uninitialized' diagnostic.
      continue;

    // "condition is true / condition is false".
    case Stmt::IfStmtClass: {
      const IfStmt *IS = cast<IfStmt>(Term);
      DiagKind = 0;
      Str = "if";
      Range = IS->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
      DiagKind = 0;
      Str = "?:";
      Range = CO->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Term);
      // Only && and || produce short-circuit branches worth reporting.
      if (!BO->isLogicalOp())
        continue;
      DiagKind = 0;
      Str = BO->getOpcodeStr();
      Range = BO->getLHS()->getSourceRange();
      RemoveDiagKind = 0;
      if ((BO->getOpcode() == BO_LAnd && I->Output) ||
          (BO->getOpcode() == BO_LOr && !I->Output))
        // true && y -> y, false || y -> y.
        Fixit1 = FixItHint::CreateRemoval(SourceRange(BO->getLocStart(),
                                                      BO->getOperatorLoc()));
      else
        // false && y -> false, true || y -> true.
        Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
      break;
    }

    // "loop is entered / loop is exited".
    case Stmt::WhileStmtClass:
      DiagKind = 1;
      Str = "while";
      Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::ForStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      if (I->Output)
        Fixit1 = FixItHint::CreateRemoval(Range);
      else
        Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;

    // "condition is true / loop is exited".
    case Stmt::DoStmtClass:
      DiagKind = 2;
      Str = "do";
      Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;

    // "switch case is taken".
    case Stmt::CaseStmtClass:
      DiagKind = 3;
      Str = "case";
      Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
      break;
    case Stmt::DefaultStmtClass:
      DiagKind = 3;
      Str = "default";
      Range = cast<DefaultStmt>(Term)->getDefaultLoc();
      break;
    }

    // Main warning at the branch, a note at the use, and (when we know how)
    // a note with fixits that would remove the dead condition.
    S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
      << VD->getDeclName() << IsCapturedByBlock << DiagKind
      << Str << I->Output << Range;
    S.Diag(User->getLocStart(), diag::note_uninit_var_use)
      << IsCapturedByBlock << User->getSourceRange();
    if (RemoveDiagKind != -1)
      S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
        << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;

    Diagnosed = true;
  }

  // No branch could be reported individually: fall back to a single
  // always/maybe-uninitialized warning at the use.
  if (!Diagnosed)
    S.Diag(Use.getUser()->getLocStart(),
           Use.getKind() == UninitUse::Always ? diag::warn_uninit_var
                                              : diag::warn_maybe_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
}
626
/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
/// as a warning. If a particular use is one we omit warnings for, returns
/// false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const UninitUse &Use,
                                     bool alwaysReportSelfInit = false) {

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
    // Inspect the initializer of the variable declaration which is
    // being referenced prior to its initialization. We emit
    // specialized diagnostics for self-initialization, and we
    // specifically avoid warning about self references which take the
    // form of:
    //
    //   int x = x;
    //
    // This is used to indicate to GCC that 'x' is intentionally left
    // uninitialized. Proven code paths which access 'x' in
    // an uninitialized state after this will still warn.
    if (const Expr *Initializer = VD->getInit()) {
      if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
        return false;

      // The reference is buried deeper inside the initializer; emit the
      // self-reference warning if it is actually evaluated there.
      ContainsReference CR(S.Context, DRE);
      CR.Visit(const_cast<Expr*>(Initializer));
      if (CR.doesContainReference()) {
        S.Diag(DRE->getLocStart(),
               diag::warn_uninit_self_reference_in_init)
          << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
        return true;
      }
    }

    DiagUninitUse(S, VD, Use, false);
  } else {
    // The use is a capture of the variable by a block.
    const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
    if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getLocStart(),
             diag::warn_uninit_byref_blockvar_captured_by_block)
        << VD->getDeclName();
    else
      DiagUninitUse(S, VD, Use, true);
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
  if (!SuggestInitializationFixit(S, VD))
    S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
      << VD->getDeclName();

  return true;
}
682
namespace {
  /// FallthroughMapper - AST visitor that records every statement carrying a
  /// fallthrough attribute and, given the CFG, determines whether each
  /// fall-through into a switch label was annotated.
  class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
  public:
    FallthroughMapper(Sema &S)
      : FoundSwitchStatements(false),
        S(S) {
    }

    /// Whether the traversal encountered any switch statement at all.
    bool foundSwitchStatements() const { return FoundSwitchStatements; }

    /// Mark a fallthrough annotation as matched to an actual fall-through
    /// edge, removing it from the pending set.  The annotation must be in
    /// the set (asserted).
    void markFallthroughVisited(const AttributedStmt *Stmt) {
      bool Found = FallthroughStmts.erase(Stmt);
      assert(Found);
      (void)Found;
    }

    typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts;

    /// Fallthrough annotations seen so far that have not been matched to a
    /// fall-through edge.
    const AttrStmts &getFallthroughStmts() const {
      return FallthroughStmts;
    }

    /// checkFallThroughIntoBlock - Walk backwards from the block that starts
    /// at switch label \p B, counting annotated fall-through paths (returned
    /// in \p AnnotatedCnt).  Returns true if at least one unannotated
    /// fall-through path was found, i.e. a warning should be issued.
    bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt) {
      int UnannotatedCnt = 0;
      AnnotatedCnt = 0;

      // Breadth-first walk over the predecessors, skipping edges that
      // cannot be an unannotated fall-through.
      std::deque<const CFGBlock*> BlockQueue;

      std::copy(B.pred_begin(), B.pred_end(), std::back_inserter(BlockQueue));

      while (!BlockQueue.empty()) {
        const CFGBlock *P = BlockQueue.front();
        BlockQueue.pop_front();

        const Stmt *Term = P->getTerminator();
        if (Term && isa<SwitchStmt>(Term))
          continue; // Switch statement, good.

        const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
        if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
          continue; // Previous case label has no statements, good.

        const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel());
        if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end())
          continue; // Case label is preceded with a normal label, good.

        if (P->pred_begin() == P->pred_end()) {  // The block is unreachable.
          // This only catches trivially unreachable blocks.
          for (CFGBlock::const_iterator ElIt = P->begin(), ElEnd = P->end();
               ElIt != ElEnd; ++ElIt) {
            if (const CFGStmt *CS = ElIt->getAs<CFGStmt>()){
              if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
                // An unreachable annotation gets its own warning and still
                // counts as annotated so no second warning is emitted.
                S.Diag(AS->getLocStart(),
                       diag::warn_fallthrough_attr_unreachable);
                markFallthroughVisited(AS);
                ++AnnotatedCnt;
              }
              // Don't care about other unreachable statements.
            }
          }
          // If there are no unreachable statements, this may be a special
          // case in CFG:
          // case X: {
          //    A a;  // A has a destructor.
          //    break;
          // }
          // // <<<< This place is represented by a 'hanging' CFG block.
          // case Y:
          continue;
        }

        const Stmt *LastStmt = getLastStmt(*P);
        if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
          markFallthroughVisited(AS);
          ++AnnotatedCnt;
          continue; // Fallthrough annotation, good.
        }

        if (!LastStmt) { // This block contains no executable statements.
          // Traverse its predecessors.
          std::copy(P->pred_begin(), P->pred_end(),
                    std::back_inserter(BlockQueue));
          continue;
        }

        ++UnannotatedCnt;
      }
      return !!UnannotatedCnt;
    }

    // RecursiveASTVisitor setup.
    bool shouldWalkTypesOfTypeLocs() const { return false; }

    /// Record every statement carrying a fallthrough attribute.
    bool VisitAttributedStmt(AttributedStmt *S) {
      if (asFallThroughAttr(S))
        FallthroughStmts.insert(S);
      return true;
    }

    bool VisitSwitchStmt(SwitchStmt *S) {
      FoundSwitchStatements = true;
      return true;
    }

  private:

    /// Return \p S as an AttributedStmt that carries a FallThroughAttr, or
    /// null when it is not one.
    static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
      if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
        if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
          return AS;
      }
      return 0;
    }

    /// Return the "last" statement of block \p B: its terminator if it has
    /// one, otherwise the last CFGStmt element, with a workaround for
    /// statements the CFGBuilder throws away (see below).
    static const Stmt *getLastStmt(const CFGBlock &B) {
      if (const Stmt *Term = B.getTerminator())
        return Term;
      for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
                                            ElemEnd = B.rend();
                                            ElemIt != ElemEnd; ++ElemIt) {
        if (const CFGStmt *CS = ElemIt->getAs<CFGStmt>())
          return CS->getStmt();
      }
      // Workaround to detect a statement thrown out by CFGBuilder:
      //   case X: {} case Y:
      //   case X: ; case Y:
      if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
        if (!isa<SwitchCase>(SW->getSubStmt()))
          return SW->getSubStmt();

      return 0;
    }

    bool FoundSwitchStatements; // Did traversal see any switch statement?
    AttrStmts FallthroughStmts; // Annotations not yet matched to an edge.
    Sema &S;
  };
}
821
822static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
823                                            bool PerFunction) {
824  // Only perform this analysis when using C++11.  There is no good workflow
825  // for this warning when not using C++11.  There is no good way to silence
826  // the warning (no attribute is available) unless we are using C++11's support
// for generalized attributes.  One could use pragmas to silence the warning,
828  // but as a general solution that is gross and not in the spirit of this
829  // warning.
830  //
// NOTE: This is an intermediate solution.  There are on-going discussions on
832  // how to properly support this warning outside of C++11 with an annotation.
833  if (!AC.getASTContext().getLangOpts().CPlusPlus11)
834    return;
835
836  FallthroughMapper FM(S);
837  FM.TraverseStmt(AC.getBody());
838
839  if (!FM.foundSwitchStatements())
840    return;
841
842  if (PerFunction && FM.getFallthroughStmts().empty())
843    return;
844
845  CFG *Cfg = AC.getCFG();
846
847  if (!Cfg)
848    return;
849
850  int AnnotatedCnt;
851
852  for (CFG::reverse_iterator I = Cfg->rbegin(), E = Cfg->rend(); I != E; ++I) {
853    const CFGBlock *B = *I;
854    const Stmt *Label = B->getLabel();
855
856    if (!Label || !isa<SwitchCase>(Label))
857      continue;
858
859    if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt))
860      continue;
861
862    S.Diag(Label->getLocStart(),
863        PerFunction ? diag::warn_unannotated_fallthrough_per_function
864                    : diag::warn_unannotated_fallthrough);
865
866    if (!AnnotatedCnt) {
867      SourceLocation L = Label->getLocStart();
868      if (L.isMacroID())
869        continue;
870      if (S.getLangOpts().CPlusPlus11) {
871        const Stmt *Term = B->getTerminator();
872        // Skip empty cases.
873        while (B->empty() && !Term && B->succ_size() == 1) {
874          B = *B->succ_begin();
875          Term = B->getTerminator();
876        }
877        if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
878          Preprocessor &PP = S.getPreprocessor();
879          TokenValue Tokens[] = {
880            tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
881            tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
882            tok::r_square, tok::r_square
883          };
884          StringRef AnnotationSpelling = "[[clang::fallthrough]]";
885          StringRef MacroName = PP.getLastMacroWithSpelling(L, Tokens);
886          if (!MacroName.empty())
887            AnnotationSpelling = MacroName;
888          SmallString<64> TextToInsert(AnnotationSpelling);
889          TextToInsert += "; ";
890          S.Diag(L, diag::note_insert_fallthrough_fixit) <<
891              AnnotationSpelling <<
892              FixItHint::CreateInsertion(L, TextToInsert);
893        }
894      }
895      S.Diag(L, diag::note_insert_break_fixit) <<
896        FixItHint::CreateInsertion(L, "break; ");
897    }
898  }
899
900  const FallthroughMapper::AttrStmts &Fallthroughs = FM.getFallthroughStmts();
901  for (FallthroughMapper::AttrStmts::const_iterator I = Fallthroughs.begin(),
902                                                    E = Fallthroughs.end();
903                                                    I != E; ++I) {
904    S.Diag((*I)->getLocStart(), diag::warn_fallthrough_attr_invalid_placement);
905  }
906
907}
908
namespace {
// Pairs the representative (first unsafe) use of a weak object with the
// iterator into the WeakObjectUseMap entry describing all of its uses.
typedef std::pair<const Stmt *,
                  sema::FunctionScopeInfo::WeakObjectUseMap::const_iterator>
        StmtUsesPair;

// Comparator that orders weak-object diagnostics by the source location of
// the representative use, so warnings are emitted deterministically.
class StmtUseSorter {
  const SourceManager &SM;

public:
  explicit StmtUseSorter(const SourceManager &SM) : SM(SM) { }

  bool operator()(const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
    return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
                                        RHS.first->getLocStart());
  }
};
}
926
927static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
928                     const Stmt *S) {
929  assert(S);
930
931  do {
932    switch (S->getStmtClass()) {
933    case Stmt::ForStmtClass:
934    case Stmt::WhileStmtClass:
935    case Stmt::CXXForRangeStmtClass:
936    case Stmt::ObjCForCollectionStmtClass:
937      return true;
938    case Stmt::DoStmtClass: {
939      const Expr *Cond = cast<DoStmt>(S)->getCond();
940      llvm::APSInt Val;
941      if (!Cond->EvaluateAsInt(Val, Ctx))
942        return true;
943      return Val.getBoolValue();
944    }
945    default:
946      break;
947    }
948  } while ((S = PM.getParent(S)));
949
950  return false;
951}
952
953
/// \brief Diagnose repeated reads of ARC __weak objects within one function
/// body (warn_arc_repeated_use_of_weak and its "possible" variant).
///
/// \param CurFn Scope info carrying the weak-object uses recorded during
///        parsing of the body.
/// \param D     The function/method/block declaration being analyzed.
/// \param PM    Parent map, used to decide whether a read occurs in a loop.
static void diagnoseRepeatedUseOfWeak(Sema &S,
                                      const sema::FunctionScopeInfo *CurFn,
                                      const Decl *D,
                                      const ParentMap &PM) {
  typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
  typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
  typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;

  ASTContext &Ctx = S.getASTContext();

  const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();

  // Extract all weak objects that are referenced more than once.
  SmallVector<StmtUsesPair, 8> UsesByStmt;
  for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
       I != E; ++I) {
    const WeakUseVector &Uses = I->second;

    // Find the first read of the weak object.
    WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
    for ( ; UI != UE; ++UI) {
      if (UI->isUnsafe())
        break;
    }

    // If there were only writes to this object, don't warn.
    if (UI == UE)
      continue;

    // If there was only one read, followed by any number of writes, and the
    // read is not within a loop, don't warn. Additionally, don't warn in a
    // loop if the base object is a local variable -- local variables are often
    // changed in loops.
    if (UI == Uses.begin()) {
      WeakUseVector::const_iterator UI2 = UI;
      // Scan the remaining uses for a second unsafe read.
      for (++UI2; UI2 != UE; ++UI2)
        if (UI2->isUnsafe())
          break;

      if (UI2 == UE) {
        // Exactly one read: only warn when it is inside a loop.
        if (!isInLoop(Ctx, PM, UI->getUseExpr()))
          continue;

        const WeakObjectProfileTy &Profile = I->first;
        if (!Profile.isExactProfile())
          continue;

        const NamedDecl *Base = Profile.getBase();
        if (!Base)
          Base = Profile.getProperty();
        assert(Base && "A profile always has a base or property.");

        // Suppress the warning for non-parameter local variables.
        if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
          if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
            continue;
      }
    }

    UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
  }

  if (UsesByStmt.empty())
    return;

  // Sort by first use so that we emit the warnings in a deterministic order.
  std::sort(UsesByStmt.begin(), UsesByStmt.end(),
            StmtUseSorter(S.getSourceManager()));

  // Classify the current code body for better warning text.
  // This enum should stay in sync with the cases in
  // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
  // FIXME: Should we use a common classification enum and the same set of
  // possibilities all throughout Sema?
  enum {
    Function,
    Method,
    Block,
    Lambda
  } FunctionKind;

  if (isa<sema::BlockScopeInfo>(CurFn))
    FunctionKind = Block;
  else if (isa<sema::LambdaScopeInfo>(CurFn))
    FunctionKind = Lambda;
  else if (isa<ObjCMethodDecl>(D))
    FunctionKind = Method;
  else
    FunctionKind = Function;

  // Iterate through the sorted problems and emit warnings for each.
  for (SmallVectorImpl<StmtUsesPair>::const_iterator I = UsesByStmt.begin(),
                                                     E = UsesByStmt.end();
       I != E; ++I) {
    const Stmt *FirstRead = I->first;
    const WeakObjectProfileTy &Key = I->second->first;
    const WeakUseVector &Uses = I->second->second;

    // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
    // may not contain enough information to determine that these are different
    // properties. We can only be 100% sure of a repeated use in certain cases,
    // and we adjust the diagnostic kind accordingly so that the less certain
    // case can be turned off if it is too noisy.
    unsigned DiagKind;
    if (Key.isExactProfile())
      DiagKind = diag::warn_arc_repeated_use_of_weak;
    else
      DiagKind = diag::warn_arc_possible_repeated_use_of_weak;

    // Classify the weak object being accessed for better warning text.
    // This enum should stay in sync with the cases in
    // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
    enum {
      Variable,
      Property,
      ImplicitProperty,
      Ivar
    } ObjectKind;

    // NOTE: this local 'D' intentionally shadows the function parameter 'D'
    // for the remainder of the loop body.
    const NamedDecl *D = Key.getProperty();
    if (isa<VarDecl>(D))
      ObjectKind = Variable;
    else if (isa<ObjCPropertyDecl>(D))
      ObjectKind = Property;
    else if (isa<ObjCMethodDecl>(D))
      ObjectKind = ImplicitProperty;
    else if (isa<ObjCIvarDecl>(D))
      ObjectKind = Ivar;
    else
      llvm_unreachable("Unexpected weak object kind!");

    // Show the first time the object was read.
    S.Diag(FirstRead->getLocStart(), DiagKind)
      << ObjectKind << D << FunctionKind
      << FirstRead->getSourceRange();

    // Print all the other accesses as notes.
    for (WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
         UI != UE; ++UI) {
      if (UI->getUseExpr() == FirstRead)
        continue;
      S.Diag(UI->getUseExpr()->getLocStart(),
             diag::note_arc_weak_also_accessed_here)
        << UI->getUseExpr()->getSourceRange();
    }
  }
}
1100
1101
1102namespace {
1103struct SLocSort {
1104  bool operator()(const UninitUse &a, const UninitUse &b) {
1105    // Prefer a more confident report over a less confident one.
1106    if (a.getKind() != b.getKind())
1107      return a.getKind() > b.getKind();
1108    SourceLocation aLoc = a.getUser()->getLocStart();
1109    SourceLocation bLoc = b.getUser()->getLocStart();
1110    return aLoc.getRawEncoding() < bLoc.getRawEncoding();
1111  }
1112};
1113
// Collects uses of uninitialized variables reported by the analysis and
// emits the corresponding diagnostics (in its destructor) in a stable order.
class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  // Per-variable record: the recorded uses, plus a flag set when the
  // variable's initializer refers to the variable itself (self-init).
  typedef llvm::DenseMap<const VarDecl *, std::pair<UsesVec*, bool> > UsesMap;
  // Lazily allocated in getUses(); both the map and each UsesVec it owns
  // are freed in flushDiagnostics().
  UsesMap *uses;

public:
  UninitValsDiagReporter(Sema &S) : S(S), uses(0) {}
  ~UninitValsDiagReporter() {
    flushDiagnostics();
  }

  // Returns (creating on demand) the record for \p vd.
  std::pair<UsesVec*, bool> &getUses(const VarDecl *vd) {
    if (!uses)
      uses = new UsesMap();

    UsesMap::mapped_type &V = (*uses)[vd];
    UsesVec *&vec = V.first;
    if (!vec)
      vec = new UsesVec();

    return V;
  }

  void handleUseOfUninitVariable(const VarDecl *vd, const UninitUse &use) {
    getUses(vd).first->push_back(use);
  }

  void handleSelfInit(const VarDecl *vd) {
    getUses(vd).second = true;
  }

  // Emits all buffered diagnostics and releases the accumulated storage.
  void flushDiagnostics() {
    if (!uses)
      return;

    // FIXME: This iteration order, and thus the resulting diagnostic order,
    //        is nondeterministic.
    for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
      const VarDecl *vd = i->first;
      const UsesMap::mapped_type &V = i->second;

      UsesVec *vec = V.first;
      bool hasSelfInit = V.second;

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init.  We want
      // to report the diagnostic at the self-init since that is the root cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations.  While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
        std::sort(vec->begin(), vec->end(), SLocSort());

        for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
             ++vi) {
          // If we have self-init, downgrade all uses to 'may be uninitialized'.
          UninitUse Use = hasSelfInit ? UninitUse(vi->getUser(), false) : *vi;

          if (DiagnoseUninitializedUse(S, vd, Use))
            // Skip further diagnostics for this variable. We try to warn only
            // on the first point at which a variable is used uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }
    delete uses;
  }

private:
  // Returns true if any recorded use is definitely (not just possibly)
  // uninitialized.
  static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
  for (UsesVec::const_iterator i = vec->begin(), e = vec->end(); i != e; ++i) {
    if (i->getKind() == UninitUse::Always) {
      return true;
    }
  }
  return false;
}
};
1201}
1202
1203
1204//===----------------------------------------------------------------------===//
1205// -Wthread-safety
1206//===----------------------------------------------------------------------===//
1207namespace clang {
1208namespace thread_safety {
1209typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
1210typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
1211typedef std::list<DelayedDiag> DiagList;
1212
1213struct SortDiagBySourceLocation {
1214  SourceManager &SM;
1215  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}
1216
1217  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
1218    // Although this call will be slow, this is only called when outputting
1219    // multiple warnings.
1220    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
1221  }
1222};
1223
1224namespace {
// Buffers thread-safety diagnostics produced by the analysis so they can be
// sorted by source location and emitted deterministically.
class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  // Fallback locations (start/end of the analyzed function) used when the
  // analysis cannot provide a precise location.
  SourceLocation FunLocation, FunEndLocation;

  // Helper functions
  void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
    // Gracefully handle rare cases when the analysis can't get a more
    // precise source location.
    if (!Loc.isValid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }

 public:
  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
    : S(S), FunLocation(FL), FunEndLocation(FEL) {}

  /// \brief Emit all buffered diagnostics in order of sourcelocation.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function orders diagnostics
  /// and outputs them.
  void emitDiagnostics() {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
         I != E; ++I) {
      S.Diag(I->first.first, I->first.second);
      const OptionalNotes &Notes = I->second;
      for (unsigned NoteI = 0, NoteN = Notes.size(); NoteI != NoteN; ++NoteI)
        S.Diag(Notes[NoteI].first, Notes[NoteI].second);
    }
  }

  // Called when a lock expression cannot be resolved by the analysis.
  void handleInvalidLockExp(SourceLocation Loc) {
    PartialDiagnosticAt Warning(Loc,
                                S.PDiag(diag::warn_cannot_resolve_lock) << Loc);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }
  // Called on an unlock of a mutex that is not held.
  void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
  }

  // Called when a mutex is acquired while already held.
  void handleDoubleLock(Name LockName, SourceLocation Loc) {
    warnLockMismatch(diag::warn_double_lock, LockName, Loc);
  }

  // Called when a mutex's held/not-held state at the end of a scope does not
  // match expectations; \p LEK selects which diagnostic to emit.
  void handleMutexHeldEndOfScope(Name LockName, SourceLocation LocLocked,
                                 SourceLocation LocEndOfScope,
                                 LockErrorKind LEK){
    unsigned DiagID = 0;
    switch (LEK) {
      case LEK_LockedSomePredecessors:
        DiagID = diag::warn_lock_some_predecessors;
        break;
      case LEK_LockedSomeLoopIterations:
        DiagID = diag::warn_expecting_lock_held_on_loop;
        break;
      case LEK_LockedAtEndOfFunction:
        DiagID = diag::warn_no_unlock;
        break;
      case LEK_NotLockedAtEndOfFunction:
        DiagID = diag::warn_expecting_locked;
        break;
    }
    if (LocEndOfScope.isInvalid())
      LocEndOfScope = FunEndLocation;

    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << LockName);
    // Attach a note pointing at where the mutex was originally acquired.
    PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here));
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
  }


  // Called when a mutex is held both exclusively and shared at the two
  // given locations.
  void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
                                SourceLocation Loc2) {
    PartialDiagnosticAt Warning(
      Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName);
    PartialDiagnosticAt Note(
      Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
  }

  // Called when a guarded variable is accessed while no lock at all is held.
  void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
                         AccessKind AK, SourceLocation Loc) {
    assert((POK == POK_VarAccess || POK == POK_VarDereference)
             && "Only works for variables");
    unsigned DiagID = POK == POK_VarAccess?
                        diag::warn_variable_requires_any_lock:
                        diag::warn_var_deref_requires_any_lock;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
      << D->getNameAsString() << getLockKindFromAccessKind(AK));
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }

  // Called when the required mutex is not held.  If \p PossibleMatch is
  // non-null, a similarly named mutex was held, so emit the "precise"
  // variant plus a near-match note.
  void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
                          Name LockName, LockKind LK, SourceLocation Loc,
                          Name *PossibleMatch) {
    unsigned DiagID = 0;
    if (PossibleMatch) {
      switch (POK) {
        case POK_VarAccess:
          DiagID = diag::warn_variable_requires_lock_precise;
          break;
        case POK_VarDereference:
          DiagID = diag::warn_var_deref_requires_lock_precise;
          break;
        case POK_FunctionCall:
          DiagID = diag::warn_fun_requires_lock_precise;
          break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
        << D->getNameAsString() << LockName << LK);
      PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
                               << *PossibleMatch);
      Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
    } else {
      switch (POK) {
        case POK_VarAccess:
          DiagID = diag::warn_variable_requires_lock;
          break;
        case POK_VarDereference:
          DiagID = diag::warn_var_deref_requires_lock;
          break;
        case POK_FunctionCall:
          DiagID = diag::warn_fun_requires_lock;
          break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
        << D->getNameAsString() << LockName << LK);
      Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
    }
  }

  // Called when a function annotated to exclude \p LockName is called while
  // that mutex is held.
  void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
    PartialDiagnosticAt Warning(Loc,
      S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName);
    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
  }
};
1365}
1366}
1367}
1368
1369//===----------------------------------------------------------------------===//
1370// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
1371//  warnings on a function, method, or block.
1372//===----------------------------------------------------------------------===//
1373
1374clang::sema::AnalysisBasedWarnings::Policy::Policy() {
1375  enableCheckFallThrough = 1;
1376  enableCheckUnreachable = 0;
1377  enableThreadSafetyAnalysis = 0;
1378}
1379
1380clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
1381  : S(s),
1382    NumFunctionsAnalyzed(0),
1383    NumFunctionsWithBadCFGs(0),
1384    NumCFGBlocks(0),
1385    MaxCFGBlocksPerFunction(0),
1386    NumUninitAnalysisFunctions(0),
1387    NumUninitAnalysisVariables(0),
1388    MaxUninitAnalysisVariablesPerFunction(0),
1389    NumUninitAnalysisBlockVisits(0),
1390    MaxUninitAnalysisBlockVisitsPerFunction(0) {
1391  DiagnosticsEngine &D = S.getDiagnostics();
1392  DefaultPolicy.enableCheckUnreachable = (unsigned)
1393    (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
1394        DiagnosticsEngine::Ignored);
1395  DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
1396    (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
1397     DiagnosticsEngine::Ignored);
1398
1399}
1400
1401static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
1402  for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
1403       i = fscope->PossiblyUnreachableDiags.begin(),
1404       e = fscope->PossiblyUnreachableDiags.end();
1405       i != e; ++i) {
1406    const sema::PossiblyUnreachableDiag &D = *i;
1407    S.Diag(D.Loc, D.PD);
1408  }
1409}
1410
/// \brief Run all enabled analysis-based warnings on a just-parsed function,
/// method, block, or lambda body.
///
/// \param P       Policy selecting which checks run for this declaration.
/// \param fscope  Scope info holding delayed diagnostics and recorded
///                weak-object uses for the body.
/// \param D       The declaration whose body is analyzed.
/// \param blkExpr The enclosing BlockExpr when analyzing a block; otherwise
///                null (forwarded to the fall-through check).
void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
                                     sema::FunctionScopeInfo *fscope,
                                     const Decl *D, const BlockExpr *blkExpr) {

  // We avoid doing analysis-based warnings when there are errors for
  // two reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Do not do any analysis for declarations in system headers if we are
  // going to just ignore them.
  if (Diags.getSuppressSystemWarnings() &&
      S.SourceMgr.isInSystemHeader(D->getLocation()))
    return;

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    return;

  if (Diags.hasUncompilableErrorOccurred() || Diags.hasFatalErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.
    flushDiagnostics(S, fscope);
    return;
  }

  const Stmt *Body = D->getBody();
  assert(Body);

  // Local, unmanaged analysis context: nothing is cached across declarations.
  AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0, D);

  // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
  // explosion for destructors that can result and the compile time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;
  AC.getCFGBuildOptions().AddTemporaryDtors = true;

  // Force that certain expressions appear as CFGElements in the CFG.  This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring.  This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately.  This is essentially a layering violation.
  if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis) {
    // Unreachable code analysis and thread safety require a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  }
  else {
    AC.getCFGBuildOptions()
      .setAlwaysAdd(Stmt::BinaryOperatorClass)
      .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
      .setAlwaysAdd(Stmt::BlockExprClass)
      .setAlwaysAdd(Stmt::CStyleCastExprClass)
      .setAlwaysAdd(Stmt::DeclRefExprClass)
      .setAlwaysAdd(Stmt::ImplicitCastExprClass)
      .setAlwaysAdd(Stmt::UnaryOperatorClass)
      .setAlwaysAdd(Stmt::AttributedStmtClass);
  }

  // Construct the analysis context with the specified CFG build options.

  // Emit delayed diagnostics.  Each one is emitted only if its associated
  // statement is reachable in the CFG (or if it cannot be mapped to a block).
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

    // Register the expressions with the CFGBuilder.
    for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
         i = fscope->PossiblyUnreachableDiags.begin(),
         e = fscope->PossiblyUnreachableDiags.end();
         i != e; ++i) {
      if (const Stmt *stmt = i->stmt)
        AC.registerForcedBlockExpression(stmt);
    }

    if (AC.getCFG()) {
      analyzed = true;
      for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
            i = fscope->PossiblyUnreachableDiags.begin(),
            e = fscope->PossiblyUnreachableDiags.end();
            i != e; ++i)
      {
        const sema::PossiblyUnreachableDiag &D = *i;
        bool processed = false;
        if (const Stmt *stmt = i->stmt) {
          const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
          CFGReverseBlockReachabilityAnalysis *cra =
              AC.getCFGReachablityAnalysis();
          // FIXME: We should be able to assert that block is non-null, but
          // the CFG analysis can skip potentially-evaluated expressions in
          // edge cases; see test/Sema/vla-2.c.
          if (block && cra) {
            // Can this block be reached from the entrance?
            if (cra->isReachable(&AC.getCFG()->getEntry(), block))
              S.Diag(D.Loc, D.PD);
            processed = true;
          }
        }
        if (!processed) {
          // Emit the warning anyway if we cannot map to a basic block.
          S.Diag(D.Loc, D.PD);
        }
      }
    }

    // CFG construction failed: emit everything unconditionally.
    if (!analyzed)
      flushDiagnostics(S, fscope);
  }


  // Warning: check missing 'return'
  if (P.enableCheckFallThrough) {
    // Pick diagnostics appropriate for a block, a lambda's operator(), or an
    // ordinary function.
    const CheckFallThroughDiagnostics &CD =
      (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
       : (isa<CXXMethodDecl>(D) &&
          cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
          cast<CXXMethodDecl>(D)->getParent()->isLambda())
            ? CheckFallThroughDiagnostics::MakeForLambda()
            : CheckFallThroughDiagnostics::MakeForFunction(D));
    CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
  }

  // Warning: check for unreachable code
  if (P.enableCheckUnreachable) {
    // Only check for unreachable code on non-template instantiations.
    // Different template instantiations can effectively change the control-flow
    // and it is very difficult to prove that a snippet of code in a template
    // is unreachable for all instantiations.
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
      isTemplateInstantiation = Function->isTemplateInstantiation();
    if (!isTemplateInstantiation)
      CheckUnreachable(S, AC);
  }

  // Check for thread safety violations
  if (P.enableThreadSafetyAnalysis) {
    SourceLocation FL = AC.getDecl()->getLocation();
    SourceLocation FEL = AC.getDecl()->getLocEnd();
    thread_safety::ThreadSafetyReporter Reporter(S, FL, FEL);
    // Also report beta-quality thread-safety warnings when enabled.
    if (Diags.getDiagnosticLevel(diag::warn_thread_safety_beta,D->getLocStart())
        != DiagnosticsEngine::Ignored)
      Reporter.setIssueBetaWarnings(true);

    thread_safety::runThreadSafetyAnalysis(AC, Reporter);
    Reporter.emitDiagnostics();
  }

  // Run the uninitialized-variables analysis only if at least one of its
  // warnings is enabled.
  if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
      != DiagnosticsEngine::Ignored ||
      Diags.getDiagnosticLevel(diag::warn_sometimes_uninit_var,D->getLocStart())
      != DiagnosticsEngine::Ignored ||
      Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
      != DiagnosticsEngine::Ignored) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
                                        reporter, stats);

      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
            std::max(MaxUninitAnalysisVariablesPerFunction,
                     stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
            std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                     stats.NumBlockVisits);
      }
    }
  }

  // Switch-case fallthrough check; the per-function variant is used when
  // only the weaker warning is enabled.
  bool FallThroughDiagFull =
      Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough,
                               D->getLocStart()) != DiagnosticsEngine::Ignored;
  bool FallThroughDiagPerFunction =
      Diags.getDiagnosticLevel(diag::warn_unannotated_fallthrough_per_function,
                               D->getLocStart()) != DiagnosticsEngine::Ignored;
  if (FallThroughDiagFull || FallThroughDiagPerFunction) {
    DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
  }

  if (S.getLangOpts().ObjCARCWeak &&
      Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak,
                               D->getLocStart()) != DiagnosticsEngine::Ignored)
    diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detail information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
    } else {
      ++NumFunctionsWithBadCFGs;
    }
  }
}
1619
1620void clang::sema::AnalysisBasedWarnings::PrintStats() const {
1621  llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
1622
1623  unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
1624  unsigned AvgCFGBlocksPerFunction =
1625      !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
1626  llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
1627               << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
1628               << "  " << NumCFGBlocks << " CFG blocks built.\n"
1629               << "  " << AvgCFGBlocksPerFunction
1630               << " average CFG blocks per function.\n"
1631               << "  " << MaxCFGBlocksPerFunction
1632               << " max CFG blocks per function.\n";
1633
1634  unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
1635      : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
1636  unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
1637      : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
1638  llvm::errs() << NumUninitAnalysisFunctions
1639               << " functions analyzed for uninitialiazed variables\n"
1640               << "  " << NumUninitAnalysisVariables << " variables analyzed.\n"
1641               << "  " << AvgUninitVariablesPerFunction
1642               << " average variables per function.\n"
1643               << "  " << MaxUninitAnalysisVariablesPerFunction
1644               << " max variables per function.\n"
1645               << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
1646               << "  " << AvgUninitBlockVisitsPerFunction
1647               << " average block visits per function.\n"
1648               << "  " << MaxUninitAnalysisBlockVisitsPerFunction
1649               << " max block visits per function.\n";
1650}
1651