// AnalysisBasedWarnings.cpp revision a34194f035096dd8dce10574e3a186da968aa211
1//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines analysis_warnings::[Policy,Executor].
11// Together they are used by Sema to issue warnings based on inexpensive
12// static analysis algorithms in libAnalysis.
13//
14//===----------------------------------------------------------------------===//
15
16#include "clang/Sema/AnalysisBasedWarnings.h"
17#include "clang/Sema/SemaInternal.h"
18#include "clang/Sema/ScopeInfo.h"
19#include "clang/Basic/SourceManager.h"
20#include "clang/Basic/SourceLocation.h"
21#include "clang/Lex/Preprocessor.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/DeclCXX.h"
24#include "clang/AST/ExprObjC.h"
25#include "clang/AST/ExprCXX.h"
26#include "clang/AST/StmtObjC.h"
27#include "clang/AST/StmtCXX.h"
28#include "clang/AST/EvaluatedExprVisitor.h"
29#include "clang/AST/StmtVisitor.h"
30#include "clang/Analysis/AnalysisContext.h"
31#include "clang/Analysis/CFG.h"
32#include "clang/Analysis/Analyses/ReachableCode.h"
33#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
34#include "clang/Analysis/Analyses/ThreadSafety.h"
35#include "clang/Analysis/CFGStmtMap.h"
36#include "clang/Analysis/Analyses/UninitializedValues.h"
37#include "llvm/ADT/BitVector.h"
38#include "llvm/ADT/FoldingSet.h"
39#include "llvm/ADT/ImmutableMap.h"
40#include "llvm/ADT/PostOrderIterator.h"
41#include "llvm/ADT/SmallVector.h"
42#include "llvm/ADT/StringRef.h"
43#include "llvm/Support/Casting.h"
44#include <algorithm>
45#include <vector>
46
47using namespace clang;
48
49//===----------------------------------------------------------------------===//
50// Unreachable code analysis.
51//===----------------------------------------------------------------------===//
52
53namespace {
54  class UnreachableCodeHandler : public reachable_code::Callback {
55    Sema &S;
56  public:
57    UnreachableCodeHandler(Sema &s) : S(s) {}
58
59    void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
60      S.Diag(L, diag::warn_unreachable) << R1 << R2;
61    }
62  };
63}
64
65/// CheckUnreachable - Check for unreachable code.
66static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
67  UnreachableCodeHandler UC(S);
68  reachable_code::FindUnreachableCode(AC, UC);
69}
70
71//===----------------------------------------------------------------------===//
72// Check for missing return value.
73//===----------------------------------------------------------------------===//
74
/// ControlFlowKind - Classifies how control leaves a statement: whether
/// execution can fall off the end, and whether any live return was seen.
enum ControlFlowKind {
  UnknownFallThrough,       // No CFG could be built; nothing can be concluded.
  NeverFallThrough,         // Never falls off the end, but may return.
  MaybeFallThrough,         // Might or might not fall off the end.
  AlwaysFallThrough,        // Always falls off the end.
  NeverFallThroughOrReturn  // Never falls off the end and never returns.
};
82
/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
  CFG *cfg = AC.getCFG();
  // Without a CFG (e.g. the body failed to build) nothing can be concluded.
  if (cfg == 0) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(&b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;     // A reachable 'return' feeds the exit.
  bool HasFakeEdge = false;       // A throw or MS inline-asm edge.
  bool HasPlainEdge = false;      // Ordinary fall-through into the exit.
  bool HasAbnormalEdge = false;   // A no-return or exceptional edge.

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
	 I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock& B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return. They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG.  This is
    // normal.  We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (isa<CFGStmt>(*ri))
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    // Classify the last statement-bearing element of the block.
    CFGStmt CS = cast<CFGStmt>(*ri);
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      // MS-style inline assembly can transfer control arbitrarily, so treat
      // it as both a fake edge and a possible return.
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    // If the exit block is not actually a successor of this block, the only
    // way control reaches the exit from here is exceptional.
    if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
        == B.succ_end()) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
212
namespace {

/// CheckFallThroughDiagnostics - Bundles the diagnostic IDs used when
/// reporting fall-off-the-end problems, chosen per kind of code body
/// (function, block, or lambda).  A diagnostic ID of 0 means "don't emit".
struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  // Which kind of body these diagnostics were built for.
  enum { Function, Block, Lambda } funMode;
  // Location used when querying whether a diagnostic is enabled.
  SourceLocation FuncLoc;

  /// Diagnostics for an ordinary function or Objective-C method; falling
  /// off the end of a non-void function is (only) a warning here.
  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
    bool isVirtualMethod = false;
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
      isVirtualMethod = Method->isVirtual();

    // Don't suggest that template instantiations be marked "noreturn"
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
      isTemplateInstantiation = Function->isTemplateInstantiation();

    if (!isVirtualMethod && !isTemplateInstantiation)
      D.diag_NeverFallThroughOrReturn =
        diag::warn_suggest_noreturn_function;
    else
      D.diag_NeverFallThroughOrReturn = 0;

    D.funMode = Function;
    return D;
  }

  /// Diagnostics for a block; falling off a non-void block is an error.
  static CheckFallThroughDiagnostics MakeForBlock() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::err_maybe_falloff_nonvoid_block;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_block_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::err_falloff_nonvoid_block;
    D.diag_NeverFallThroughOrReturn =
      diag::warn_suggest_noreturn_block;
    D.funMode = Block;
    return D;
  }

  /// Diagnostics for a lambda; no "suggest noreturn" diagnostic is emitted.
  static CheckFallThroughDiagnostics MakeForLambda() {
    CheckFallThroughDiagnostics D;
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::err_noreturn_lambda_has_return_expr;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_lambda;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::err_noreturn_lambda_has_return_expr;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_lambda;
    D.diag_NeverFallThroughOrReturn = 0;
    D.funMode = Lambda;
    return D;
  }

  /// checkDiagnostics - Returns true when every diagnostic that could be
  /// emitted for this configuration is disabled, letting the caller skip
  /// the fall-through analysis entirely.
  bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
                        bool HasNoReturn) const {
    if (funMode == Function) {
      return (ReturnsVoid ||
              D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
                                   FuncLoc) == DiagnosticsEngine::Ignored)
        && (!HasNoReturn ||
            D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr,
                                 FuncLoc) == DiagnosticsEngine::Ignored)
        && (!ReturnsVoid ||
            // NOTE(review): this queries the *block* variant of the
            // suggest-noreturn warning even though MakeForFunction assigns
            // warn_suggest_noreturn_function — confirm this is intended.
            D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
              == DiagnosticsEngine::Ignored);
    }

    // For blocks / lambdas.
    return ReturnsVoid && !HasNoReturn
            && ((funMode == Lambda) ||
                D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
                  == DiagnosticsEngine::Ignored);
  }
};

}
311
/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value.  Check that we don't fall off the end
/// of a noreturn function.  We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    const BlockExpr *blkExpr,
                                    const CheckFallThroughDiagnostics& CD,
                                    AnalysisDeclContext &AC) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  // Determine the declared return type and noreturn-ness for the three kinds
  // of bodies handled here: functions, Objective-C methods, and blocks.
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    ReturnsVoid = FD->getResultType()->isVoidType();
    HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
       FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
  }
  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getResultType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  }
  else if (isa<BlockDecl>(D)) {
    // For a block, the signature lives on the BlockExpr's pointer type.
    QualType BlockTy = blkExpr->getType();
    if (const FunctionType *FT =
          BlockTy->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getResultType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
      return;

  // FIXME: Function try block
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
      case UnknownFallThrough:
        break;

      case MaybeFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_MaybeFallThrough_ReturnsNonVoid);
        break;
      case AlwaysFallThrough:
        if (HasNoReturn)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_HasNoReturn);
        else if (!ReturnsVoid)
          S.Diag(Compound->getRBracLoc(),
                 CD.diag_AlwaysFallThrough_ReturnsNonVoid);
        break;
      case NeverFallThroughOrReturn:
        // Suggest marking the declaration "noreturn", unless the diagnostic
        // was disabled (ID of 0) for this kind of body.
        if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
          if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << 0 << FD;
          } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
              << 1 << MD;
          } else {
            S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
          }
        }
        break;
      case NeverFallThrough:
        break;
    }
  }
}
390
391//===----------------------------------------------------------------------===//
392// -Wuninitialized
393//===----------------------------------------------------------------------===//
394
395namespace {
396/// ContainsReference - A visitor class to search for references to
397/// a particular declaration (the needle) within any evaluated component of an
398/// expression (recursively).
399class ContainsReference : public EvaluatedExprVisitor<ContainsReference> {
400  bool FoundReference;
401  const DeclRefExpr *Needle;
402
403public:
404  ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
405    : EvaluatedExprVisitor<ContainsReference>(Context),
406      FoundReference(false), Needle(Needle) {}
407
408  void VisitExpr(Expr *E) {
409    // Stop evaluating if we already have a reference.
410    if (FoundReference)
411      return;
412
413    EvaluatedExprVisitor<ContainsReference>::VisitExpr(E);
414  }
415
416  void VisitDeclRefExpr(DeclRefExpr *E) {
417    if (E == Needle)
418      FoundReference = true;
419    else
420      EvaluatedExprVisitor<ContainsReference>::VisitDeclRefExpr(E);
421  }
422
423  bool doesContainReference() const { return FoundReference; }
424};
425}
426
427static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
428  QualType VariableTy = VD->getType().getCanonicalType();
429  if (VariableTy->isBlockPointerType() &&
430      !VD->hasAttr<BlocksAttr>()) {
431    S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization) << VD->getDeclName()
432    << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
433    return true;
434  }
435
436  // Don't issue a fixit if there is already an initializer.
437  if (VD->getInit())
438    return false;
439
440  // Suggest possible initialization (if any).
441  const char *Init = S.getFixItZeroInitializerForType(VariableTy);
442  if (!Init)
443    return false;
444  SourceLocation Loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
445
446  S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
447    << FixItHint::CreateInsertion(Loc, Init);
448  return true;
449}
450
/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
/// as a warning. If a particular use is one we omit warnings for, returns
/// false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const Expr *E, bool isAlwaysUninit,
                                     bool alwaysReportSelfInit = false) {
  bool isSelfInit = false;

  // A DeclRefExpr use is a direct read of the variable; any other use
  // reaching here is a capture of the variable by a block (cast below).
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    if (isAlwaysUninit) {
      // Inspect the initializer of the variable declaration which is
      // being referenced prior to its initialization. We emit
      // specialized diagnostics for self-initialization, and we
      // specifically avoid warning about self references which take the
      // form of:
      //
      //   int x = x;
      //
      // This is used to indicate to GCC that 'x' is intentionally left
      // uninitialized. Proven code paths which access 'x' in
      // an uninitialized state after this will still warn.
      //
      // TODO: Should we suppress maybe-uninitialized warnings for
      // variables initialized in this way?
      if (const Expr *Initializer = VD->getInit()) {
        if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
          return false;

        ContainsReference CR(S.Context, DRE);
        CR.Visit(const_cast<Expr*>(Initializer));
        isSelfInit = CR.doesContainReference();
      }
      if (isSelfInit) {
        S.Diag(DRE->getLocStart(),
               diag::warn_uninit_self_reference_in_init)
        << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
      } else {
        S.Diag(DRE->getLocStart(), diag::warn_uninit_var)
          << VD->getDeclName() << DRE->getSourceRange();
      }
    } else {
      // The variable is only *possibly* uninitialized at this use.
      S.Diag(DRE->getLocStart(), diag::warn_maybe_uninit_var)
        << VD->getDeclName() << DRE->getSourceRange();
    }
  } else {
    const BlockExpr *BE = cast<BlockExpr>(E);
    if (VD->getType()->isBlockPointerType() &&
        !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getLocStart(), diag::warn_uninit_byref_blockvar_captured_by_block)
        << VD->getDeclName();
    else
      S.Diag(BE->getLocStart(),
             isAlwaysUninit ? diag::warn_uninit_var_captured_by_block
                            : diag::warn_maybe_uninit_var_captured_by_block)
        << VD->getDeclName();
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
  if (!isSelfInit && !SuggestInitializationFixit(S, VD))
    S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
      << VD->getDeclName();

  return true;
}
519
520typedef std::pair<const Expr*, bool> UninitUse;
521
522namespace {
523struct SLocSort {
524  bool operator()(const UninitUse &a, const UninitUse &b) {
525    SourceLocation aLoc = a.first->getLocStart();
526    SourceLocation bLoc = b.first->getLocStart();
527    return aLoc.getRawEncoding() < bLoc.getRawEncoding();
528  }
529};
530
/// UninitValsDiagReporter - Collects the uninitialized-variable uses reported
/// by the analysis, then emits the corresponding diagnostics (sorted, with
/// self-init handled specially) when flushed or destroyed.
class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  // Maps each variable to its recorded uses plus a flag recording whether the
  // variable had a self-referencing initializer.
  typedef llvm::DenseMap<const VarDecl *, std::pair<UsesVec*, bool> > UsesMap;
  // Lazily allocated; both the map and its vectors are owned here and
  // released in flushDiagnostics().
  UsesMap *uses;

public:
  UninitValsDiagReporter(Sema &S) : S(S), uses(0) {}
  ~UninitValsDiagReporter() {
    // Emit (and free) anything still buffered.
    flushDiagnostics();
  }

  /// Return the (uses vector, self-init flag) entry for a variable, creating
  /// the map and the vector on first access.
  std::pair<UsesVec*, bool> &getUses(const VarDecl *vd) {
    if (!uses)
      uses = new UsesMap();

    UsesMap::mapped_type &V = (*uses)[vd];
    UsesVec *&vec = V.first;
    if (!vec)
      vec = new UsesVec();

    return V;
  }

  // Analysis callback: record a (maybe or always) uninitialized use of 'vd'.
  void handleUseOfUninitVariable(const Expr *ex, const VarDecl *vd,
                                 bool isAlwaysUninit) {
    getUses(vd).first->push_back(std::make_pair(ex, isAlwaysUninit));
  }

  // Analysis callback: record that 'vd' was initialized with itself.
  void handleSelfInit(const VarDecl *vd) {
    getUses(vd).second = true;
  }

  /// Emit all buffered diagnostics and release the owned storage.
  void flushDiagnostics() {
    if (!uses)
      return;

    for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
      const VarDecl *vd = i->first;
      const UsesMap::mapped_type &V = i->second;

      UsesVec *vec = V.first;
      bool hasSelfInit = V.second;

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init.  We want
      // to report the diagnostic at the self-init since that is the root cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd, vd->getInit()->IgnoreParenCasts(),
                                 /* isAlwaysUninit */ true,
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations.  While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
        std::sort(vec->begin(), vec->end(), SLocSort());

        for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
             ++vi) {
          if (DiagnoseUninitializedUse(S, vd, vi->first,
                                        /*isAlwaysUninit=*/vi->second))
            // Skip further diagnostics for this variable. We try to warn only
            // on the first point at which a variable is used uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }
    delete uses;
  }

private:
  /// Return true if any recorded use is a definitely-uninitialized use.
  static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
  for (UsesVec::const_iterator i = vec->begin(), e = vec->end(); i != e; ++i) {
    if (i->second) {
      return true;
    }
  }
  return false;
}
};
614}
615
616
617//===----------------------------------------------------------------------===//
618// -Wthread-safety
619//===----------------------------------------------------------------------===//
namespace clang {
namespace thread_safety {
// A buffered diagnostic together with the optional notes attached to it.
typedef llvm::SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef llvm::SmallVector<DelayedDiag, 4> DiagList;

/// Comparator that orders delayed diagnostics by their position in the
/// translation unit, so batched warnings are emitted deterministically.
struct SortDiagBySourceLocation {
  Sema &S;
  SortDiagBySourceLocation(Sema &S) : S(S) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return S.getSourceManager().isBeforeInTranslationUnit(left.first.first,
                                                          right.first.first);
  }
};
637
638namespace {
639class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
640  Sema &S;
641  DiagList Warnings;
642  SourceLocation FunLocation, FunEndLocation;
643
644  // Helper functions
645  void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
646    // Gracefully handle rare cases when the analysis can't get a more
647    // precise source location.
648    if (!Loc.isValid())
649      Loc = FunLocation;
650    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << LockName);
651    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
652  }
653
654 public:
655  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
656    : S(S), FunLocation(FL), FunEndLocation(FEL) {}
657
658  /// \brief Emit all buffered diagnostics in order of sourcelocation.
659  /// We need to output diagnostics produced while iterating through
660  /// the lockset in deterministic order, so this function orders diagnostics
661  /// and outputs them.
662  void emitDiagnostics() {
663    SortDiagBySourceLocation SortDiagBySL(S);
664    sort(Warnings.begin(), Warnings.end(), SortDiagBySL);
665    for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
666         I != E; ++I) {
667      S.Diag(I->first.first, I->first.second);
668      const OptionalNotes &Notes = I->second;
669      for (unsigned NoteI = 0, NoteN = Notes.size(); NoteI != NoteN; ++NoteI)
670        S.Diag(Notes[NoteI].first, Notes[NoteI].second);
671    }
672  }
673
674  void handleInvalidLockExp(SourceLocation Loc) {
675    PartialDiagnosticAt Warning(Loc,
676                                S.PDiag(diag::warn_cannot_resolve_lock) << Loc);
677    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
678  }
679  void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
680    warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
681  }
682
683  void handleDoubleLock(Name LockName, SourceLocation Loc) {
684    warnLockMismatch(diag::warn_double_lock, LockName, Loc);
685  }
686
687  void handleMutexHeldEndOfScope(Name LockName, SourceLocation LocLocked,
688                                 SourceLocation LocEndOfScope,
689                                 LockErrorKind LEK){
690    unsigned DiagID = 0;
691    switch (LEK) {
692      case LEK_LockedSomePredecessors:
693        DiagID = diag::warn_lock_some_predecessors;
694        break;
695      case LEK_LockedSomeLoopIterations:
696        DiagID = diag::warn_expecting_lock_held_on_loop;
697        break;
698      case LEK_LockedAtEndOfFunction:
699        DiagID = diag::warn_no_unlock;
700        break;
701    }
702    if (LocEndOfScope.isInvalid())
703      LocEndOfScope = FunEndLocation;
704
705    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << LockName);
706    PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here));
707    Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
708  }
709
710
711  void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
712                                SourceLocation Loc2) {
713    PartialDiagnosticAt Warning(
714      Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName);
715    PartialDiagnosticAt Note(
716      Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) << LockName);
717    Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
718  }
719
720  void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
721                         AccessKind AK, SourceLocation Loc) {
722    assert((POK == POK_VarAccess || POK == POK_VarDereference)
723             && "Only works for variables");
724    unsigned DiagID = POK == POK_VarAccess?
725                        diag::warn_variable_requires_any_lock:
726                        diag::warn_var_deref_requires_any_lock;
727    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
728      << D->getName() << getLockKindFromAccessKind(AK));
729    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
730  }
731
732  void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
733                          Name LockName, LockKind LK, SourceLocation Loc) {
734    unsigned DiagID = 0;
735    switch (POK) {
736      case POK_VarAccess:
737        DiagID = diag::warn_variable_requires_lock;
738        break;
739      case POK_VarDereference:
740        DiagID = diag::warn_var_deref_requires_lock;
741        break;
742      case POK_FunctionCall:
743        DiagID = diag::warn_fun_requires_lock;
744        break;
745    }
746    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
747      << D->getName() << LockName << LK);
748    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
749  }
750
751  void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
752    PartialDiagnosticAt Warning(Loc,
753      S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName);
754    Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
755  }
756};
757}
758}
759}
760
761//===----------------------------------------------------------------------===//
762// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
763//  warnings on a function, method, or block.
764//===----------------------------------------------------------------------===//
765
/// Policy - Default policy: only the fall-through check is enabled here;
/// the unreachable-code and thread-safety analyses start disabled and are
/// turned on by the AnalysisBasedWarnings constructor below when their
/// corresponding warnings are not ignored.
clang::sema::AnalysisBasedWarnings::Policy::Policy() {
  enableCheckFallThrough = 1;
  enableCheckUnreachable = 0;
  enableThreadSafetyAnalysis = 0;
}
771
/// AnalysisBasedWarnings - Zero all analysis statistics, then derive the
/// default policy from the current diagnostic state: each optional analysis
/// is enabled iff its representative warning is not ignored.
clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
  : S(s),
    NumFunctionsAnalyzed(0),
    NumFunctionsWithBadCFGs(0),
    NumCFGBlocks(0),
    MaxCFGBlocksPerFunction(0),
    NumUninitAnalysisFunctions(0),
    NumUninitAnalysisVariables(0),
    MaxUninitAnalysisVariablesPerFunction(0),
    NumUninitAnalysisBlockVisits(0),
    MaxUninitAnalysisBlockVisitsPerFunction(0) {
  DiagnosticsEngine &D = S.getDiagnostics();
  // Enable the unreachable-code analysis iff warn_unreachable is live.
  DefaultPolicy.enableCheckUnreachable = (unsigned)
    (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
        DiagnosticsEngine::Ignored);
  // Enable the thread-safety analysis iff warn_double_lock is live.
  DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
    (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
     DiagnosticsEngine::Ignored);

}
792
793static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
794  for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
795       i = fscope->PossiblyUnreachableDiags.begin(),
796       e = fscope->PossiblyUnreachableDiags.end();
797       i != e; ++i) {
798    const sema::PossiblyUnreachableDiag &D = *i;
799    S.Diag(D.Loc, D.PD);
800  }
801}
802
/// Run the enabled per-function analyses (fall-through, unreachable code,
/// thread safety, uninitialized variables) over the body of \p D and emit
/// the resulting warnings.  \p P selects which analyses run, \p fscope
/// carries diagnostics Sema deferred during parsing, and \p blkExpr is the
/// block expression when \p D is a BlockDecl (used by the fall-through
/// check).
void clang::sema::
AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
                                     sema::FunctionScopeInfo *fscope,
                                     const Decl *D, const BlockExpr *blkExpr) {

  // We avoid doing analysis-based warnings when there are errors for
  // two reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Do not do any analysis for declarations in system headers if we are
  // going to just ignore them.
  if (Diags.getSuppressSystemWarnings() &&
      S.SourceMgr.isInSystemHeader(D->getLocation()))
    return;

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    return;

  if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.  They were deferred
    // pending reachability analysis, which we are skipping (see above).
    flushDiagnostics(S, fscope);
    return;
  }

  const Stmt *Body = D->getBody();
  assert(Body);

  // Standalone context: no manager and no parent context.
  AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0,  D, 0);

  // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
  // explosion for destructors that can result and the compile time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;

  // Force that certain expressions appear as CFGElements in the CFG.  This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring.  This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately.  This is essentially a layering violation.
  if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis) {
    // Unreachable code analysis and thread safety require a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  }
  else {
    AC.getCFGBuildOptions()
      .setAlwaysAdd(Stmt::BinaryOperatorClass)
      .setAlwaysAdd(Stmt::BlockExprClass)
      .setAlwaysAdd(Stmt::CStyleCastExprClass)
      .setAlwaysAdd(Stmt::DeclRefExprClass)
      .setAlwaysAdd(Stmt::ImplicitCastExprClass)
      .setAlwaysAdd(Stmt::UnaryOperatorClass);
  }

  // Note: the CFG itself is built lazily by the first AC.getCFG() call
  // below, using the build options configured above.

  // Emit delayed diagnostics: each one fires only if its statement is
  // reachable from the function entry (or cannot be mapped to a block).
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

    // Register the expressions with the CFGBuilder.
    for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
         i = fscope->PossiblyUnreachableDiags.begin(),
         e = fscope->PossiblyUnreachableDiags.end();
         i != e; ++i) {
      if (const Stmt *stmt = i->stmt)
        AC.registerForcedBlockExpression(stmt);
    }

    if (AC.getCFG()) {
      analyzed = true;
      for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
            i = fscope->PossiblyUnreachableDiags.begin(),
            e = fscope->PossiblyUnreachableDiags.end();
            i != e; ++i)
      {
        const sema::PossiblyUnreachableDiag &D = *i;
        bool processed = false;
        if (const Stmt *stmt = i->stmt) {
          const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
          CFGReverseBlockReachabilityAnalysis *cra =
              AC.getCFGReachablityAnalysis();
          // FIXME: We should be able to assert that block is non-null, but
          // the CFG analysis can skip potentially-evaluated expressions in
          // edge cases; see test/Sema/vla-2.c.
          if (block && cra) {
            // Can this block be reached from the entrance?
            if (cra->isReachable(&AC.getCFG()->getEntry(), block))
              S.Diag(D.Loc, D.PD);
            processed = true;
          }
        }
        if (!processed) {
          // Emit the warning anyway if we cannot map to a basic block.
          S.Diag(D.Loc, D.PD);
        }
      }
    }

    // CFG construction failed: fall back to emitting everything.
    if (!analyzed)
      flushDiagnostics(S, fscope);
  }


  // Warning: check missing 'return'.  Blocks, lambda call operators, and
  // ordinary functions each get their own diagnostic wording.
  if (P.enableCheckFallThrough) {
    const CheckFallThroughDiagnostics &CD =
      (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
       : (isa<CXXMethodDecl>(D) &&
          cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
          cast<CXXMethodDecl>(D)->getParent()->isLambda())
            ? CheckFallThroughDiagnostics::MakeForLambda()
            : CheckFallThroughDiagnostics::MakeForFunction(D));
    CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
  }

  // Warning: check for unreachable code
  if (P.enableCheckUnreachable) {
    // Only check for unreachable code on non-template instantiations.
    // Different template instantiations can effectively change the control-flow
    // and it is very difficult to prove that a snippet of code in a template
    // is unreachable for all instantiations.
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
      isTemplateInstantiation = Function->isTemplateInstantiation();
    if (!isTemplateInstantiation)
      CheckUnreachable(S, AC);
  }

  // Check for thread safety violations
  if (P.enableThreadSafetyAnalysis) {
    SourceLocation FL = AC.getDecl()->getLocation();
    SourceLocation FEL = AC.getDecl()->getLocEnd();
    thread_safety::ThreadSafetyReporter Reporter(S, FL, FEL);
    thread_safety::runThreadSafetyAnalysis(AC, Reporter);
    Reporter.emitDiagnostics();
  }

  // Uninitialized-variable analysis: run only when at least one of the two
  // related warnings is enabled at this declaration's location.
  if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
      != DiagnosticsEngine::Ignored ||
      Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
      != DiagnosticsEngine::Ignored) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
                                        reporter, stats);

      // Accumulate per-function stats (reported by PrintStats) when -print-stats
      // is active and the analysis actually looked at variables.
      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
            std::max(MaxUninitAnalysisVariablesPerFunction,
                     stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
            std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                     stats.NumBlockVisits);
      }
    }
  }

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detail information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
    } else {
      ++NumFunctionsWithBadCFGs;
    }
  }
}
987
988void clang::sema::AnalysisBasedWarnings::PrintStats() const {
989  llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
990
991  unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
992  unsigned AvgCFGBlocksPerFunction =
993      !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
994  llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
995               << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
996               << "  " << NumCFGBlocks << " CFG blocks built.\n"
997               << "  " << AvgCFGBlocksPerFunction
998               << " average CFG blocks per function.\n"
999               << "  " << MaxCFGBlocksPerFunction
1000               << " max CFG blocks per function.\n";
1001
1002  unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
1003      : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
1004  unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
1005      : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
1006  llvm::errs() << NumUninitAnalysisFunctions
1007               << " functions analyzed for uninitialiazed variables\n"
1008               << "  " << NumUninitAnalysisVariables << " variables analyzed.\n"
1009               << "  " << AvgUninitVariablesPerFunction
1010               << " average variables per function.\n"
1011               << "  " << MaxUninitAnalysisVariablesPerFunction
1012               << " max variables per function.\n"
1013               << "  " << NumUninitAnalysisBlockVisits << " block visits.\n"
1014               << "  " << AvgUninitBlockVisitsPerFunction
1015               << " average block visits per function.\n"
1016               << "  " << MaxUninitAnalysisBlockVisitsPerFunction
1017               << " max block visits per function.\n";
1018}
1019