ExprEngineCallAndReturn.cpp revision ac7cc2d37e82181e73fcc265c1d0a619d18b7605
//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "ExprEngine"

#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);

  const CFG *CalleeCFG = calleeCtx->getCFG();
  const CFGBlock *Entry = &(CalleeCFG->getEntry());

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node and add it to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew)
    Engine.getWorkList()->enqueue(Node);
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = 0;
  const CFGBlock *Blk = 0;
  const StackFrameContext *SF =
          Node->getLocation().getLocationContext()->getCurrentStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::pair<const Stmt*, const CFGBlock*>((Stmt*)0, (CFGBlock*)0);

    Node = *Node->pred_begin();
  }

  return std::pair<const Stmt*, const CFGBlock*>(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
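///
/// Illustrative example (hypothetical types, not taken from the code below):
/// \code
///   struct Base    { virtual Base *clone(); };
///   struct Derived : Base { virtual Derived *clone(); };  // covariant return
/// \endcode
/// If a call through a 'Base *' is devirtualized to Derived::clone, the
/// returned value has type 'Derived *', while the caller's expression still
/// has type 'Base *'; the value then needs a derived-to-base adjustment.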
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that that never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// Returns true if the CXXConstructExpr \p E was intended to construct a
/// prvalue for the region in \p V.
///
/// Note that we can't just test for rvalue vs. glvalue because
/// CXXConstructExprs embedded in DeclStmts and initializers are considered
/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
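///
/// A rough illustration (hypothetical code):
/// \code
///   const Foo &r = Foo(1);  // constructs a genuine temporary region
///   Foo local = Foo(2);     // embedded in a DeclStmt; the analyzer treats it
///                           // as constructing 'local' directly
/// \endcode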
static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
  if (E->isGLValue())
    return false;

  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return false;

  return isa<CXXTempObjectRegion>(MR);
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run the remove-dead-bindings pass to clean up dead symbols from the
///    callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
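///
/// For an inlined call such as 'x = foo();' (illustrative), this means the
/// value of foo's ReturnStmt is bound to the CallExpr, symbols that are live
/// only in foo's stack frame are purged, and execution resumes in the caller's
/// block at the statement following the call.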
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // If the constructed object is a temporary prvalue, get its bindings.
      if (isTemporaryPRValue(CCE, ThisV))
        ThisV = state->getSVal(ThisV.castAs<Loc>());

      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run the
  // remove-dead-bindings pass before returning from the call. This is
  // important to ensure that we report issues such as leaks in the stack
  // contexts in which they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with 0 statement and callee location
    // context, telling it to clean up everything in the callee's context
    // (and its children). We use the callee's function body as a diagnostic
    // statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, 0, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = 0;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
        &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
                                   PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                               bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

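/// Returns true if the given function is declared inside namespace 'std',
/// either directly or within a nested namespace, e.g. (illustrative) a helper
/// declared in 'namespace std { namespace __detail { ... } }'.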
static bool IsInStdNamespace(const FunctionDecl *FD) {
  const DeclContext *DC = FD->getEnclosingNamespaceContext();
  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
  if (!ND)
    return false;

  while (const DeclContext *Parent = ND->getParent()) {
    if (!isa<NamespaceDecl>(Parent))
      break;
    ND = cast<NamespaceDecl>(Parent);
  }

  return ND->getName() == "std";
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths:
// one on which we inline the call and one on which we don't. This is done to
// ensure we do not drop coverage.
// This is a map from the receiver region to a mode flag specifying whether we
// consider this region's dynamic type information precise along the given path.
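//
// Illustrative example (hypothetical Objective-C code):
//
//   id obj = getObject();   // dynamic type of 'obj' is unknown
//   [obj frobnicate];       // path is split: one branch inlines the inferred
//                           // definition of -frobnicate, the other evaluates
//                           // the message conservatively
//
// The chosen mode is recorded for obj's region so the split happens at most
// once per receiver region along a path.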
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
}
REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
                                                             unsigned))

bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return 0;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
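      // For example (illustrative), the value of '[obj retain]' is modeled as
      // the value of 'obj' itself.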
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)) {
    SVal ThisV = C->getCXXThisVal();

    // If the constructed object is a temporary prvalue, get its bindings.
    if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
      ThisV = State->getSVal(ThisV.castAs<Loc>());

    return State->BindExpr(E, LCtx, ThisV);
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();
  SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

enum CallInlinePolicy {
  CIP_Allowed,
  CIP_DisallowedOnce,
  CIP_DisallowedAlways
};

static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
                                          const ExplodedNode *Pred,
                                          AnalyzerOptions &Opts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.mayInlineObjCMethod())
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
  if (!RD->lookup(DeclName).empty())
    return true;

  CXXBasePaths Paths(false, false, false);
  if (RD->lookupInBases(&CXXRecordDecl::FindOrdinaryMember,
                        DeclName.getAsOpaquePtr(),
                        Paths))
    return true;

  return false;
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
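/// For example (illustrative), a class declaring 'iterator begin();' or a
/// nested 'typedef std::forward_iterator_tag iterator_category;' matches this
/// heuristic.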
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a constructor or destructor of
/// a C++ container or iterator.
///
/// We generally do a poor job modeling most containers right now, and would
/// prefer not to inline their setup and teardown.
static bool isContainerCtorOrDtor(const ASTContext &Ctx,
                                  const FunctionDecl *FD) {
  if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD)))
    return false;

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(FD)->getParent();
  return isContainerClass(Ctx, RD);
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
static bool mayInlineDecl(const CallEvent &Call, AnalysisDeclContext *CalleeADC,
                          AnalyzerOptions &Opts) {
  // FIXME: Do not inline variadic calls.
  if (Call.isVariadic())
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.mayInlineCXXStandardLibrary())
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (IsInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.mayInlineCXXContainerCtorsAndDtors())
        if (!Ctx.getSourceManager().isFromMainFile(FD->getLocation()))
          if (isContainerCtorOrDtor(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++ shared_ptr.
      // We don't currently do a good job modeling shared_ptr because we can't
      // see the reference count, so treating it as opaque is probably the best
      // idea.
      if (!Opts.mayInlineCXXSharedPtrDtor())
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline large functions.
  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
    return false;

  // It is possible that the live variables analysis cannot be
  // run.  If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // Temporary object destructor processing is currently broken, so we never
  // inline them.
  // FIXME: Remove this once temp destructors are working.
  if ((*currBldrCtx->getBlock())[currStmtIdx].getAs<CFGTemporaryDtor>())
    return false;

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(Call, CalleeADC, Opts)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  const CFG *CalleeCFG = CalleeADC->getCFG();

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
       || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.getMaxTimesInlineLarge()) &&
      CalleeCFG->getNumBlockIDs() > 13) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal &&
      (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
      || IsRecursive))
    return false;

  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  return true;
}

static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
  return;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                  ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}