//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);

  const CFG *CalleeCFG = calleeCtx->getCFG();
  const CFGBlock *Entry = &(CalleeCFG->getEntry());

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node and add it to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew)
    Engine.getWorkList()->enqueue(Node);
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF =
          Node->getLocation().getLocationContext()->getCurrentStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}
/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
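///
/// For example (the types here are purely illustrative):
/// \code
///   struct Base    { virtual Base *clone(); };
///   struct Derived : Base { Derived *clone() override; };  // covariant
///
///   Base *b = new Derived;
///   Base *c = b->clone();  // If the call is devirtualized to Derived::clone,
///                          // the callee returns a Derived*, but the call
///                          // expression has type Base*, so the value needs a
///                          // derived-to-base adjustment.
/// \endcode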
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that that never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// Returns true if the CXXConstructExpr \p E was intended to construct a
/// prvalue for the region in \p V.
///
/// Note that we can't just test for rvalue vs. glvalue because
/// CXXConstructExprs embedded in DeclStmts and initializers are considered
/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
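///
/// A rough illustration (hypothetical code):
/// \code
///   X x = X();      // The inner CXXConstructExpr is a prvalue in the AST,
///                   // but it constructs directly into the region of 'x'.
///   consume(X());   // Here the constructed object really is a temporary,
///                   // and its region is a CXXTempObjectRegion.
/// \endcode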
static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
  if (E->isGLValue())
    return false;

  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return false;

  return isa<CXXTempObjectRegion>(MR);
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run remove-dead-bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
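///
/// In terms of nodes, the sequence built below is roughly:
///   CEBNode -> BindedRetNode -> CleanedNodes -> CEENode -> Dst (work list).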
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // If the constructed object is a temporary prvalue, get its bindings.
      if (isTemporaryPRValue(CCE, ThisV))
        ThisV = state->getSVal(ThisV.castAs<Loc>());

      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which they
  // occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the callee's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx, &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
                                   PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                               bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

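/// Returns true if \p FD is declared within namespace 'std', either directly
/// or inside a namespace nested within it.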
static bool IsInStdNamespace(const FunctionDecl *FD) {
  const DeclContext *DC = FD->getEnclosingNamespaceContext();
  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
  if (!ND)
    return false;

  while (const DeclContext *Parent = ND->getParent()) {
    if (!isa<NamespaceDecl>(Parent))
      break;
    ND = cast<NamespaceDecl>(Parent);
  }

  return ND->isStdNamespace();
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is a map from the receiver region to a flag specifying whether we
// consider this region's information precise or not along the given path.
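// For example, when the receiver of an Objective-C message is only known to
// be of type 'id', one path records DynamicDispatchModeInlined for the
// receiver's region and inlines the inferred definition, while the other
// records DynamicDispatchModeConservative and evaluates the call
// conservatively (see BifurcateCall below).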
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
}
REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
                                                             unsigned))

bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

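/// If this call was previously marked for replay without inlining (i.e. the
/// ReplayWithoutInlining trait is set to this call's origin expression),
/// return the state with that marker removed; otherwise return null.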
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}
void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();

    // If the constructed object is a temporary prvalue, get its bindings.
    if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
      ThisV = State->getSVal(ThisV.castAs<Loc>());

    return State->BindExpr(E, LCtx, ThisV);
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();
  SVal R = SVB.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

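/// The answer to "may this kind of call be inlined here?": allowed,
/// disallowed for this particular call, or disallowed for all future calls to
/// the same function.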
enum CallInlinePolicy {
  CIP_Allowed,
  CIP_DisallowedOnce,
  CIP_DisallowedAlways
};

static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
                                          const ExplodedNode *Pred,
                                          AnalyzerOptions &Opts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return CIP_DisallowedOnce;

    break;
  }
  case CE_CXXAllocator:
    if (Opts.mayInlineCXXAllocator())
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.mayInlineObjCMethod())
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
  if (!RD->lookup(DeclName).empty())
    return true;

  CXXBasePaths Paths(false, false, false);
  if (RD->lookupInBases(&CXXRecordDecl::FindOrdinaryMember,
                        DeclName.getAsOpaquePtr(),
                        Paths))
    return true;

  return false;
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
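///
/// For example, 'std::vector<int>' (which has 'begin') and a hand-rolled list
/// type with a nested 'iterator' typedef would both be classified as
/// containers by this heuristic.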
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
static bool mayInlineDecl(AnalysisDeclContext *CalleeADC,
                          AnalyzerOptions &Opts) {
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.mayInlineCXXStandardLibrary())
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (IsInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.mayInlineCXXContainerMethods())
        if (!Ctx.getSourceManager().isInMainFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++ shared_ptr.
      // We don't currently do a good job modeling shared_ptr because we can't
      // see the reference count, so treating it as opaque is probably the best
      // idea.
      if (!Opts.mayInlineCXXSharedPtrDtor())
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline large functions.
  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
    return false;

  // It is possible that the live variables analysis cannot be
  // run.  If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // Temporary object destructor processing is currently broken, so we never
  // inline them.
  // FIXME: Remove this once temp destructors are working.
  if (isa<CXXDestructorCall>(Call)) {
    if ((*currBldrCtx->getBlock())[currStmtIdx].getAs<CFGTemporaryDtor>())
      return false;
  }

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC, Opts)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  const CFG *CalleeCFG = CalleeADC->getCFG();

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
       || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.getMaxTimesInlineLarge()) &&
      CalleeCFG->getNumBlockIDs() > 13) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal &&
      (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
      || IsRecursive))
    return false;

  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  return true;
}

static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
  return;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {

  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                  ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}