ExprEngineCallAndReturn.cpp revision 622b6fb0a1d280c16e135c7e427b79cafffbde1f
//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "ExprEngine"

#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ParentMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  const CFG *CalleeCFG = calleeCtx->getCFG();
  const CFGBlock *Entry = &(CalleeCFG->getEntry());

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node and add it to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew)
    Engine.getWorkList()->enqueue(Node);
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = 0;
  const StackFrameContext *SF =
          Node->getLocation().getLocationContext()->getCurrentStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
      if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP)) {
        S = SP->getStmt();
        break;
      } else if (const CallExitEnd *CEE = dyn_cast<CallExitEnd>(&PP)) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        const CallEnter *CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      }
    } else if (const CallEnter *CE = dyn_cast<CallEnter>(&PP)) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    Node = *Node->pred_begin();
  }

  const CFGBlock *Blk = 0;
  if (S) {
    // Now, get the enclosing basic block.
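    // Walk back up the graph to the first BlockEdge taken within this stack
    // frame; its destination is the basic block that contains S.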
    while (Node && Node->pred_size() >= 1) {
      const ProgramPoint &PP = Node->getLocation();
      if (isa<BlockEdge>(PP) &&
          (PP.getLocationContext()->getCurrentStackFrame() == SF)) {
        const BlockEdge &EPP = cast<BlockEdge>(PP);
        Blk = EPP.getDst();
        break;
      }
      Node = *Node->pred_begin();
    }
  }

  return std::pair<const Stmt*, const CFGBlock*>(S, Blk);
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run remove dead bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
/// (A worked example of this sequence appears at the end of this file.)
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.

  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);
      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // If the constructed object is a prvalue, get its bindings.
      // Note that we have to be careful here because constructors embedded
      // in DeclStmts are not marked as lvalues.
      if (!CCE->isGLValue())
        if (const MemRegion *MR = ThisV.getAsRegion())
          if (isa<CXXTempObjectRegion>(MR))
            ThisV = state->getSVal(cast<Loc>(ThisV));

      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
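  // Nodes that survive the purge; if purging is disabled, or we have no
  // statement/block to anchor it to, CEBNode itself is carried through.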
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the caller's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use LastSt as a diagnostic statement,
    // with which the PostStmtPurgeDeadSymbols point will be associated.
    removeDead(BindedRetNode, CleanedNodes, 0, callerCtx, LastSt,
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = 0;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
                                                      &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count the small functions when determining the stack depth.
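      // ("Small" means the callee's CFG has no more blocks than the
      // always-inline size threshold checked just below.)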
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

static bool IsInStdNamespace(const FunctionDecl *FD) {
  const DeclContext *DC = FD->getEnclosingNamespaceContext();
  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
  if (!ND)
    return false;

  while (const DeclContext *Parent = ND->getParent()) {
    if (!isa<NamespaceDecl>(Parent))
      break;
    ND = cast<NamespaceDecl>(Parent);
  }

  return ND->getName() == "std";
}

// Determine if we should inline the call.
bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const CFG *CalleeCFG = CalleeADC->getCFG();

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  if (!CalleeCFG)
    return false;

  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= AMgr.options.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
       || IsRecursive))
    return false;

  if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
    return false;

  if (CalleeCFG->getNumBlockIDs() > AMgr.options.InlineMaxFunctionSize)
    return false;

  // Do not inline variadic calls (for now).
  if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
    if (BD->isVariadic())
      return false;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->isVariadic())
      return false;
  }

  if (getContext().getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      // Conditionally allow the inlining of template functions.
      if (!getAnalysisManager().options.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally allow the inlining of C++ standard library functions.
      if (!getAnalysisManager().options.mayInlineCXXStandardLibrary())
        if (getContext().getSourceManager().isInSystemHeader(FD->getLocation()))
          if (IsInStdNamespace(FD))
            return false;
    }
  }

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

/// The GDM component containing the dynamic dispatch bifurcation info. When
/// the exact type of the receiver is not known, we want to explore both
/// paths - one on which we inline the call and one on which we don't. This is
/// done to ensure we do not drop coverage.
/// This is a map from the receiver region to a flag specifying whether we
/// consider this region's type information precise along the given path.
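/// For example, if an Objective-C message's receiver has only an inferred
/// dynamic type, BifurcateCall (below) splits the path: one branch records
/// DynamicDispatchModeInlined for the receiver's region and inlines the
/// inferred definition, while the other records
/// DynamicDispatchModeConservative and evaluates the call conservatively.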
namespace clang {
namespace ento {
enum DynamicDispatchMode { DynamicDispatchModeInlined = 1,
                           DynamicDispatchModeConservative };

struct DynamicDispatchBifurcationMap {};
typedef llvm::ImmutableMap<const MemRegion*,
                           unsigned int> DynamicDispatchBifur;
template<> struct ProgramStateTrait<DynamicDispatchBifurcationMap>
    : public ProgramStatePartialTrait<DynamicDispatchBifur> {
  static void *GDMIndex() { static int index; return &index; }
};

}}

bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = 0;

  AnalyzerOptions &Opts = getAnalysisManager().options;

  // FIXME: Refactor this check into a hypothetical CallEvent::canInline.
  switch (Call.getKind()) {
  case CE_Function:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return false;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return false;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return false;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the
    // constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return false;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
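    // As with constructors above, bail out if 'this' refers to an array
    // element.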
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
    if (!Opts.mayInlineObjCMethod())
      return false;
    if (!(getAnalysisManager().options.IPAMode == DynamicDispatch ||
          getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate))
      return false;
    break;
  }

  if (!shouldInlineDecl(D, Pred))
    return false;

  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list, so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return 0;

  assert(ReplayState == (const void*)CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this);

  // Finally, run any post-call checks.
  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
                                             Call, *this);
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    return State->BindExpr(E, LCtx, C->getCXXThisVal());
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();
  SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  if (!getAnalysisManager().shouldInlineCall()) {
    conservativeEvalCall(*Call, Bldr, Pred, State);
    return;
  }
  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();
  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);

  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (D) {
      if (RD.mayHaveOtherDefinitions()) {
        // Explore with and without inlining the call.
        if (getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (getAnalysisManager().options.IPAMode != DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned int *BState =
        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline" path, keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {

  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}
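// Worked example (illustrative only; not part of the engine). Assuming the
// analyzer decides to inline 'id' in the following hypothetical source:
//
//   int id(int x) { return x; }
//   void caller() { int y = id(3); }
//
// processCallExit walks the sequence documented above: CallExitBegin fires at
// the end of the inlined callee; the value of 'return x' (here, 3) is bound
// to the CallExpr 'id(3)' in the caller's context; dead bindings from the
// callee's frame are purged; CallExitEnd switches back to the caller's stack
// frame; and the PostStmt<CallExpr> checks run before the caller's block is
// re-enqueued at the statement following the call site.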