void ExprEngine::defaultEvalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                                 const CallEvent &Call) {
  // Try to inline the call.
  ProgramStateRef state = 0;
  const Expr *E = Call.getOriginExpr();
  if (E) {
    state = getInlineFailedState(Pred, E);
    if (state == 0 && inlineCall(Dst, Call, Pred))
      return;
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);

  // Invalidate any regions touched by the call.
  unsigned Count = currentBuilderContext->getCurrentBlockCount();
  if (state == 0)
    state = Pred->getState();
  state = Call.invalidateRegions(Count, state);

  // Conjure a symbol value to use as the result.
  assert(Call.getOriginExpr() && "Must have an expression to bind the result");
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  const LocationContext *LCtx = Pred->getLocationContext();
  SVal RetVal = SVB.getConjuredSymbolVal(0, Call.getOriginExpr(), LCtx,
                                         ResultTy, Count);

  // And make the result node.
  state = state->BindExpr(Call.getOriginExpr(), LCtx, RetVal);
  Bldr.generateNode(Call.getOriginExpr(), Pred, state);
}
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &Call) {
  ProgramStateRef State = 0;
  const Expr *E = Call.getOriginExpr();

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // for CallEvents that do not have origin expressions, this should still be
  // safe.
  if (!isa<ObjCMethodCall>(Call)) {
    State = getInlineFailedState(Pred->getState(), E);
    if (State == 0 && inlineCall(Call, Pred)) {
      // If we inlined the call, the successor has been manually added onto
      // the work list and we should not consider it for subsequent call
      // handling steps.
      Bldr.takeNodes(Pred);
      return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  if (State == 0)
    State = Pred->getState();

  // Invalidate any regions touched by the call.
  unsigned Count = currentBuilderContext->getCurrentBlockCount();
  State = Call.invalidateRegions(Count, State);

  // Construct and bind the return value.
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}
void ExprEngine::defaultEvalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                                 const CallEvent &Call) {
  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // for CallEvents that do not have origin expressions, this should still be
  // safe.
  const Expr *E = Call.getOriginExpr();
  ProgramStateRef state = getInlineFailedState(Pred, E);
  if (state == 0 && inlineCall(Dst, Call, Pred))
    return;

  // If we can't inline it, handle the return value and invalidate the regions.
  NodeBuilder Bldr(Pred, Dst, *currentBuilderContext);

  // Invalidate any regions touched by the call.
  unsigned Count = currentBuilderContext->getCurrentBlockCount();
  if (state == 0)
    state = Pred->getState();
  state = Call.invalidateRegions(Count, state);

  // Conjure a symbol value to use as the result.
  if (E) {
    QualType ResultTy = Call.getResultType();
    SValBuilder &SVB = getSValBuilder();
    const LocationContext *LCtx = Pred->getLocationContext();
    SVal RetVal = SVB.getConjuredSymbolVal(0, E, LCtx, ResultTy, Count);
    state = state->BindExpr(E, LCtx, RetVal);
  }

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), state, Pred);
}
ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    return State->BindExpr(E, LCtx, C->getCXXThisVal());
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();
  SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count);
  return State->BindExpr(E, LCtx, R);
}
// FIXME: This is the sort of code that should eventually live in a Core
// checker rather than as a special case in ExprEngine.
void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
                                    const CallEvent &Call) {
  SVal ThisVal;
  bool AlwaysReturnsLValue;
  const CXXRecordDecl *ThisRD = nullptr;
  if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
    assert(Ctor->getDecl()->isTrivial());
    assert(Ctor->getDecl()->isCopyOrMoveConstructor());
    ThisVal = Ctor->getCXXThisVal();
    ThisRD = Ctor->getDecl()->getParent();
    AlwaysReturnsLValue = false;
  } else {
    assert(cast<CXXMethodDecl>(Call.getDecl())->isTrivial());
    assert(cast<CXXMethodDecl>(Call.getDecl())->getOverloadedOperator() ==
           OO_Equal);
    ThisVal = cast<CXXInstanceCall>(Call).getCXXThisVal();
    ThisRD = cast<CXXMethodDecl>(Call.getDecl())->getParent();
    AlwaysReturnsLValue = true;
  }

  assert(ThisRD);
  if (ThisRD->isEmpty()) {
    // Do nothing for empty classes. Otherwise it'd retrieve an UnknownVal
    // and bind it and RegionStore would think that the actual value
    // in this region at this offset is unknown.
    return;
  }

  const LocationContext *LCtx = Pred->getLocationContext();

  ExplodedNodeSet Dst;
  Bldr.takeNodes(Pred);

  SVal V = Call.getArgSVal(0);

  // If the value being copied is not unknown, load from its location to get
  // an aggregate rvalue.
  if (Optional<Loc> L = V.getAs<Loc>())
    V = Pred->getState()->getSVal(*L);
  else
    assert(V.isUnknownOrUndef());

  const Expr *CallExpr = Call.getOriginExpr();
  evalBind(Dst, CallExpr, Pred, ThisVal, V, true);

  PostStmt PS(CallExpr, LCtx);
  for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end(); I != E; ++I) {
    ProgramStateRef State = (*I)->getState();
    if (AlwaysReturnsLValue)
      State = State->BindExpr(CallExpr, LCtx, ThisVal);
    else
      State = bindReturnValue(Call, LCtx, State);
    Bldr.generateNode(PS, State, *I);
  }
}
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}
ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();

    // If the constructed object is a temporary prvalue, get its bindings.
    if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
      ThisV = State->getSVal(ThisV.castAs<Loc>());

    return State->BindExpr(E, LCtx, ThisV);
  }

  // Conjure a symbol if the return value is unknown.
  QualType ResultTy = Call.getResultType();
  SValBuilder &SVB = getSValBuilder();
  unsigned Count = currBldrCtx->blockCount();

  // See if we need to conjure a heap pointer instead of
  // a regular unknown pointer.
  bool IsHeapPointer = false;
  if (const auto *CNE = dyn_cast<CXXNewExpr>(E))
    if (CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
      // FIXME: Delegate this to evalCall in MallocChecker?
      IsHeapPointer = true;
    }

  SVal R = IsHeapPointer ? SVB.getConjuredHeapSymbolVal(E, LCtx, Count)
                         : SVB.conjureSymbolVal(nullptr, E, LCtx, ResultTy,
                                                Count);
  return State->BindExpr(E, LCtx, R);
}
void InnerPointerChecker::markPtrSymbolsReleased(const CallEvent &Call,
                                                 ProgramStateRef State,
                                                 const MemRegion *MR,
                                                 CheckerContext &C) const {
  if (const PtrSet *PS = State->get<RawPtrMap>(MR)) {
    const Expr *Origin = Call.getOriginExpr();
    for (const auto Symbol : *PS) {
      // NOTE: `Origin` may be null, and will be stored so in the symbol's
      // `RefState` in MallocChecker's `RegionState` program state map.
      State = allocation_state::markReleased(State, Symbol, Origin);
    }
    State = State->remove<RawPtrMap>(MR);
    C.addTransition(State);
    return;
  }
}
void NoReturnFunctionChecker::checkPostCall(const CallEvent &CE,
                                            CheckerContext &C) const {
  ProgramStateRef state = C.getState();
  bool BuildSinks = false;

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CE.getDecl()))
    BuildSinks = FD->getAttr<AnalyzerNoReturnAttr>() || FD->isNoReturn();

  const Expr *Callee = CE.getOriginExpr();
  if (!BuildSinks && Callee)
    BuildSinks = getFunctionExtInfo(Callee->getType()).getNoReturn();

  if (!BuildSinks && CE.isGlobalCFunction()) {
    if (const IdentifierInfo *II = CE.getCalleeIdentifier()) {
      // HACK: Some functions are not marked noreturn, and don't return.
      // Here are a few hardwired ones. If this takes too long, we can
      // potentially cache these results.
      BuildSinks
        = llvm::StringSwitch<bool>(StringRef(II->getName()))
            .Case("exit", true)
            .Case("panic", true)
            .Case("error", true)
            .Case("Assert", true)
            // FIXME: This is just a wrapper around throwing an exception.
            // Eventually inter-procedural analysis should handle this easily.
            .Case("ziperr", true)
            .Case("assfail", true)
            .Case("db_error", true)
            .Case("__assert", true)
            // For the purpose of static analysis, we do not care that
            // this MSVC function will return if the user decides to continue.
            .Case("_wassert", true)
            .Case("__assert_rtn", true)
            .Case("__assert_fail", true)
            .Case("dtrace_assfail", true)
            .Case("yy_fatal_error", true)
            .Case("_XCAssertionFailureHandler", true)
            .Case("_DTAssertionFailureHandler", true)
            .Case("_TSAssertionFailureHandler", true)
            .Default(false);
    }
  }

  if (BuildSinks)
    C.generateSink();
}
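// For context: llvm::StringSwitch, used above, chains Case() comparisons
// against the subject string and yields the first matching value (or the
// Default). A minimal standalone sketch of the same pattern; the helper name
// is hypothetical, for illustration only:
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

static bool isKnownNoReturnName(llvm::StringRef Name) {
  // First matching Case wins; unmatched names fall through to Default.
  return llvm::StringSwitch<bool>(Name)
      .Case("exit", true)
      .Case("panic", true)
      .Default(false);
}
// e.g. isKnownNoReturnName("panic") == true, isKnownNoReturnName("puts") == false.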
// FIXME: This is the sort of code that should eventually live in a Core
// checker rather than as a special case in ExprEngine.
void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
                                    const CallEvent &Call) {
  SVal ThisVal;
  bool AlwaysReturnsLValue;
  if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
    assert(Ctor->getDecl()->isTrivial());
    assert(Ctor->getDecl()->isCopyOrMoveConstructor());
    ThisVal = Ctor->getCXXThisVal();
    AlwaysReturnsLValue = false;
  } else {
    assert(cast<CXXMethodDecl>(Call.getDecl())->isTrivial());
    assert(cast<CXXMethodDecl>(Call.getDecl())->getOverloadedOperator() ==
           OO_Equal);
    ThisVal = cast<CXXInstanceCall>(Call).getCXXThisVal();
    AlwaysReturnsLValue = true;
  }

  const LocationContext *LCtx = Pred->getLocationContext();

  ExplodedNodeSet Dst;
  Bldr.takeNodes(Pred);

  SVal V = Call.getArgSVal(0);

  // If the value being copied is not unknown, load from its location to get
  // an aggregate rvalue.
  if (Optional<Loc> L = V.getAs<Loc>())
    V = Pred->getState()->getSVal(*L);
  else
    assert(V.isUnknown());

  const Expr *CallExpr = Call.getOriginExpr();
  evalBind(Dst, CallExpr, Pred, ThisVal, V, true);

  PostStmt PS(CallExpr, LCtx);
  for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end(); I != E; ++I) {
    ProgramStateRef State = (*I)->getState();
    if (AlwaysReturnsLValue)
      State = State->BindExpr(CallExpr, LCtx, ThisVal);
    else
      State = bindReturnValue(Call, LCtx, State);
    Bldr.generateNode(PS, State, *I);
  }
}
/// \brief Run checkers for evaluating a call.
/// Only one checker will evaluate the call.
void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
                                            const ExplodedNodeSet &Src,
                                            const CallEvent &Call,
                                            ExprEngine &Eng) {
  const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
  for (ExplodedNodeSet::iterator NI = Src.begin(), NE = Src.end();
       NI != NE; ++NI) {
    ExplodedNode *Pred = *NI;
    bool anyEvaluated = false;

    ExplodedNodeSet checkDst;
    NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());

    // Check if any of the EvalCall callbacks can evaluate the call.
    for (std::vector<EvalCallFunc>::iterator EI = EvalCallCheckers.begin(),
         EE = EvalCallCheckers.end(); EI != EE; ++EI) {
      ProgramPoint::Kind K = ProgramPoint::PostStmtKind;
      const ProgramPoint &L =
        ProgramPoint::getProgramPoint(CE, K, Pred->getLocationContext(),
                                      EI->Checker);
      bool evaluated = false;
      { // CheckerContext generates transitions (populates checkDst) on
        // destruction, so introduce the scope to make sure it gets properly
        // populated.
        CheckerContext C(B, Eng, Pred, L);
        evaluated = (*EI)(CE, C);
      }
      assert(!(evaluated && anyEvaluated)
             && "There are more than one checkers evaluating the call");
      if (evaluated) {
        anyEvaluated = true;
        Dst.insert(checkDst);
#ifdef NDEBUG
        break; // on release don't check that no other checker also evals.
#endif
      }
    }

    // If none of the checkers evaluated the call, ask ExprEngine to handle it.
    if (!anyEvaluated) {
      NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
      Eng.defaultEvalCall(B, Pred, Call);
    }
  }
}
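// For context: each entry in EvalCallCheckers above is a checker that opted
// into eval::Call. A minimal sketch of such a checker, using the same
// CallExpr-based callback signature this loop invokes; the checker name and
// the "identity" function it models are hypothetical, for illustration only:
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

using namespace clang;
using namespace ento;

namespace {
class IdentityModelChecker : public Checker<eval::Call> {
public:
  // Return true only when the call has been fully modeled; returning false
  // lets other checkers (or ExprEngine::defaultEvalCall) handle it.
  bool evalCall(const CallExpr *CE, CheckerContext &C) const {
    if (C.getCalleeName(CE) != "identity" || CE->getNumArgs() != 1)
      return false;
    ProgramStateRef State = C.getState();
    const LocationContext *LCtx = C.getLocationContext();
    // Model the call as returning its argument unchanged.
    SVal Arg = State->getSVal(CE->getArg(0), LCtx);
    C.addTransition(State->BindExpr(CE, LCtx, Arg));
    return true;
  }
};
} // end anonymous namespace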
void CallDumper::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
  const Expr *CallE = Call.getOriginExpr();
  if (!CallE)
    return;

  unsigned Indentation = 0;
  for (const LocationContext *LC = C.getLocationContext()->getParent();
       LC != nullptr; LC = LC->getParent())
    ++Indentation;

  // It is mildly evil to print directly to llvm::outs() rather than emitting
  // warnings, but this ensures things do not get filtered out by the rest of
  // the static analyzer machinery.
  llvm::outs().indent(Indentation);
  if (Call.getResultType()->isVoidType())
    llvm::outs() << "Returning void\n";
  else
    llvm::outs() << "Returning " << C.getSVal(CallE) << "\n";
}
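// A minimal sketch of how a dumper like this is typically wired into the
// analyzer via CheckerManager; the registration function name is hypothetical
// and the real hook for this checker may differ:
void ento::registerCallDumper(CheckerManager &Mgr) {
  Mgr.registerChecker<CallDumper>();
}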
bool ExprEngine::inlineCall(const CallEvent &Call, ExplodedNode *Pred) {
  if (!getAnalysisManager().shouldInlineCall())
    return false;

  const Decl *D = Call.getRuntimeDefinition();
  if (!D)
    return false;

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = 0;

  switch (Call.getKind()) {
  case CE_Function:
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    // These are always at least possible to inline.
    break;
  case CE_CXXConstructor:
  case CE_CXXDestructor: {
    // Only inline constructors and destructors if we built the CFGs for them
    // properly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    if (!ADC->getCFGBuildOptions().AddImplicitDtors ||
        !ADC->getCFGBuildOptions().AddInitializers)
      return false;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Call.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
      const CXXConstructExpr *CtorExpr = Ctor->getOriginExpr();
      if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
        if (!Target || !isa<DeclRegion>(Target))
          return false;
    }

    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
    break;
  }

  if (!shouldInlineDecl(D, Pred))
    return false;

  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currentBuilderContext->getBlock(),
                             currentStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  ProgramStateRef State = Pred->getState()->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }
  return true;
}
void IteratorChecker::checkPostCall(const CallEvent &Call,
                                    CheckerContext &C) const {
  // Record new iterator positions and iterator position changes
  const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
  if (!Func)
    return;

  if (Func->isOverloadedOperator()) {
    const auto Op = Func->getOverloadedOperator();
    if (isSimpleComparisonOperator(Op)) {
      if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
        handleComparison(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
                         Call.getArgSVal(0), Op);
      } else {
        handleComparison(C, Call.getReturnValue(), Call.getArgSVal(0),
                         Call.getArgSVal(1), Op);
      }
    }
  } else {
    const auto *OrigExpr = Call.getOriginExpr();
    if (!OrigExpr)
      return;

    if (!isIteratorType(Call.getResultType()))
      return;

    auto State = C.getState();

    // Already bound to container?
    if (getIteratorPosition(State, Call.getReturnValue()))
      return;

    if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
      if (isEndCall(Func)) {
        handleEnd(C, OrigExpr, Call.getReturnValue(),
                  InstCall->getCXXThisVal());
        return;
      }
    }

    // Copy-like and move constructors
    if (isa<CXXConstructorCall>(&Call) && Call.getNumArgs() == 1) {
      if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(0))) {
        State = setIteratorPosition(State, Call.getReturnValue(), *Pos);
        if (cast<CXXConstructorDecl>(Func)->isMoveConstructor()) {
          State = removeIteratorPosition(State, Call.getArgSVal(0));
        }
        C.addTransition(State);
        return;
      }
    }

    // Assumption: if return value is an iterator which is not yet bound to a
    //             container, then look for the first iterator argument, and
    //             bind the return value to the same container. This approach
    //             works for STL algorithms.
    // FIXME: Add a more conservative mode
    for (unsigned i = 0; i < Call.getNumArgs(); ++i) {
      if (isIteratorType(Call.getArgExpr(i)->getType())) {
        if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(i))) {
          assignToContainer(C, OrigExpr, Call.getReturnValue(),
                            Pos->getContainer());
          return;
        }
      }
    }
  }
}
bool ExprEngine::inlineCall(ExplodedNodeSet &Dst, const CallEvent &Call,
                            ExplodedNode *Pred) {
  if (!getAnalysisManager().shouldInlineCall())
    return false;

  bool IsDynamicDispatch;
  const Decl *D = Call.getDefinition(IsDynamicDispatch);
  if (!D || IsDynamicDispatch)
    return false;

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = 0;

  switch (Call.getKind()) {
  case CE_Function:
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    // These are always at least possible to inline.
    break;
  case CE_CXXConstructor:
  case CE_CXXDestructor:
    // Do not inline constructors until we can really model destructors.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
  case CE_ObjCPropertyAccess:
    // These always use dynamic dispatch; enabling inlining means assuming
    // that a particular method will be called at runtime.
    llvm_unreachable("Dynamic dispatch should be handled above.");
  }

  if (!shouldInlineDecl(D, Pred))
    return false;

  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currentBuilderContext->getBlock(),
                             currentStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  ProgramStateRef State = Pred->getState()->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }
  return true;
}
bool ExprEngine::inlineCall(ExplodedNodeSet &Dst, const CallEvent &Call,
                            ExplodedNode *Pred) {
  if (!getAnalysisManager().shouldInlineCall())
    return false;

  const StackFrameContext *CallerSFC =
    Pred->getLocationContext()->getCurrentStackFrame();

  const Decl *D = Call.getDecl();
  const LocationContext *ParentOfCallee = 0;

  switch (Call.getKind()) {
  case CE_Function:
  case CE_CXXMember:
    // These are always at least possible to inline.
    break;
  case CE_CXXMemberOperator:
    // FIXME: This should be possible to inline, but
    // RegionStore::enterStackFrame isn't smart enough to handle the first
    // argument being 'this'. The correct solution is to use CallEvent in
    // enterStackFrame as well.
    return false;
  case CE_CXXConstructor:
    // Do not inline constructors until we can model destructors.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    if (!BR)
      return false;
    D = BR->getDecl();
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
  case CE_ObjCPropertyAccess:
    // These always use dynamic dispatch; enabling inlining means assuming
    // that a particular method will be called at runtime.
    return false;
  }

  if (!D || !shouldInlineDecl(D, Pred))
    return false;

  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  const Expr *CallE = Call.getOriginExpr();
  assert(CallE && "It is not yet possible to have calls without statements");

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currentBuilderContext->getBlock(),
                             currentStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, Pred->getLocationContext());

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, Pred->getState(), false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }
  return true;
}
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = 0;

  const AnalyzerOptions &Opts = getAnalysisManager().options;

  // FIXME: Refactor this check into a hypothetical CallEvent::canInline.
  switch (Call.getKind()) {
  case CE_Function:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return false;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return false;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return false;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return false;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
    if (!(getAnalysisManager().options.IPAMode == DynamicDispatch ||
          getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate))
      return false;
    break;
  }

  if (!shouldInlineDecl(D, Pred))
    return false;

  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}