void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      const CFG *CalleeCFG = CalleeADC->getCFG();
      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}
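
// A minimal standalone sketch (toy types, not the Clang API) of the walk
// above: climb a parent-linked chain of stack frames, always counting frames
// that re-enter the declaration under consideration (recursion), and counting
// other frames only when their CFG exceeds the always-inline size.
#include <cassert>

namespace sketch {
struct Frame {
  const void *Decl;      // identity of the function owning this frame
  unsigned NumBlockIDs;  // stand-in for the callee CFG size
  const Frame *Parent;
};

inline void examine(const void *D, const Frame *F, unsigned AlwaysInlineSize,
                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;
  for (; F; F = F->Parent) {
    if (F->Decl == D) {
      IsRecursive = true;  // recursive (or mutually recursive) frame:
      ++StackDepth;        // always counted toward the depth
    } else if (F->NumBlockIDs > AlwaysInlineSize) {
      ++StackDepth;        // small functions are not counted
    }
  }
}

inline void demo() {
  int f, g;  // addresses serve as function identities
  Frame A{&f, 10, nullptr}, B{&g, 1, &A}, C{&f, 10, &B};
  bool Rec; unsigned Depth;
  examine(&f, &C, /*AlwaysInlineSize=*/3, Rec, Depth);
  assert(Rec && Depth == 2);  // two 'f' frames; the small 'g' frame is skipped
}
} // namespace sketch
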
// Determine if we should inline the call.
bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const CFG *CalleeCFG = CalleeADC->getCFG();

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  if (!CalleeCFG)
    return false;

  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= AMgr.options.InlineMaxStackDepth) &&
       ((CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
         || IsRecursive))
    return false;

  if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
    return false;

  if (CalleeCFG->getNumBlockIDs() > AMgr.options.InlineMaxFunctionSize)
    return false;

  // Do not inline variadic calls (for now).
  if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
    if (BD->isVariadic())
      return false;
  }
  else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->isVariadic())
      return false;
  }

  if (getContext().getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      // Conditionally allow the inlining of template functions.
      if (!getAnalysisManager().options.mayInlineTemplateFunctions())
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally allow the inlining of C++ standard library functions.
      if (!getAnalysisManager().options.mayInlineCXXStandardLibrary())
        if (getContext().getSourceManager().isInSystemHeader(FD->getLocation()))
          if (IsInStdNamespace(FD))
            return false;
    }
  }

  // It is possible that the live variables analysis cannot be
  // run.  If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}
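
// Sketch of the get-or-create idiom used above (toy types and assumed shapes,
// not the analyzer's ExplodedGraph API): a node is unique per (location,
// state) pair, and only freshly created nodes are enqueued, so a state that
// was already explored is never re-processed.
#include <deque>
#include <set>
#include <utility>

namespace sketch {
using Node = std::pair<int, int>;  // (program point id, state id)

struct Graph {
  std::set<Node> Nodes;
  const Node *getNode(int Loc, int State, bool &IsNew) {
    auto Result = Nodes.insert({Loc, State});
    IsNew = Result.second;  // true only if the node did not exist yet
    return &*Result.first;
  }
};

inline void demo() {
  Graph G;
  std::deque<const Node *> WorkList;
  bool IsNew;
  const Node *N = G.getNode(/*Loc=*/1, /*State=*/42, IsNew);
  if (IsNew)
    WorkList.push_back(N);  // only new nodes enter the worklist
}
} // namespace sketch
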
Example #4
void BlockDataRegion::LazyInitializeReferencedVars() {
  if (ReferencedVars)
    return;

  AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext();
  AnalysisDeclContext::referenced_decls_iterator I, E;
  llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());

  if (I == E) {
    ReferencedVars = (void*) 0x1;
    return;
  }

  MemRegionManager &MemMgr = *getMemRegionManager();
  llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
  BumpVectorContext BC(A);

  typedef BumpVector<const MemRegion*> VarVec;
  VarVec *BV = (VarVec*) A.Allocate<VarVec>();
  new (BV) VarVec(BC, E - I);
  VarVec *BVOriginal = (VarVec*) A.Allocate<VarVec>();
  new (BVOriginal) VarVec(BC, E - I);

  for ( ; I != E; ++I) {
    const VarDecl *VD = *I;
    const VarRegion *VR = 0;
    const VarRegion *OriginalVR = 0;

    if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage()) {
      VR = MemMgr.getVarRegion(VD, this);
      OriginalVR = MemMgr.getVarRegion(VD, LC);
    }
    else {
      if (LC) {
        VR = MemMgr.getVarRegion(VD, LC);
        OriginalVR = VR;
      }
      else {
        VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
        OriginalVR = MemMgr.getVarRegion(VD, LC);
      }
    }

    assert(VR);
    assert(OriginalVR);
    BV->push_back(VR, BC);
    BVOriginal->push_back(OriginalVR, BC);
  }

  ReferencedVars = BV;
  OriginalVars = BVOriginal;
}
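
// The (void*) 0x1 sentinel above distinguishes "not computed yet" (null) from
// "computed, but empty". A self-contained sketch of the same caching pattern,
// using standard-library types rather than the analyzer's allocator:
#include <utility>
#include <vector>

namespace sketch {
struct LazyVars {
  void *Cache = nullptr;  // nullptr => the expensive step has not run yet

  static void *emptySentinel() { return reinterpret_cast<void *>(0x1); }

  const std::vector<int> *get() {
    if (!Cache) {
      std::vector<int> Vars = compute();  // runs at most once
      Cache = Vars.empty() ? emptySentinel()
                           : new std::vector<int>(std::move(Vars));
    }
    return Cache == emptySentinel()
               ? nullptr
               : static_cast<std::vector<int> *>(Cache);
  }

  std::vector<int> compute() { return {}; }  // placeholder for the real work
};
} // namespace sketch
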
/// \brief Returns true if the location is 'self'.
static bool isSelfVar(SVal location, CheckerContext &C) {
  AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext(); 
  if (!analCtx->getSelfDecl())
    return false;
  if (!isa<loc::MemRegionVal>(location))
    return false;

  loc::MemRegionVal MRV = cast<loc::MemRegionVal>(location);
  if (const DeclRegion *DR = dyn_cast<DeclRegion>(MRV.getRegion()))
    return (DR->getDecl() == analCtx->getSelfDecl());

  return false;
}
Example #6
// Determine if we should inline the call.
bool ExprEngine::shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred) {
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
  const CFG *CalleeCFG = CalleeADC->getCFG();

  if (getNumberStackFrames(Pred->getLocationContext())
        == AMgr.InlineMaxStackDepth)
    return false;

  if (Engine.FunctionSummaries->hasReachedMaxBlockCount(FD))
    return false;

  if (CalleeCFG->getNumBlockIDs() > AMgr.InlineMaxFunctionSize)
    return false;

  return true;
}
bool ExprEngine::InlineCall(ExplodedNodeSet &Dst,
                            const CallExpr *CE, 
                            ExplodedNode *Pred) {
  if (!shouldInlineCallExpr(CE, this))
    return false;

  ProgramStateRef state = Pred->getState();
  const Expr *Callee = CE->getCallee();
  const FunctionDecl *FD =
    state->getSVal(Callee, Pred->getLocationContext()).getAsFunctionDecl();
  if (!FD || !FD->hasBody(FD))
    return false;
  
  switch (CE->getStmtClass()) {
    default:
      // FIXME: Handle C++.
      break;
    case Stmt::CallExprClass: {
      if (!shouldInlineDecl(FD, Pred))
        return false;

      // Construct a new stack frame for the callee.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
      const StackFrameContext *CallerSFC =
        Pred->getLocationContext()->getCurrentStackFrame();
      const StackFrameContext *CalleeSFC =
        CalleeADC->getStackFrame(CallerSFC, CE,
                                 currentBuilderContext->getBlock(),
                                 currentStmtIdx);
      
      CallEnter Loc(CE, CalleeSFC, Pred->getLocationContext());
      bool isNew;
      if (ExplodedNode *N = G.getNode(Loc, state, false, &isNew)) {
        N->addPredecessor(Pred, G);
        if (isNew)
          Engine.getWorkList()->enqueue(N);
      }
      return true;
    }
  }
  return false;
}
Example #8
static SourceLocation getValidSourceLocation(const Stmt* S,
                                             LocationOrAnalysisDeclContext LAC,
                                             bool UseEnd = false) {
  SourceLocation L = UseEnd ? S->getLocEnd() : S->getLocStart();
  assert(!LAC.isNull() && "A valid LocationContext or AnalysisDeclContext should "
                          "be passed to PathDiagnosticLocation upon creation.");

  // S might be a temporary statement that does not have a location in the
  // source code, so find an enclosing statement and use its location.
  if (!L.isValid()) {

    AnalysisDeclContext *ADC;
    if (LAC.is<const LocationContext*>())
      ADC = LAC.get<const LocationContext*>()->getAnalysisDeclContext();
    else
      ADC = LAC.get<AnalysisDeclContext*>();

    ParentMap &PM = ADC->getParentMap();

    const Stmt *Parent = S;
    do {
      Parent = PM.getParent(Parent);

      // In rare cases, we have implicit top-level expressions,
      // such as arguments for implicit member initializers.
      // In this case, fall back to the start of the body (even if we were
      // asked for the statement end location).
      if (!Parent) {
        const Stmt *Body = ADC->getBody();
        if (Body)
          L = Body->getLocStart();
        else
          L = ADC->getDecl()->getLocEnd();
        break;
      }

      L = UseEnd ? Parent->getLocEnd() : Parent->getLocStart();
    } while (!L.isValid());
  }

  return L;
}
Example #9
RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
  const FunctionDecl *FD = getDecl();
  // Note that the AnalysisDeclContext will have the FunctionDecl with
  // the definition (if one exists).
  if (FD) {
    AnalysisDeclContext *AD =
      getLocationContext()->getAnalysisDeclContext()->
      getManager()->getContext(FD);
    bool IsAutosynthesized;
    Stmt* Body = AD->getBody(IsAutosynthesized);
    DEBUG({
        if (IsAutosynthesized)
          llvm::dbgs() << "Using autosynthesized body for " << FD->getName()
                       << "\n";
    });
    if (Body) {
      const Decl* Decl = AD->getDecl();
      return RuntimeDefinition(Decl);
    }
  }

  return RuntimeDefinition();
}
Example #10
void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
                         Callback &CB) {

  CFG *cfg = AC.getCFG();
  if (!cfg)
    return;

  // Scan for reachable blocks from the entrance of the CFG.
  // If there are no unreachable blocks, we're done.
  llvm::BitVector reachable(cfg->getNumBlockIDs());
  unsigned numReachable =
    scanMaybeReachableFromBlock(&cfg->getEntry(), PP, reachable);
  if (numReachable == cfg->getNumBlockIDs())
    return;
  
  // If there aren't explicit EH edges, we should include the 'try' dispatch
  // blocks as roots.
  if (!AC.getCFGBuildOptions().AddEHEdges) {
    for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
         E = cfg->try_blocks_end() ; I != E; ++I) {
      numReachable += scanMaybeReachableFromBlock(*I, PP, reachable);
    }
    if (numReachable == cfg->getNumBlockIDs())
      return;
  }

  // There are some unreachable blocks.  We need to find the root blocks that
  // contain code that should be considered unreachable.  
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    const CFGBlock *block = *I;
    // A block may have been marked reachable during this loop.
    if (reachable[block->getBlockID()])
      continue;
    
    DeadCodeScan DS(reachable, PP);
    numReachable += DS.scanBackwards(block, CB);
    
    if (numReachable == cfg->getNumBlockIDs())
      return;
  }
}
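
// Minimal sketch of the reachability scan this function relies on (toy block
// type, not the analyzer's CFG): a breadth-first walk from an entry block that
// marks visited blocks in a bit vector and returns how many blocks were newly
// reached -- the count FindUnreachableCode compares against the total in order
// to stop early.
#include <queue>
#include <vector>

namespace sketch {
struct Block {
  unsigned ID;
  std::vector<const Block *> Succs;
};

inline unsigned scanReachable(const Block *Entry,
                              std::vector<bool> &Reachable) {
  unsigned Count = 0;
  std::queue<const Block *> WL;
  WL.push(Entry);
  while (!WL.empty()) {
    const Block *B = WL.front();
    WL.pop();
    if (Reachable[B->ID])
      continue;  // already marked, possibly by an earlier scan
    Reachable[B->ID] = true;
    ++Count;
    for (const Block *S : B->Succs)
      WL.push(S);
  }
  return Count;
}
} // namespace sketch
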
// Determine if we should inline the call.
bool ExprEngine::shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred) {
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
  const CFG *CalleeCFG = CalleeADC->getCFG();

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  if (!CalleeCFG)
    return false;

  if (getNumberStackFrames(Pred->getLocationContext())
        == AMgr.InlineMaxStackDepth)
    return false;

  if (Engine.FunctionSummaries->hasReachedMaxBlockCount(FD))
    return false;

  if (CalleeCFG->getNumBlockIDs() > AMgr.InlineMaxFunctionSize)
    return false;

  return true;
}
// Determine if we should inline the call.
bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) {
    // FIXME: default constructors don't have bodies.
    if (!D->hasBody())
        return false;

    AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
    const CFG *CalleeCFG = CalleeADC->getCFG();

    // It is possible that the CFG cannot be constructed.
    // Be safe, and check if the CalleeCFG is valid.
    if (!CalleeCFG)
        return false;

    if (getNumberStackFrames(Pred->getLocationContext())
            == AMgr.InlineMaxStackDepth)
        return false;

    if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D))
        return false;

    if (CalleeCFG->getNumBlockIDs() > AMgr.InlineMaxFunctionSize)
        return false;

    // Do not inline variadic calls (for now).
    if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
        if (BD->isVariadic())
            return false;
    }
    else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
        if (FD->isVariadic())
            return false;
    }

    // It is possible that the live variables analysis cannot be
    // run.  If so, bail out.
    if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
        return false;

    return true;
}
Example #13
void BlockDataRegion::LazyInitializeReferencedVars() {
  if (ReferencedVars)
    return;

  AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext();
  const auto &ReferencedBlockVars = AC->getReferencedBlockVars(BC->getDecl());
  auto NumBlockVars =
      std::distance(ReferencedBlockVars.begin(), ReferencedBlockVars.end());

  if (NumBlockVars == 0) {
    ReferencedVars = (void*) 0x1;
    return;
  }

  MemRegionManager &MemMgr = *getMemRegionManager();
  llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
  BumpVectorContext BC(A);

  typedef BumpVector<const MemRegion*> VarVec;
  VarVec *BV = A.Allocate<VarVec>();
  new (BV) VarVec(BC, NumBlockVars);
  VarVec *BVOriginal = A.Allocate<VarVec>();
  new (BVOriginal) VarVec(BC, NumBlockVars);

  for (const VarDecl *VD : ReferencedBlockVars) {
    const VarRegion *VR = nullptr;
    const VarRegion *OriginalVR = nullptr;
    std::tie(VR, OriginalVR) = getCaptureRegions(VD);
    assert(VR);
    assert(OriginalVR);
    BV->push_back(VR, BC);
    BVOriginal->push_back(OriginalVR, BC);
  }

  ReferencedVars = BV;
  OriginalVars = BVOriginal;
}
Example #14
void BlockDataRegion::LazyInitializeReferencedVars() {
  if (ReferencedVars)
    return;

  AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext();
  AnalysisDeclContext::referenced_decls_iterator I, E;
  llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());

  if (I == E) {
    ReferencedVars = (void*) 0x1;
    return;
  }

  MemRegionManager &MemMgr = *getMemRegionManager();
  llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
  BumpVectorContext BC(A);

  typedef BumpVector<const MemRegion*> VarVec;
  VarVec *BV = (VarVec*) A.Allocate<VarVec>();
  new (BV) VarVec(BC, E - I);
  VarVec *BVOriginal = (VarVec*) A.Allocate<VarVec>();
  new (BVOriginal) VarVec(BC, E - I);

  for ( ; I != E; ++I) {
    const VarRegion *VR = 0;
    const VarRegion *OriginalVR = 0;
    llvm::tie(VR, OriginalVR) = getCaptureRegions(*I);
    assert(VR);
    assert(OriginalVR);
    BV->push_back(VR, BC);
    BVOriginal->push_back(OriginalVR, BC);
  }

  ReferencedVars = BV;
  OriginalVars = BVOriginal;
}
Example #15
void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
                                                  BugReporter &BR,
                                                  ExprEngine &Eng) const {
  BugType *BT = new BugType("Idempotent operation", "Dead code");
  // Iterate over the hash to see if we have any paths with definite
  // idempotent operations.
  for (AssumptionMap::const_iterator i = hash.begin(); i != hash.end(); ++i) {
    // Unpack the hash contents
    const BinaryOperatorData &Data = i->second;
    const Assumption &A = Data.assumption;
    const ExplodedNodeSet &ES = Data.explodedNodes;

    // If there are no nodes associated with the expression, nothing to report.
    // FIXME: This is possible because the checker does part of processing in
    // checkPreStmt and part in checkPostStmt.
    if (ES.begin() == ES.end())
      continue;

    const BinaryOperator *B = i->first;

    if (A == Impossible)
      continue;

    // If the analyzer did not finish, check to see if we can still emit this
    // warning.
    if (Eng.hasWorkRemaining()) {
      // If we can trace back to the analysis context, check whether this
      // path was completely analyzed.
      AnalysisDeclContext *AC = (*ES.begin())->getLocationContext()
                                         ->getAnalysisDeclContext();
      if (!pathWasCompletelyAnalyzed(AC,
                                     AC->getCFGStmtMap()->getBlock(B),
                                     Eng.getCoreEngine()))
        continue;
    }

    // Select the error message and SourceRanges to report.
    llvm::SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    bool LHSRelevant = false, RHSRelevant = false;
    switch (A) {
    case Equal:
      LHSRelevant = true;
      RHSRelevant = true;
      if (B->getOpcode() == BO_Assign)
        os << "Assigned value is always the same as the existing value";
      else
        os << "Both operands to '" << B->getOpcodeStr()
           << "' always have the same value";
      break;
    case LHSis1:
      LHSRelevant = true;
      os << "The left operand to '" << B->getOpcodeStr() << "' is always 1";
      break;
    case RHSis1:
      RHSRelevant = true;
      os << "The right operand to '" << B->getOpcodeStr() << "' is always 1";
      break;
    case LHSis0:
      LHSRelevant = true;
      os << "The left operand to '" << B->getOpcodeStr() << "' is always 0";
      break;
    case RHSis0:
      RHSRelevant = true;
      os << "The right operand to '" << B->getOpcodeStr() << "' is always 0";
      break;
    case Possible:
      llvm_unreachable("Operation was never marked with an assumption");
    case Impossible:
      llvm_unreachable(0);
    }

    // Add a report for each ExplodedNode
    for (ExplodedNodeSet::iterator I = ES.begin(), E = ES.end(); I != E; ++I) {
      BugReport *report = new BugReport(*BT, os.str(), *I);

      // Add source ranges and visitor hooks
      if (LHSRelevant) {
        const Expr *LHS = i->first->getLHS();
        report->addRange(LHS->getSourceRange());
        FindLastStoreBRVisitor::registerStatementVarDecls(*report, LHS);
      }
      if (RHSRelevant) {
        const Expr *RHS = i->first->getRHS();
        report->addRange(RHS->getSourceRange());
        FindLastStoreBRVisitor::registerStatementVarDecls(*report, RHS);
      }

      BR.EmitReport(report);
    }
  }

  hash.clear();
}
bool ExprEngine::inlineCall(ExplodedNodeSet &Dst,
                            const CallEvent &Call,
                            ExplodedNode *Pred) {
    if (!getAnalysisManager().shouldInlineCall())
        return false;

    const StackFrameContext *CallerSFC =
        Pred->getLocationContext()->getCurrentStackFrame();

    const Decl *D = Call.getDecl();
    const LocationContext *ParentOfCallee = 0;

    switch (Call.getKind()) {
    case CE_Function:
    case CE_CXXMember:
        // These are always at least possible to inline.
        break;
    case CE_CXXMemberOperator:
        // FIXME: This should be possible to inline, but
        // RegionStore::enterStackFrame isn't smart enough to handle the first
        // argument being 'this'. The correct solution is to use CallEvent in
        // enterStackFrame as well.
        return false;
    case CE_CXXConstructor:
        // Do not inline constructors until we can model destructors.
        // This is unfortunate, but basically necessary for smart pointers and such.
        return false;
    case CE_CXXAllocator:
        // Do not inline allocators until we model deallocators.
        // This is unfortunate, but basically necessary for smart pointers and such.
        return false;
    case CE_Block: {
        const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
        if (!BR)
            return false;
        D = BR->getDecl();
        AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
        ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                             cast<BlockDecl>(D),
                                                             BR);
        break;
    }
    case CE_ObjCMessage:
    case CE_ObjCPropertyAccess:
        // These always use dynamic dispatch; enabling inlining means assuming
        // that a particular method will be called at runtime.
        return false;
    }

    if (!D || !shouldInlineDecl(D, Pred))
        return false;

    if (!ParentOfCallee)
        ParentOfCallee = CallerSFC;

    const Expr *CallE = Call.getOriginExpr();
    assert(CallE && "It is not yet possible to have calls without statements");

    // Construct a new stack frame for the callee.
    AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
    const StackFrameContext *CalleeSFC =
        CalleeADC->getStackFrame(ParentOfCallee, CallE,
                                 currentBuilderContext->getBlock(),
                                 currentStmtIdx);

    CallEnter Loc(CallE, CalleeSFC, Pred->getLocationContext());
    bool isNew;
    if (ExplodedNode *N = G.getNode(Loc, Pred->getState(), false, &isNew)) {
        N->addPredecessor(Pred, G);
        if (isNew)
            Engine.getWorkList()->enqueue(N);
    }
    return true;
}
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = 0;

  const AnalyzerOptions &Opts = getAnalysisManager().options;

  // FIXME: Refactor this check into a hypothetical CallEvent::canInline.
  switch (Call.getKind()) {
  case CE_Function:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return false;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return false;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    // FIXME: This is a hack. We don't use the correct region for a new
    // expression, so if we inline the constructor its result will just be
    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
    // and the longer-term possible fix is discussed in PR12014.
    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
      if (isa<CXXNewExpr>(Parent))
        return false;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;
    
    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
      if (!Target || !isa<DeclRegion>(Target))
        return false;

    break;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return false;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
    if (!(getAnalysisManager().options.IPAMode == DynamicDispatch ||
          getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate))
      return false;
    break;
  }

  if (!shouldInlineDecl(D, Pred))
    return false;
  
  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currBldrCtx->getBlock(),
                             currStmtIdx);
  
  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}
bool ExprEngine::inlineCall(ExplodedNodeSet &Dst,
                            const CallEvent &Call,
                            ExplodedNode *Pred) {
  if (!getAnalysisManager().shouldInlineCall())
    return false;

  bool IsDynamicDispatch;
  const Decl *D = Call.getDefinition(IsDynamicDispatch);
  if (!D || IsDynamicDispatch)
    return false;

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = 0;

  switch (Call.getKind()) {
  case CE_Function:
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    // These are always at least possible to inline.
    break;
  case CE_CXXConstructor:
  case CE_CXXDestructor:
    // Do not inline constructors until we can really model destructors.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
  case CE_ObjCPropertyAccess:
    // These always use dynamic dispatch; enabling inlining means assuming
    // that a particular method will be called at runtime.
    llvm_unreachable("Dynamic dispatch should be handled above.");
  }

  if (!shouldInlineDecl(D, Pred))
    return false;
  
  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currentBuilderContext->getBlock(),
                             currentStmtIdx);
  
  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  ProgramStateRef State = Pred->getState()->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }
  return true;
}
bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // Temporary object destructor processing is currently broken, so we never
  // inline them.
  // FIXME: Remove this once temp destructors are working.
  if (isa<CXXDestructorCall>(Call)) {
    if ((*currBldrCtx->getBlock())[currStmtIdx].getAs<CFGTemporaryDtor>())
      return false;
  }

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC, Opts)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  const CFG *CalleeCFG = CalleeADC->getCFG();

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
       || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.getMaxTimesInlineLarge()) &&
      CalleeCFG->getNumBlockIDs() > 13) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal &&
      (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
      || IsRecursive))
    return false;

  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  return true;
}
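
// Sketch of the tri-state memoization used above (toy types; the real code
// keeps this in FunctionSummaries): a declaration's static inlinability is
// unknown, known-good, or known-bad, and the static-property check runs at
// most once per declaration.
#include <map>
#include <optional>

namespace sketch {
struct Summaries {
  std::map<const void *, bool> MayInlineMap;

  std::optional<bool> mayInline(const void *D) const {
    auto It = MayInlineMap.find(D);
    if (It == MayInlineMap.end())
      return std::nullopt;  // not decided yet
    return It->second;
  }
  void markMayInline(const void *D) { MayInlineMap[D] = true; }
  void markShouldNotInline(const void *D) { MayInlineMap[D] = false; }
};

inline bool passesStaticChecks(const void *D, Summaries &S,
                               bool (*staticCheck)(const void *)) {
  if (std::optional<bool> Known = S.mayInline(D))
    return *Known;  // reuse the cached decision
  bool OK = staticCheck(D);  // the expensive check runs only once per decl
  if (OK)
    S.markMayInline(D);
  else
    S.markShouldNotInline(D);
  return OK;
}
} // namespace sketch
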
/// This method checks when a nullable pointer or null value is returned from
/// a function that has a nonnull return type.
void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
                                      CheckerContext &C) const {
  auto RetExpr = S->getRetValue();
  if (!RetExpr)
    return;

  if (!RetExpr->getType()->isAnyPointerType())
    return;

  ProgramStateRef State = C.getState();
  if (State->get<InvariantViolated>())
    return;

  auto RetSVal = C.getSVal(S).getAs<DefinedOrUnknownSVal>();
  if (!RetSVal)
    return;

  bool InSuppressedMethodFamily = false;

  QualType RequiredRetType;
  AnalysisDeclContext *DeclCtxt =
      C.getLocationContext()->getAnalysisDeclContext();
  const Decl *D = DeclCtxt->getDecl();
  if (auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
    // HACK: This is a big hammer to avoid warning when there are defensive
    // nil checks in -init and -copy methods. We should add more sophisticated
    // logic here to suppress on common defensive idioms but still
    // warn when there is a likely problem.
    ObjCMethodFamily Family = MD->getMethodFamily();
    if (OMF_init == Family || OMF_copy == Family || OMF_mutableCopy == Family)
      InSuppressedMethodFamily = true;

    RequiredRetType = MD->getReturnType();
  } else if (auto *FD = dyn_cast<FunctionDecl>(D)) {
    RequiredRetType = FD->getReturnType();
  } else {
    return;
  }

  NullConstraint Nullness = getNullConstraint(*RetSVal, State);

  Nullability RequiredNullability = getNullabilityAnnotation(RequiredRetType);

  // If the returned value is null but the type of the expression
  // generating it is nonnull then we will suppress the diagnostic.
  // This enables explicit suppression when returning a nil literal in a
  // function with a _Nonnull return type:
  //    return (NSString * _Nonnull)0;
  Nullability RetExprTypeLevelNullability =
        getNullabilityAnnotation(lookThroughImplicitCasts(RetExpr)->getType());

  bool NullReturnedFromNonNull = (RequiredNullability == Nullability::Nonnull &&
                                  Nullness == NullConstraint::IsNull);
  if (Filter.CheckNullReturnedFromNonnull &&
      NullReturnedFromNonNull &&
      RetExprTypeLevelNullability != Nullability::Nonnull &&
      !InSuppressedMethodFamily &&
      C.getLocationContext()->inTopFrame()) {
    static CheckerProgramPointTag Tag(this, "NullReturnedFromNonnull");
    ExplodedNode *N = C.generateErrorNode(State, &Tag);
    if (!N)
      return;

    SmallString<256> SBuf;
    llvm::raw_svector_ostream OS(SBuf);
    OS << (RetExpr->getType()->isObjCObjectPointerType() ? "nil" : "Null");
    OS << " returned from a " << C.getDeclDescription(D) <<
          " that is expected to return a non-null value";
    reportBugIfInvariantHolds(OS.str(),
                              ErrorKind::NilReturnedToNonnull, N, nullptr, C,
                              RetExpr);
    return;
  }

  // If null was returned from a non-null function, mark the nullability
  // invariant as violated even if the diagnostic was suppressed.
  if (NullReturnedFromNonNull) {
    State = State->set<InvariantViolated>(true);
    C.addTransition(State);
    return;
  }

  const MemRegion *Region = getTrackRegion(*RetSVal);
  if (!Region)
    return;

  const NullabilityState *TrackedNullability =
      State->get<NullabilityMap>(Region);
  if (TrackedNullability) {
    Nullability TrackedNullabValue = TrackedNullability->getValue();
    if (Filter.CheckNullableReturnedFromNonnull &&
        Nullness != NullConstraint::IsNotNull &&
        TrackedNullabValue == Nullability::Nullable &&
        RequiredNullability == Nullability::Nonnull) {
      static CheckerProgramPointTag Tag(this, "NullableReturnedFromNonnull");
      ExplodedNode *N = C.addTransition(State, C.getPredecessor(), &Tag);

      SmallString<256> SBuf;
      llvm::raw_svector_ostream OS(SBuf);
      OS << "Nullable pointer is returned from a " << C.getDeclDescription(D) <<
            " that is expected to return a non-null value";

      reportBugIfInvariantHolds(OS.str(),
                                ErrorKind::NullableReturnedToNonnull, N,
                                Region, C);
    }
    return;
  }
  if (RequiredNullability == Nullability::Nullable) {
    State = State->set<NullabilityMap>(Region,
                                       NullabilityState(RequiredNullability,
                                                        S));
    C.addTransition(State);
  }
}
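
// Toy condensation (assumed enum shapes, not the checker's real types) of the
// two diagnostics above: a known-null value returned from a nonnull-annotated
// function, and a tracked-nullable value returned from a nonnull-annotated
// function.
enum class Nullability { Unspecified, Nullable, Nonnull };
enum class NullConstraint { IsNull, IsNotNull, Unknown };

inline bool diagnoseNullReturn(NullConstraint Nullness,
                               Nullability Required) {
  return Nullness == NullConstraint::IsNull &&
         Required == Nullability::Nonnull;
}

inline bool diagnoseNullableReturn(NullConstraint Nullness,
                                   Nullability Tracked,
                                   Nullability Required) {
  return Nullness != NullConstraint::IsNotNull &&
         Tracked == Nullability::Nullable &&
         Required == Nullability::Nonnull;
}
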
Example #21
/// This method checks when a nullable pointer or null value is returned from
/// a function that has a nonnull return type.
///
/// TODO: when nullability preconditions are violated, it is OK to violate the
/// nullability postconditions (i.e. when one of the nonnull parameters is
/// null, this check should not report any nullability-related issue).
void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
                                      CheckerContext &C) const {
  auto RetExpr = S->getRetValue();
  if (!RetExpr)
    return;

  if (!RetExpr->getType()->isAnyPointerType())
    return;

  ProgramStateRef State = C.getState();
  if (State->get<PreconditionViolated>())
    return;

  auto RetSVal =
      State->getSVal(S, C.getLocationContext()).getAs<DefinedOrUnknownSVal>();
  if (!RetSVal)
    return;

  AnalysisDeclContext *DeclCtxt =
      C.getLocationContext()->getAnalysisDeclContext();
  const FunctionType *FuncType = DeclCtxt->getDecl()->getFunctionType();
  if (!FuncType)
    return;

  NullConstraint Nullness = getNullConstraint(*RetSVal, State);

  Nullability StaticNullability =
      getNullabilityAnnotation(FuncType->getReturnType());

  if (Filter.CheckNullReturnedFromNonnull &&
      Nullness == NullConstraint::IsNull &&
      StaticNullability == Nullability::Nonnull) {
    static CheckerProgramPointTag Tag(this, "NullReturnedFromNonnull");
    ExplodedNode *N = C.generateErrorNode(State, &Tag);
    if (!N)
      return;
    reportBugIfPreconditionHolds(ErrorKind::NilReturnedToNonnull, N, nullptr, C,
                                 RetExpr);
    return;
  }

  const MemRegion *Region = getTrackRegion(*RetSVal);
  if (!Region)
    return;

  const NullabilityState *TrackedNullability =
      State->get<NullabilityMap>(Region);
  if (TrackedNullability) {
    Nullability TrackedNullabValue = TrackedNullability->getValue();
    if (Filter.CheckNullableReturnedFromNonnull &&
        Nullness != NullConstraint::IsNotNull &&
        TrackedNullabValue == Nullability::Nullable &&
        StaticNullability == Nullability::Nonnull) {
      static CheckerProgramPointTag Tag(this, "NullableReturnedFromNonnull");
      ExplodedNode *N = C.addTransition(State, C.getPredecessor(), &Tag);
      reportBugIfPreconditionHolds(ErrorKind::NullableReturnedToNonnull, N,
                                   Region, C);
    }
    return;
  }
  if (StaticNullability == Nullability::Nullable) {
    State = State->set<NullabilityMap>(Region,
                                       NullabilityState(StaticNullability, S));
    C.addTransition(State);
  }
}
Example #22
bool ExprEngine::inlineCall(const CallEvent &Call,
                            ExplodedNode *Pred) {
  if (!getAnalysisManager().shouldInlineCall())
    return false;

  const Decl *D = Call.getRuntimeDefinition();
  if (!D)
    return false;

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
  const LocationContext *ParentOfCallee = 0;

  switch (Call.getKind()) {
  case CE_Function:
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    // These are always at least possible to inline.
    break;
  case CE_CXXConstructor:
  case CE_CXXDestructor: {
    // Only inline constructors and destructors if we built the CFGs for them
    // properly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    if (!ADC->getCFGBuildOptions().AddImplicitDtors ||
        !ADC->getCFGBuildOptions().AddInitializers)
      return false;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    const MemRegion *Target = Call.getCXXThisVal().getAsRegion();
    if (Target && isa<ElementRegion>(Target))
      return false;

    // FIXME: This is a hack. We don't handle temporary destructors
    // right now, so we shouldn't inline their constructors.
    if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
      const CXXConstructExpr *CtorExpr = Ctor->getOriginExpr();
      if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
        if (!Target || !isa<DeclRegion>(Target))
          return false;
    }
    break;
  }
  case CE_CXXAllocator:
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return false;
  case CE_Block: {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
    break;
  }
  case CE_ObjCMessage:
    break;
  }

  if (!shouldInlineDecl(D, Pred))
    return false;
  
  if (!ParentOfCallee)
    ParentOfCallee = CallerSFC;

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
    CalleeADC->getStackFrame(ParentOfCallee, CallE,
                             currentBuilderContext->getBlock(),
                             currentStmtIdx);
  
  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  ProgramStateRef State = Pred->getState()->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }
  return true;
}