void lfort::runUninitializedVariablesAnalysis(
    const DeclContext &dc,
    const CFG &cfg,
    AnalysisDeclContext &ac,
    UninitVariablesHandler &handler,
    UninitVariablesAnalysisStats &stats) {
  CFGBlockValues vals(cfg);
  vals.computeSetOfDeclarations(dc);
  if (vals.hasNoDeclarations())
    return;

  stats.NumVariablesAnalyzed = vals.getNumEntries();

  // Precompute which expressions are uses and which are initializations.
  ClassifyRefs classification(ac);
  cfg.VisitBlockStmts(classification);

  // Mark all variables uninitialized at the entry.
  const CFGBlock &entry = cfg.getEntry();
  ValueVector &vec = vals.getValueVector(&entry);
  const unsigned n = vals.getNumEntries();
  for (unsigned j = 0; j < n; ++j) {
    vec[j] = Uninitialized;
  }

  // Proceed with the worklist.
  DataflowWorklist worklist(cfg, *ac.getAnalysis<PostOrderCFGView>());
  llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
  worklist.enqueueSuccessors(&cfg.getEntry());
  llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
  wasAnalyzed[cfg.getEntry().getBlockID()] = true;
  PruneBlocksHandler PBH(cfg.getNumBlockIDs());

  while (const CFGBlock *block = worklist.dequeue()) {
    PBH.currentBlock = block->getBlockID();

    // Did the block change?
    bool changed = runOnBlock(block, cfg, ac, vals,
                              classification, wasAnalyzed, PBH);
    ++stats.NumBlockVisits;
    if (changed || !previouslyVisited[block->getBlockID()])
      worklist.enqueueSuccessors(block);
    previouslyVisited[block->getBlockID()] = true;
  }

  if (!PBH.hadAnyUse)
    return;

  // Run through the blocks one more time, and report uninitialized variables.
  for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
    const CFGBlock *block = *BI;
    if (PBH.hadUse[block->getBlockID()]) {
      runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed, handler);
      ++stats.NumBlockVisits;
    }
  }
}
void clang::runUninitializedVariablesAnalysis(
    const DeclContext &dc,
    const CFG &cfg,
    AnalysisDeclContext &ac,
    UninitVariablesHandler &handler,
    UninitVariablesAnalysisStats &stats) {
  CFGBlockValues vals(cfg);
  vals.computeSetOfDeclarations(dc);
  if (vals.hasNoDeclarations())
    return;

  stats.NumVariablesAnalyzed = vals.getNumEntries();

  // Mark all variables uninitialized at the entry.
  const CFGBlock &entry = cfg.getEntry();
  for (CFGBlock::const_succ_iterator i = entry.succ_begin(),
       e = entry.succ_end(); i != e; ++i) {
    if (const CFGBlock *succ = *i) {
      ValueVector &vec = vals.getValueVector(&entry, succ);
      const unsigned n = vals.getNumEntries();
      for (unsigned j = 0; j < n; ++j) {
        vec[j] = Uninitialized;
      }
    }
  }

  // Proceed with the worklist.
  DataflowWorklist worklist(cfg);
  llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
  worklist.enqueueSuccessors(&cfg.getEntry());
  llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
  wasAnalyzed[cfg.getEntry().getBlockID()] = true;

  while (const CFGBlock *block = worklist.dequeue()) {
    // Did the block change?
    bool changed = runOnBlock(block, cfg, ac, vals, wasAnalyzed);
    ++stats.NumBlockVisits;
    if (changed || !previouslyVisited[block->getBlockID()])
      worklist.enqueueSuccessors(block);
    previouslyVisited[block->getBlockID()] = true;
  }

  // Run through the blocks one more time, and report uninitialized variables.
  for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
    const CFGBlock *block = *BI;
    if (wasAnalyzed[block->getBlockID()]) {
      runOnBlock(block, cfg, ac, vals, wasAnalyzed, &handler);
      ++stats.NumBlockVisits;
    }
  }
}
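// Both runUninitializedVariablesAnalysis variants above instantiate the same
// forward dataflow pattern: seed the entry state, then repeatedly pull a block
// off a worklist and re-enqueue its successors whenever its recomputed state
// changes (or on the first visit), until a fixed point is reached. Below is a
// minimal, self-contained sketch of that loop; Block, State, and TransferFn
// are illustrative stand-ins, not Clang/LFort types.
#include <deque>
#include <vector>

struct Block {
  unsigned id;
  std::vector<Block*> succs;
};

// TransferFn recomputes a block's state in `states` and returns true if it
// changed; the client analysis supplies it (an assumption of this sketch).
template <typename State, typename TransferFn>
void runForwardDataflow(Block &entry, std::vector<State> &states,
                        TransferFn transfer) {
  std::vector<bool> visited(states.size(), false);
  std::vector<bool> queued(states.size(), false);
  std::deque<Block*> worklist;
  worklist.push_back(&entry);
  queued[entry.id] = true;

  while (!worklist.empty()) {
    Block *b = worklist.front();
    worklist.pop_front();
    queued[b->id] = false;

    bool changed = transfer(*b, states);
    // Mirror the functions above: enqueue successors if the state changed or
    // if this block has never been processed before.
    if (changed || !visited[b->id]) {
      for (Block *s : b->succs) {
        if (!queued[s->id]) {
          worklist.push_back(s);
          queued[s->id] = true;
        }
      }
    }
    visited[b->id] = true;
  }
}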
void ProgAnalysis::traverseCFG(CFG *cfg, std::set<CFG *> &cfgs,
                               std::set<CFG *> &cfgs_done) {
  // How rules are added depends only on whether the pds is of type
  // wpds::WPDS or not (all others inherit from wpds::ewpds::EWPDS).
  wali::wpds::ewpds::EWPDS *epds = 0;
  if (wpdsType != USING_WPDS) {
    epds = static_cast<wali::wpds::ewpds::EWPDS *>(pds);
  }

  const std::set<CFGEdge *> &edges = cfg->getEdges();
  std::set<CFGEdge *>::const_iterator it;
  for (it = edges.begin(); it != edges.end(); it++) {
    CFGEdge *edge = *it;
    if (!edge->isCall()) {
      pds->add_rule(pds_state, edge->getSource()->getWpdsKey(),
                    pds_state, edge->getTarget()->getWpdsKey(),
                    edge->getWeight());
    } else {
      CFG *callee = edge->getCallee();
      if (wpdsType == USING_WPDS) {
        pds->add_rule(pds_state, edge->getSource()->getWpdsKey(),
                      pds_state, callee->getEntry()->getWpdsKey(),
                      edge->getTarget()->getWpdsKey(),
                      edge->getWeight());
      } else {
        epds->add_rule(pds_state, edge->getSource()->getWpdsKey(),
                       pds_state, callee->getEntry()->getWpdsKey(),
                       edge->getTarget()->getWpdsKey(),
                       edge->getWeight(), edge->getMergeFn());
      }
      if (cfgs_done.find(callee) == cfgs_done.end()) {
        cfgs.insert(callee);
      }
    }
    // Store a weight if we don't have a valid one yet.
    if (!se.is_valid()) {
      se = edge->getWeight();
    }
  }
}
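// traverseCFG above encodes the program as a (weighted) pushdown system: an
// ordinary CFG edge becomes a rule that rewrites one stack symbol, while a
// call edge becomes a push rule whose second right-hand stack symbol records
// the return point. A hedged sketch of the two rule shapes follows; Key and
// Weight are illustrative aliases, not the actual WALi types.
#include <cstdint>
#include <vector>

using Key = std::uint64_t; // stands in for a WPDS state/stack key
using Weight = int;        // stands in for a semiring weight handle

struct PdsRule {
  Key fromState, fromStack;  // left-hand side <p, y>
  Key toState;               // right-hand control state
  std::vector<Key> toStack;  // one symbol: step rule; two symbols: push rule
  Weight w;
};

// Intraprocedural edge src -> tgt: <p, src> --w--> <p, tgt>
PdsRule stepRule(Key p, Key src, Key tgt, Weight w) {
  return PdsRule{p, src, p, {tgt}, w};
}

// Call edge at src into callee entry, returning to ret:
// <p, src> --w--> <p, entry ret>
PdsRule pushRule(Key p, Key src, Key entry, Key ret, Weight w) {
  return PdsRule{p, src, p, {entry, ret}, w};
}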
void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
                         Callback &CB) {
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return;

  // Scan for reachable blocks from the entrance of the CFG.
  // If there are no unreachable blocks, we're done.
  llvm::BitVector reachable(cfg->getNumBlockIDs());
  unsigned numReachable =
      scanMaybeReachableFromBlock(&cfg->getEntry(), PP, reachable);
  if (numReachable == cfg->getNumBlockIDs())
    return;

  // If there aren't explicit EH edges, we should include the 'try' dispatch
  // blocks as roots.
  if (!AC.getCFGBuildOptions().AddEHEdges) {
    for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
         E = cfg->try_blocks_end(); I != E; ++I) {
      numReachable += scanMaybeReachableFromBlock(*I, PP, reachable);
    }
    if (numReachable == cfg->getNumBlockIDs())
      return;
  }

  // There are some unreachable blocks. We need to find the root blocks that
  // contain code that should be considered unreachable.
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    const CFGBlock *block = *I;
    // A block may have been marked reachable during this loop.
    if (reachable[block->getBlockID()])
      continue;

    DeadCodeScan DS(reachable, PP);
    numReachable += DS.scanBackwards(block, CB);

    if (numReachable == cfg->getNumBlockIDs())
      return;
  }
}
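// Both this function and the older FindUnreachableCode at the end of this
// section lean on a reachability scan that marks every block transitively
// reachable from a root and returns how many blocks it newly discovered. A
// minimal sketch of such a scan over a hypothetical CfgBlock type follows;
// the real scanMaybeReachableFromBlock also takes the Preprocessor,
// presumably to treat configuration-dependent code specially, which this
// sketch omits.
#include <vector>

struct CfgBlock {
  unsigned id;
  std::vector<const CfgBlock*> succs;
};

unsigned scanReachableSketch(const CfgBlock *root,
                             std::vector<bool> &reachable) {
  unsigned count = 0;
  std::vector<const CfgBlock*> stack(1, root);
  while (!stack.empty()) {
    const CfgBlock *b = stack.back();
    stack.pop_back();
    if (!b || reachable[b->id])
      continue;          // null successor or already visited
    reachable[b->id] = true;
    ++count;             // newly discovered block
    for (const CfgBlock *s : b->succs)
      stack.push_back(s);
  }
  return count;
}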
void SystemDependenceGraph::build() {
  boost::unordered_map<CFGVertex, Vertex> cfgVerticesToSdgVertices;
  boost::unordered_map<SgNode*, Vertex> astNodesToSdgVertices;
  //map<SgFunctionCallExp*, vector<SDGNode*> > funcCallToArgs;
  vector<CallSiteInfo> functionCalls;
  map<SgNode*, vector<Vertex> > actualInParameters;
  map<SgNode*, vector<Vertex> > actualOutParameters;
  map<SgNode*, Vertex> formalInParameters;
  map<SgNode*, Vertex> formalOutParameters;

  vector<SgFunctionDefinition*> funcDefs =
      SageInterface::querySubTree<SgFunctionDefinition>(
          project_, V_SgFunctionDefinition);
  foreach (SgFunctionDefinition* funcDef, funcDefs)
  {
    SgFunctionDeclaration* funcDecl = funcDef->get_declaration();
    CFG* cfg = new CFG(funcDef, cfgNodefilter_);
    functionsToCFGs_[funcDecl] = cfg;

    // For each function, build an entry node for it.
    SDGNode* entry = new SDGNode(SDGNode::Entry);
    entry->astNode = funcDef;
    //entry->funcDef = funcDef;
    Vertex entryVertex = addVertex(entry);
    functionsToEntries_[funcDecl] = entryVertex;

    // Add all out formal parameters to the SDG.
    const SgInitializedNamePtrList& formalArgs = funcDecl->get_args();
    foreach (SgInitializedName* initName, formalArgs)
    {
      // If the parameter is passed by reference, create a formal-out node.
      if (isParaPassedByRef(initName->get_type()))
      {
        SDGNode* formalOutNode = new SDGNode(SDGNode::FormalOut);
        formalOutNode->astNode = initName;
        Vertex formalOutVertex = addVertex(formalOutNode);
        formalOutParameters[initName] = formalOutVertex;

        // Add a CD edge from the entry node to this formal-out node.
        addTrueCDEdge(entryVertex, formalOutVertex);
      }
    }

    // A vertex representing the returned value.
    Vertex returnVertex;

    // If the function returns something, build a formal-out node.
    if (!isSgTypeVoid(funcDecl->get_type()->get_return_type()))
    {
      SDGNode* formalOutNode = new SDGNode(SDGNode::FormalOut);
      // Assign the function declaration to the AST node of this vertex to make
      // it possible to classify this node into the subgraph of this function.
      formalOutNode->astNode = funcDecl;
      returnVertex = addVertex(formalOutNode);
      formalOutParameters[funcDecl] = returnVertex;

      // Add a CD edge from the entry node to this formal-out node.
      addTrueCDEdge(entryVertex, returnVertex);
    }

    // Add all CFG vertices to the SDG.
    foreach (CFGVertex cfgVertex, boost::vertices(*cfg))
    {
      if (cfgVertex == cfg->getEntry() || cfgVertex == cfg->getExit())
        continue;

      SgNode* astNode = (*cfg)[cfgVertex]->getNode();

      // If this node is an initialized name and it is a parameter, treat it
      // as a formal-in node.
      SgInitializedName* initName = isSgInitializedName(astNode);
      if (initName && isSgFunctionParameterList(initName->get_parent()))
      {
        SDGNode* formalInNode = new SDGNode(SDGNode::FormalIn);
        formalInNode->astNode = initName;
        Vertex formalInVertex = addVertex(formalInNode);
        formalInParameters[initName] = formalInVertex;

        cfgVerticesToSdgVertices[cfgVertex] = formalInVertex;
        astNodesToSdgVertices[astNode] = formalInVertex;

        // Add a CD edge from the entry node to this formal-in node.
        addTrueCDEdge(entryVertex, formalInVertex);
        continue;
      }

      // Add a new node to the SDG.
      SDGNode* newSdgNode = new SDGNode(SDGNode::ASTNode);
      //newSdgNode->cfgNode = (*cfg)[cfgVertex];
      newSdgNode->astNode = astNode;
      Vertex sdgVertex = addVertex(newSdgNode);

      cfgVerticesToSdgVertices[cfgVertex] = sdgVertex;
      astNodesToSdgVertices[astNode] = sdgVertex;

      // Connect a vertex containing the return statement to the formal-out
      // return vertex.
      if (isSgReturnStmt(astNode) || isSgReturnStmt(astNode->get_parent()))
      {
        SDGEdge* newEdge = new SDGEdge(SDGEdge::DataDependence);
        addEdge(sdgVertex, returnVertex, newEdge);
      }

      // If this CFG node contains a function call expression, extract all its
      // parameters and mark them as actual-in nodes.
      if (SgFunctionCallExp* funcCallExpr = isSgFunctionCallExp(astNode))
      {
        CallSiteInfo callInfo;
        callInfo.funcCall = funcCallExpr;
        callInfo.vertex = sdgVertex;

        // Change the node type.
        newSdgNode->type = SDGNode::FunctionCall;
        vector<SDGNode*> argsNodes;

        // Get the associated function declaration.
        SgFunctionDeclaration* funcDecl =
            funcCallExpr->getAssociatedFunctionDeclaration();
        if (funcDecl == NULL)
          continue;

        const SgInitializedNamePtrList& formalArgs = funcDecl->get_args();
        SgExprListExp* args = funcCallExpr->get_args();
        const SgExpressionPtrList& actualArgs = args->get_expressions();

        if (formalArgs.size() != actualArgs.size())
        {
          cout << "The following function has variadic arguments:\n";
          cout << funcDecl->get_file_info()->get_filename() << endl;
          cout << funcDecl->get_name() << " " << formalArgs.size()
               << " " << actualArgs.size() << endl;
          continue;
        }

        for (int i = 0, s = actualArgs.size(); i < s; ++i)
        {
          // This parameter node must already be in the SDG; change its node
          // type from a normal AST node to an actual-in argument.
          ROSE_ASSERT(astNodesToSdgVertices.count(actualArgs[i]));
          Vertex paraInVertex = astNodesToSdgVertices.at(actualArgs[i]);
          SDGNode* paraInNode = (*this)[paraInVertex];
          paraInNode->type = SDGNode::ActualIn;

          actualInParameters[formalArgs[i]].push_back(paraInVertex);
          callInfo.inPara.push_back(paraInVertex);

          // Add a CD edge from the call node to this actual-in node.
          addTrueCDEdge(sdgVertex, paraInVertex);

          // If the parameter is passed by reference, create an actual-out
          // node.
          if (isParaPassedByRef(formalArgs[i]->get_type()))
          {
            SDGNode* paraOutNode = new SDGNode(SDGNode::ActualOut);
            paraOutNode->astNode = actualArgs[i];
            //argsNodes.push_back(paraInNode);

            // Add an actual-out parameter node.
            Vertex paraOutVertex = addVertex(paraOutNode);
            actualOutParameters[formalArgs[i]].push_back(paraOutVertex);
            callInfo.outPara.push_back(paraOutVertex);

            // Add a CD edge from the call node to this actual-out node.
            addTrueCDEdge(sdgVertex, paraOutVertex);
          }
        }

        if (!isSgTypeVoid(funcDecl->get_type()->get_return_type()))
        {
          // If this function returns a value, create an actual-out vertex.
          SDGNode* paraOutNode = new SDGNode(SDGNode::ActualOut);
          paraOutNode->astNode = funcCallExpr;

          // Add an actual-out parameter node.
          Vertex paraOutVertex = addVertex(paraOutNode);
          actualOutParameters[funcDecl].push_back(paraOutVertex);
          callInfo.outPara.push_back(paraOutVertex);
          callInfo.isVoid = false;
          callInfo.returned = paraOutVertex;

          // Add a CD edge from the call node to this actual-out node.
          addTrueCDEdge(sdgVertex, paraOutVertex);
        }

        functionCalls.push_back(callInfo);
        //funcCallToArgs[funcCallExpr] = argsNodes;
      }
    }

    // Add control dependence edges.
    addControlDependenceEdges(cfgVerticesToSdgVertices, *cfg, entryVertex);
  }
}
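// build() leaves behind formalInParameters and actualInParameters keyed by
// the same SgInitializedName, which suggests that call sites are wired to
// their callees in a later pass by joining the two maps. That pass is not
// shown in this excerpt; the sketch below is only a guess at its shape, with
// Vertex, AstKey, and addParameterInEdge as hypothetical stand-ins rather
// than ROSE APIs.
#include <map>
#include <vector>

using Vertex = int;         // stands in for the SDG vertex descriptor
using AstKey = const void*; // stands in for SgNode*/SgInitializedName*

void linkParametersSketch(
    const std::map<AstKey, Vertex> &formalIn,
    const std::map<AstKey, std::vector<Vertex> > &actualIn,
    void (*addParameterInEdge)(Vertex from, Vertex to)) {
  for (const auto &entry : actualIn) {
    auto formal = formalIn.find(entry.first);
    if (formal == formalIn.end())
      continue; // no definition seen for the callee; nothing to link
    for (Vertex actual : entry.second)
      addParameterInEdge(actual, formal->second); // actual-in -> formal-in
  }
}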
/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return. We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return. We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0)
    return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead. Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;
  for (CFGBlock::pred_iterator I = cfg->getExit().pred_begin(),
       E = cfg->getExit().pred_end(); I != E; ++I) {
    CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;
    if (B.size() == 0) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }
    Stmt *S = B[B.size() - 1];
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
          == B.succ_end()) {
        HasAbnormalEdge = true;
        continue;
      }
      Expr *CEE = C->getCallee()->IgnoreParenCasts();
      if (getFunctionExtInfo(CEE->getType()).getNoReturn()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Remove this hack once temporaries and their destructors are
    // modeled correctly by the CFG.
    if (CXXExprWithTemporaries *E = dyn_cast<CXXExprWithTemporaries>(S)) {
      for (unsigned I = 0, N = E->getNumTemporaries(); I != N; ++I) {
        const FunctionDecl *FD = E->getTemporary(I)->getDestructor();
        if (FD->hasAttr<NoReturnAttr>() ||
            FD->getType()->getAs<FunctionType>()->getNoReturnAttr()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
          break;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return. If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
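// The verdict at the end of CheckFallThrough depends only on the four edge
// flags. Restated as a pure function for clarity; this mirrors the logic
// above, with the enum redeclared locally so the sketch stands alone.
enum ControlFlowKindSketch {
  NeverFallThroughSketch,
  NeverFallThroughOrReturnSketch,
  MaybeFallThroughSketch,
  AlwaysFallThroughSketch
};

ControlFlowKindSketch classifyFallThrough(bool hasLiveReturn, bool hasFakeEdge,
                                          bool hasPlainEdge,
                                          bool hasAbnormalEdge) {
  if (!hasPlainEdge) // no ordinary edge ever reaches the exit block
    return hasLiveReturn ? NeverFallThroughSketch
                         : NeverFallThroughOrReturnSketch;
  if (hasAbnormalEdge || hasFakeEdge || hasLiveReturn)
    return MaybeFallThroughSketch; // some paths fall off the end, some don't
  return AlwaysFallThroughSketch;
}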
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph)
    return;
  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
  if (!D)
    return; // Ignore anonymous functions for now.
  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
    return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector? Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  if (!SortedGraph.empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph.begin();
    Lockset &InitialLockset = EntryLocksets[FirstBlock->getBlockID()];
    const AttrVec &ArgAttrs = D->getAttrs();
    for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
      Attr *Attr = ArgAttrs[i];
      SourceLocation AttrLoc = Attr->getLocation();
      if (SharedLocksRequiredAttr *SLRAttr =
              dyn_cast<SharedLocksRequiredAttr>(Attr)) {
        for (SharedLocksRequiredAttr::args_iterator
             SLRIter = SLRAttr->args_begin(),
             SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *SLRIter, D, LK_Shared, AttrLoc);
      } else if (ExclusiveLocksRequiredAttr *ELRAttr =
                     dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
        for (ExclusiveLocksRequiredAttr::args_iterator
             ELRIter = ELRAttr->args_begin(),
             ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *ELRIter, D, LK_Exclusive, AttrLoc);
      }
    }
  }

  for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
       E = SortedGraph.end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();

    VisitedBlocks.insert(CurrBlock);

    // Use the default initial lockset in case there are no predecessors.
    Lockset &Entryset = EntryLocksets[CurrBlockID];
    Lockset &Exitset = ExitLocksets[CurrBlockID];

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // Skip *PI -> CurrBlock if it is a back edge (its source has not been
      // visited yet in the topological order).
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      if (!LocksetInitialized) {
        Entryset = ExitLocksets[PrevBlockID];
        LocksetInitialized = true;
      } else {
        Entryset = intersectAndWarn(Handler, Entryset,
                                    ExitLocksets[PrevBlockID], LocksetFactory,
                                    LEK_LockedSomePredecessors);
      }
    }

    BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
        LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
    }
    Exitset = LocksetBuilder.getLockset();

    // For every back edge from CurrBlock (the end of the loop) to another
    // block (FirstLoopBlock) we need to check that the Lockset of Block is
    // equal to the one held at the beginning of FirstLoopBlock. We can look
    // up the Lockset held at the beginning of FirstLoopBlock in the
    // EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // Skip CurrBlock -> *SI if it is *not* a back edge (its target has not
      // been visited yet).
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
      Lockset LoopEnd = ExitLocksets[CurrBlockID];
      intersectAndWarn(Handler, LoopEnd, PreLoop, LocksetFactory,
                       LEK_LockedSomeLoopIterations);
    }
  }

  Lockset InitialLockset = EntryLocksets[CFGraph->getEntry().getBlockID()];
  Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];

  // FIXME: Should we call this function for all blocks which exit the
  // function?
  intersectAndWarn(Handler, InitialLockset, FinalLockset, LocksetFactory,
                   LEK_LockedAtEndOfFunction);
}
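// The analysis above hinges on intersectAndWarn: a block's entry lockset is
// the intersection of its predecessors' exit locksets, and any lock held on
// only some incoming paths is reported. A minimal sketch of that operation
// over std::set follows; the real Lockset is an immutable map and the handler
// receives richer diagnostics, so the names here are illustrative only.
#include <functional>
#include <set>
#include <string>

using LocksetSketch = std::set<std::string>; // mutex names, for illustration

LocksetSketch intersectAndWarnSketch(
    const LocksetSketch &a, const LocksetSketch &b,
    const std::function<void(const std::string&)> &warn) {
  LocksetSketch result;
  for (const std::string &m : a) {
    if (b.count(m))
      result.insert(m); // held on both paths: stays in the lockset
    else
      warn(m);          // held on only one path: thread-safety warning
  }
  for (const std::string &m : b)
    if (!a.count(m))
      warn(m);
  return result;
}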
/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return. We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return. We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0)
    return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead. Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
       I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Destructors can appear after the 'return' in the CFG. This is
    // normal. We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
    bool hasNoReturnDtor = false;

    for ( ; ri != re; ++ri) {
      CFGElement CE = *ri;

      // FIXME: The right solution is to just sever the edges in the
      // CFG itself.
      if (const CFGImplicitDtor *iDtor = ri->getAs<CFGImplicitDtor>())
        if (iDtor->isNoReturn(AC.getASTContext())) {
          hasNoReturnDtor = true;
          HasFakeEdge = true;
          break;
        }

      if (isa<CFGStmt>(CE))
        break;
    }

    if (hasNoReturnDtor)
      continue;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = cast<CFGStmt>(*ri);
    Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
          == B.succ_end()) {
        HasAbnormalEdge = true;
        continue;
      }
      Expr *CEE = C->getCallee()->IgnoreParenCasts();
      QualType calleeType = CEE->getType();
      if (calleeType == AC.getASTContext().BoundMemberTy) {
        calleeType = Expr::findBoundMemberType(CEE);
        assert(!calleeType.isNull() && "analyzing unresolved call?");
      }
      if (getFunctionExtInfo(calleeType).getNoReturn()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }

  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return. If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
void FindUnreachableCode(AnalysisContext &AC, Callback &CB) {
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return;

  // Scan for reachable blocks.
  llvm::BitVector reachable(cfg->getNumBlockIDs());
  unsigned numReachable = ScanReachableFromBlock(cfg->getEntry(), reachable);

  // If there are no unreachable blocks, we're done.
  if (numReachable == cfg->getNumBlockIDs())
    return;

  SourceRange R1, R2;

  llvm::SmallVector<ErrLoc, 24> lines;
  bool AddEHEdges = AC.getAddEHEdges();

  // First, give warnings for blocks with no predecessors, as they
  // can't be part of a loop.
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    CFGBlock &b = **I;
    if (!reachable[b.getBlockID()]) {
      if (b.pred_empty()) {
        if (!AddEHEdges &&
            dyn_cast_or_null<CXXTryStmt>(b.getTerminator().getStmt())) {
          // When not adding EH edges from calls, catch clauses
          // can otherwise seem dead. Avoid noting them as dead.
          numReachable += ScanReachableFromBlock(b, reachable);
          continue;
        }
        SourceLocation c = GetUnreachableLoc(b, R1, R2);
        if (!c.isValid()) {
          // Blocks without a location can't produce a warning, so don't mark
          // reachable blocks from here as live.
          reachable.set(b.getBlockID());
          ++numReachable;
          continue;
        }
        lines.push_back(ErrLoc(c, R1, R2));
        // Avoid excessive errors by marking everything reachable from here.
        numReachable += ScanReachableFromBlock(b, reachable);
      }
    }
  }

  if (numReachable < cfg->getNumBlockIDs()) {
    // And then give warnings for the tops of loops.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!reachable[b.getBlockID()])
        // Avoid excessive errors by marking everything reachable from here.
        lines.push_back(ErrLoc(MarkLiveTop(&b, reachable,
                                   AC.getASTContext().getSourceManager()),
                               SourceRange(), SourceRange()));
    }
  }

  llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp);

  for (llvm::SmallVectorImpl<ErrLoc>::iterator I = lines.begin(),
       E = lines.end(); I != E; ++I)
    if (I->Loc.isValid())
      CB.HandleUnreachable(I->Loc, I->R1, I->R2);
}