void lfort::runUninitializedVariablesAnalysis(
    const DeclContext &dc,
    const CFG &cfg,
    AnalysisDeclContext &ac,
    UninitVariablesHandler &handler,
    UninitVariablesAnalysisStats &stats) {
  CFGBlockValues vals(cfg);
  vals.computeSetOfDeclarations(dc);
  if (vals.hasNoDeclarations())
    return;

  stats.NumVariablesAnalyzed = vals.getNumEntries();

  // Precompute which expressions are uses and which are initializations.
  ClassifyRefs classification(ac);
  cfg.VisitBlockStmts(classification);

  // Mark all variables uninitialized at the entry.
  const CFGBlock &entry = cfg.getEntry();
  ValueVector &vec = vals.getValueVector(&entry);
  const unsigned n = vals.getNumEntries();
  for (unsigned j = 0; j < n; ++j)
    vec[j] = Uninitialized;

  // Proceed with the worklist.
  DataflowWorklist worklist(cfg, *ac.getAnalysis<PostOrderCFGView>());
  llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
  worklist.enqueueSuccessors(&cfg.getEntry());
  llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
  wasAnalyzed[cfg.getEntry().getBlockID()] = true;
  PruneBlocksHandler PBH(cfg.getNumBlockIDs());

  while (const CFGBlock *block = worklist.dequeue()) {
    PBH.currentBlock = block->getBlockID();

    // Did the block change?
    bool changed = runOnBlock(block, cfg, ac, vals,
                              classification, wasAnalyzed, PBH);
    ++stats.NumBlockVisits;
    if (changed || !previouslyVisited[block->getBlockID()])
      worklist.enqueueSuccessors(block);
    previouslyVisited[block->getBlockID()] = true;
  }

  if (!PBH.hadAnyUse)
    return;

  // Run through the blocks one more time, and report uninitialized variables.
  for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
    const CFGBlock *block = *BI;
    if (PBH.hadUse[block->getBlockID()]) {
      runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed, handler);
      ++stats.NumBlockVisits;
    }
  }
}
void clang::runUninitializedVariablesAnalysis(
    const DeclContext &dc,
    const CFG &cfg,
    AnalysisDeclContext &ac,
    UninitVariablesHandler &handler,
    UninitVariablesAnalysisStats &stats) {
  CFGBlockValues vals(cfg);
  vals.computeSetOfDeclarations(dc);
  if (vals.hasNoDeclarations())
    return;

  stats.NumVariablesAnalyzed = vals.getNumEntries();

  // Mark all variables uninitialized at the entry.
  const CFGBlock &entry = cfg.getEntry();
  for (CFGBlock::const_succ_iterator i = entry.succ_begin(),
       e = entry.succ_end(); i != e; ++i) {
    if (const CFGBlock *succ = *i) {
      ValueVector &vec = vals.getValueVector(&entry, succ);
      const unsigned n = vals.getNumEntries();
      for (unsigned j = 0; j < n; ++j)
        vec[j] = Uninitialized;
    }
  }

  // Proceed with the worklist.
  DataflowWorklist worklist(cfg);
  llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
  worklist.enqueueSuccessors(&cfg.getEntry());
  llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
  wasAnalyzed[cfg.getEntry().getBlockID()] = true;

  while (const CFGBlock *block = worklist.dequeue()) {
    // Did the block change?
    bool changed = runOnBlock(block, cfg, ac, vals, wasAnalyzed);
    ++stats.NumBlockVisits;
    if (changed || !previouslyVisited[block->getBlockID()])
      worklist.enqueueSuccessors(block);
    previouslyVisited[block->getBlockID()] = true;
  }

  // Run through the blocks one more time, and report uninitialized variables.
  for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
    const CFGBlock *block = *BI;
    if (wasAnalyzed[block->getBlockID()]) {
      runOnBlock(block, cfg, ac, vals, wasAnalyzed, &handler);
      ++stats.NumBlockVisits;
    }
  }
}
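// A minimal, self-contained sketch (not Clang API) of the forward dataflow
// worklist pattern both functions above use: every entry starts Uninitialized,
// a block's successors are re-enqueued only when its transfer function changed
// the value or the block has never been visited, and iteration stops at a
// fixed point. The Block struct, transfer(), and graph shape are hypothetical
// stand-ins for CFGBlock, runOnBlock(), and the real CFG.
#include <cstdio>
#include <queue>
#include <vector>

enum Value { Uninitialized = 0, Initialized = 1 };

struct Block {
  std::vector<int> succs;      // successor block IDs
  Value val = Uninitialized;   // dataflow fact for this block
};

// Hypothetical transfer function: block 2 initializes the variable.
static bool transfer(std::vector<Block> &cfg, int id) {
  Value out = (id == 2) ? Initialized : cfg[id].val;
  bool changed = (out != cfg[id].val);
  cfg[id].val = out;
  return changed;
}

int main() {
  // Tiny diamond CFG: 0 -> {1, 2}, 1 -> 3, 2 -> 3.
  std::vector<Block> cfg(4);
  cfg[0].succs = {1, 2};
  cfg[1].succs = {3};
  cfg[2].succs = {3};

  std::vector<bool> previouslyVisited(cfg.size(), false);
  std::queue<int> worklist;
  worklist.push(0);

  while (!worklist.empty()) {
    int id = worklist.front();
    worklist.pop();
    bool changed = transfer(cfg, id);
    // Mirrors the "changed || !previouslyVisited" re-enqueue test above.
    if (changed || !previouslyVisited[id])
      for (int s : cfg[id].succs)
        worklist.push(s);
    previouslyVisited[id] = true;
  }

  for (unsigned i = 0; i < cfg.size(); ++i)
    std::printf("block %u: %d\n", i, cfg[i].val);
}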
void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
                         Callback &CB) {
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return;

  // Scan for reachable blocks from the entrance of the CFG.
  // If there are no unreachable blocks, we're done.
  llvm::BitVector reachable(cfg->getNumBlockIDs());
  unsigned numReachable =
    scanMaybeReachableFromBlock(&cfg->getEntry(), PP, reachable);
  if (numReachable == cfg->getNumBlockIDs())
    return;

  // If there aren't explicit EH edges, we should include the 'try' dispatch
  // blocks as roots.
  if (!AC.getCFGBuildOptions().AddEHEdges) {
    for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
         E = cfg->try_blocks_end(); I != E; ++I) {
      numReachable += scanMaybeReachableFromBlock(*I, PP, reachable);
    }
    if (numReachable == cfg->getNumBlockIDs())
      return;
  }

  // There are some unreachable blocks.  We need to find the root blocks that
  // contain code that should be considered unreachable.
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    const CFGBlock *block = *I;
    // A block may have been marked reachable during this loop.
    if (reachable[block->getBlockID()])
      continue;

    DeadCodeScan DS(reachable, PP);
    numReachable += DS.scanBackwards(block, CB);

    if (numReachable == cfg->getNumBlockIDs())
      return;
  }
}
CFGReverseBlockReachabilityAnalysis::CFGReverseBlockReachabilityAnalysis(
    const CFG &cfg)
  : analyzed(cfg.getNumBlockIDs(), false) {}
/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit block
  // and look for fall through paths, being careful to ignore normal returns,
  // and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;
  for (CFGBlock::pred_iterator I = cfg->getExit().pred_begin(),
       E = cfg->getExit().pred_end(); I != E; ++I) {
    CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;
    if (B.size() == 0) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }
    Stmt *S = B[B.size()-1];
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
            == B.succ_end()) {
        HasAbnormalEdge = true;
        continue;
      }
      Expr *CEE = C->getCallee()->IgnoreParenCasts();
      if (getFunctionExtInfo(CEE->getType()).getNoReturn()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Remove this hack once temporaries and their destructors are
    // modeled correctly by the CFG.
    if (CXXExprWithTemporaries *E = dyn_cast<CXXExprWithTemporaries>(S)) {
      for (unsigned I = 0, N = E->getNumTemporaries(); I != N; ++I) {
        const FunctionDecl *FD = E->getTemporary(I)->getDestructor();
        if (FD->hasAttr<NoReturnAttr>() ||
            FD->getType()->getAs<FunctionType>()->getNoReturnAttr()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
          break;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
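// Illustrative inputs (not analysis code) and the ControlFlowKind that
// CheckFallThrough above would be expected to report for each. The function
// names are hypothetical test subjects; 'fatal' stands in for any declaration
// carrying the noreturn attribute. These deliberately trigger -Wreturn-type.
extern void fatal() __attribute__((noreturn));

int alwaysFalls(int x) {
  if (x > 0) x++;
}                        // AlwaysFallThrough: no path returns a value.

int maybeFalls(int x) {
  if (x > 0) return 1;
}                        // MaybeFallThrough: falls off when x <= 0.

int neverFalls(int x) {
  if (x > 0) return 1;
  fatal();               // noreturn call: counted as a "fake" edge.
}                        // NeverFallThrough: every path returns or aborts.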
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
  if (!D) return;  // Ignore anonymous functions for now.
  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
    return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector? Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  if (!SortedGraph.empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph.begin();
    Lockset &InitialLockset = EntryLocksets[FirstBlock->getBlockID()];
    const AttrVec &ArgAttrs = D->getAttrs();
    for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
      Attr *Attr = ArgAttrs[i];
      SourceLocation AttrLoc = Attr->getLocation();
      if (SharedLocksRequiredAttr *SLRAttr
            = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
        for (SharedLocksRequiredAttr::args_iterator
             SLRIter = SLRAttr->args_begin(),
             SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *SLRIter, D, LK_Shared, AttrLoc);
      } else if (ExclusiveLocksRequiredAttr *ELRAttr
                   = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
        for (ExclusiveLocksRequiredAttr::args_iterator
             ELRIter = ELRAttr->args_begin(),
             ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *ELRIter, D, LK_Exclusive, AttrLoc);
      }
    }
  }

  for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
       E = SortedGraph.end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();

    VisitedBlocks.insert(CurrBlock);

    // Use the default initial lockset in case there are no predecessors.
    Lockset &Entryset = EntryLocksets[CurrBlockID];
    Lockset &Exitset = ExitLocksets[CurrBlockID];

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      if (!LocksetInitialized) {
        Entryset = ExitLocksets[PrevBlockID];
        LocksetInitialized = true;
      } else {
        Entryset = intersectAndWarn(Handler, Entryset,
                                    ExitLocksets[PrevBlockID], LocksetFactory,
                                    LEK_LockedSomePredecessors);
      }
    }

    BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
        LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
    }
    Exitset = LocksetBuilder.getLockset();

    // For every back edge from CurrBlock (the end of the loop) to another
    // block (FirstLoopBlock) we need to check that the Lockset of Block is
    // equal to the one held at the beginning of FirstLoopBlock. We can look
    // up the Lockset held at the beginning of FirstLoopBlock in the
    // EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
      Lockset LoopEnd = ExitLocksets[CurrBlockID];
      intersectAndWarn(Handler, LoopEnd, PreLoop, LocksetFactory,
                       LEK_LockedSomeLoopIterations);
    }
  }

  Lockset InitialLockset = EntryLocksets[CFGraph->getEntry().getBlockID()];
  Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];

  // FIXME: Should we call this function for all blocks which exit the
  // function?
  intersectAndWarn(Handler, InitialLockset, FinalLockset, LocksetFactory,
                   LEK_LockedAtEndOfFunction);
}
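// Illustrative input (not analysis code) for the join logic above: on the
// path where 'c' is false, 'mu' is not held, so the entry lockset of the
// block after the first 'if' is the intersection of {mu} and {} — empty —
// and intersectAndWarn reports that 'mu' was locked on only some
// predecessors. The Mutex class and variable names are hypothetical; the
// attributes are the legacy Clang thread-safety spellings.
struct __attribute__((lockable)) Mutex {
  void Lock() __attribute__((exclusive_lock_function));
  void Unlock() __attribute__((unlock_function));
};

Mutex mu;
int data __attribute__((guarded_by(mu)));

void inconsistent(bool c) {
  if (c)
    mu.Lock();           // exit lockset of this branch: {mu}
  data = 1;              // join point: intersection is {} -> warning here
  if (c)
    mu.Unlock();
}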
LiveVariables *
LiveVariables::computeLiveness(AnalysisDeclContext &AC, bool killAtAssign) {
  // No CFG?  Bail out.
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return nullptr;

  // The analysis currently has scalability issues for very large CFGs.
  // Bail out if it looks too large.
  if (cfg->getNumBlockIDs() > 300000)
    return nullptr;

  LiveVariablesImpl *LV = new LiveVariablesImpl(AC, killAtAssign);

  // Construct the dataflow worklist.  Enqueue the exit block as the
  // start of the analysis.
  DataflowWorklist worklist(*cfg, AC);
  llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());

  // FIXME: we should enqueue using post order.
  for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
    const CFGBlock *block = *it;
    worklist.enqueueBlock(block);

    // FIXME: Scan for DeclRefExprs used in the LHS of an assignment.
    // We need to do this because we lack context in the reverse analysis
    // to determine if a DeclRefExpr appears in such a context, and thus
    // doesn't constitute a "use".
    if (killAtAssign)
      for (CFGBlock::const_iterator bi = block->begin(), be = block->end();
           bi != be; ++bi) {
        if (Optional<CFGStmt> cs = bi->getAs<CFGStmt>()) {
          if (const BinaryOperator *BO =
                dyn_cast<BinaryOperator>(cs->getStmt())) {
            if (BO->getOpcode() == BO_Assign) {
              if (const DeclRefExpr *DR =
                    dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) {
                LV->inAssignment[DR] = 1;
              }
            }
          }
        }
      }
  }

  while (const CFGBlock *block = worklist.dequeue()) {
    // Determine if the block's end value has changed.  If not, we
    // have nothing left to do for this block.
    LivenessValues &prevVal = LV->blocksEndToLiveness[block];

    // Merge the values of all successor blocks.
    LivenessValues val;
    for (CFGBlock::const_succ_iterator it = block->succ_begin(),
         ei = block->succ_end(); it != ei; ++it) {
      if (const CFGBlock *succ = *it) {
        val = LV->merge(val, LV->blocksBeginToLiveness[succ]);
      }
    }

    if (!everAnalyzedBlock[block->getBlockID()])
      everAnalyzedBlock[block->getBlockID()] = true;
    else if (prevVal.equals(val))
      continue;

    prevVal = val;

    // Update the dataflow value for the start of this block.
    LV->blocksBeginToLiveness[block] = LV->runOnBlock(block, val);

    // Enqueue the value to the predecessors.
    worklist.enqueuePredecessors(block);
  }

  return new LiveVariables(LV);
}
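// Illustrative input (hypothetical names, not analysis code) for the
// backward fixed point above: liveness at a block's start is the merge over
// its successors' start values, and with killAtAssign the LHS DeclRefExpr of
// 'x = 2' is recorded in inAssignment and treated as a kill rather than a use.
int liveness_demo(bool c) {
  int x = 1;             // 'x' is dead here: killed by the write below
  x = 2;                 // killAtAssign: this LHS is not a "use" of 'x'
  int y = x;             // 'x' is live from the assignment above to here
  if (c)
    return y;            // 'y' is live on this path
  return 0;              // 'y' is dead here; the merge keeps it live at the branch
}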
/// CheckFallThrough - Check that we don't fall off the end of a
/// Statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0) return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(b, live);
          continue;
        }
      }
    }

  // Now we know what is live, we check the live predecessors of the exit block
  // and look for fall through paths, being careful to ignore normal returns,
  // and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
         I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Destructors can appear after the 'return' in the CFG.  This is
    // normal.  We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
    bool hasNoReturnDtor = false;

    for ( ; ri != re ; ++ri) {
      CFGElement CE = *ri;

      // FIXME: The right solution is to just sever the edges in the
      // CFG itself.
      if (const CFGImplicitDtor *iDtor = ri->getAs<CFGImplicitDtor>())
        if (iDtor->isNoReturn(AC.getASTContext())) {
          hasNoReturnDtor = true;
          HasFakeEdge = true;
          break;
        }

      if (isa<CFGStmt>(CE))
        break;
    }

    if (hasNoReturnDtor)
      continue;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = cast<CFGStmt>(*ri);
    Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
            == B.succ_end()) {
        HasAbnormalEdge = true;
        continue;
      }
      Expr *CEE = C->getCallee()->IgnoreParenCasts();

      QualType calleeType = CEE->getType();
      if (calleeType == AC.getASTContext().BoundMemberTy) {
        calleeType = Expr::findBoundMemberType(CEE);
        assert(!calleeType.isNull() && "analyzing unresolved call?");
      }

      if (getFunctionExtInfo(calleeType).getNoReturn()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }

  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be more
  // accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
void FindUnreachableCode(AnalysisContext &AC, Callback &CB) {
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return;

  // Scan for reachable blocks.
  llvm::BitVector reachable(cfg->getNumBlockIDs());
  unsigned numReachable = ScanReachableFromBlock(cfg->getEntry(), reachable);

  // If there are no unreachable blocks, we're done.
  if (numReachable == cfg->getNumBlockIDs())
    return;

  SourceRange R1, R2;

  llvm::SmallVector<ErrLoc, 24> lines;

  bool AddEHEdges = AC.getAddEHEdges();

  // First, give warnings for blocks with no predecessors, as they
  // can't be part of a loop.
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    CFGBlock &b = **I;
    if (!reachable[b.getBlockID()]) {
      if (b.pred_empty()) {
        if (!AddEHEdges &&
            dyn_cast_or_null<CXXTryStmt>(b.getTerminator().getStmt())) {
          // When not adding EH edges from calls, catch clauses
          // can otherwise seem dead.  Avoid noting them as dead.
          numReachable += ScanReachableFromBlock(b, reachable);
          continue;
        }
        SourceLocation c = GetUnreachableLoc(b, R1, R2);
        if (!c.isValid()) {
          // Blocks without a location can't produce a warning, so don't mark
          // reachable blocks from here as live.
          reachable.set(b.getBlockID());
          ++numReachable;
          continue;
        }
        lines.push_back(ErrLoc(c, R1, R2));
        // Avoid excessive errors by marking everything reachable from here.
        numReachable += ScanReachableFromBlock(b, reachable);
      }
    }
  }

  if (numReachable < cfg->getNumBlockIDs()) {
    // And then give warnings for the tops of loops.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!reachable[b.getBlockID()])
        // Avoid excessive errors by marking everything reachable from here.
        lines.push_back(ErrLoc(MarkLiveTop(&b, reachable,
                                 AC.getASTContext().getSourceManager()),
                               SourceRange(), SourceRange()));
    }
  }

  llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp);

  for (llvm::SmallVectorImpl<ErrLoc>::iterator I = lines.begin(),
       E = lines.end(); I != E; ++I)
    if (I->Loc.isValid())
      CB.HandleUnreachable(I->Loc, I->R1, I->R2);
}
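// Illustrative input (hypothetical, not analysis code) for both versions of
// FindUnreachableCode above: the statement after 'return' sits in a block
// with no reachable predecessors, so it is reported as a root unreachable
// block via Callback::HandleUnreachable, and everything reachable from it is
// marked to avoid cascading warnings.
int unreachable_demo(int x) {
  return x;
  x++;                   // unreachable: warned once, as the root block
}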