LiveVariables::LiveVariables(AnalysisContext &AC) {
  // Register all referenced VarDecls.
  CFG &cfg = *AC.getCFG();
  getAnalysisData().setCFG(cfg);
  getAnalysisData().setContext(AC.getASTContext());
  getAnalysisData().AC = &AC;

  RegisterDecls R(getAnalysisData());
  cfg.VisitBlockStmts(R);

  // Register all parameters even if they didn't occur in the function body.
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(AC.getDecl()))
    for (FunctionDecl::param_const_iterator PI = FD->param_begin(),
         PE = FD->param_end(); PI != PE; ++PI)
      getAnalysisData().Register(*PI);
}
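// A hypothetical input (not from this file) showing why the parameter loop
// above is needed: RegisterDecls only visits declarations appearing in CFG
// block statements, so a parameter never referenced in the body would
// otherwise get no liveness entry.
int ignores_its_argument(int unused) {
  // 'unused' is registered only by the param_begin()/param_end() loop.
  return 42;
}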
void FindUnreachableCode(AnalysisContext &AC, Callback &CB) {
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return;

  // Scan for reachable blocks from the entrance of the CFG.
  // If there are no unreachable blocks, we're done.
  llvm::BitVector reachable(cfg->getNumBlockIDs());
  unsigned numReachable = ScanReachableFromBlock(&cfg->getEntry(), reachable);
  if (numReachable == cfg->getNumBlockIDs())
    return;

  // If there aren't explicit EH edges, we should include the 'try' dispatch
  // blocks as roots.
  if (!AC.getCFGBuildOptions().AddEHEdges) {
    for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
         E = cfg->try_blocks_end(); I != E; ++I) {
      numReachable += ScanReachableFromBlock(*I, reachable);
    }
    if (numReachable == cfg->getNumBlockIDs())
      return;
  }

  // There are some unreachable blocks.  We need to find the root blocks that
  // contain code that should be considered unreachable.
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    const CFGBlock *block = *I;
    // A block may have been marked reachable during this loop.
    if (reachable[block->getBlockID()])
      continue;

    DeadCodeScan DS(reachable);
    numReachable += DS.scanBackwards(block, CB);

    if (numReachable == cfg->getNumBlockIDs())
      return;
  }
}
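// A minimal, hypothetical input this pass would flag: every path returns
// before the final statement, so its block is never marked by the
// reachability scan and DeadCodeScan reports it through the Callback.
int has_dead_tail(bool b) {
  if (b)
    return 1;
  return 0;
  b = !b;  // unreachable: every path above already returned
}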
/// CheckFallThrough - Check that we don't fall off the end of a
/// statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0)
    return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(b, live);
          continue;
        }
      }
    }

  // Now that we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;
  for (CFGBlock::pred_iterator I = cfg->getExit().pred_begin(),
       E = cfg->getExit().pred_end(); I != E; ++I) {
    CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;
    if (B.size() == 0) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }
    Stmt *S = B[B.size() - 1];
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
            == B.succ_end()) {
        HasAbnormalEdge = true;
        continue;
      }
      Expr *CEE = C->getCallee()->IgnoreParenCasts();
      if (getFunctionExtInfo(CEE->getType()).getNoReturn()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Remove this hack once temporaries and their destructors are
    // modeled correctly by the CFG.
    if (CXXExprWithTemporaries *E = dyn_cast<CXXExprWithTemporaries>(S)) {
      for (unsigned I = 0, N = E->getNumTemporaries(); I != N; ++I) {
        const FunctionDecl *FD = E->getTemporary(I)->getDestructor();
        if (FD->hasAttr<NoReturnAttr>() ||
            FD->getType()->getAs<FunctionType>()->getNoReturnAttr()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
          break;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }
  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be
  // more accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
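// A minimal, hypothetical input illustrating the classification: the exit
// block has a live 'return' predecessor and a live plain edge (the x == 0
// path), so this function is MaybeFallThrough.
int sign_of(int x) {
  if (x > 0)
    return 1;
  if (x < 0)
    return -1;
  // Falls off the end when x == 0.
}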
void IdempotentOperationChecker::VisitEndAnalysis(ExplodedGraph &G,
                                                  BugReporter &BR,
                                                  ExprEngine &Eng) {
  BugType *BT = new BugType("Idempotent operation", "Dead code");
  // Iterate over the hash to see if we have any paths with definite
  // idempotent operations.
  for (AssumptionMap::const_iterator i = hash.begin(); i != hash.end(); ++i) {
    // Unpack the hash contents.
    const BinaryOperatorData &Data = i->second;
    const Assumption &A = Data.assumption;
    AnalysisContext *AC = Data.analysisContext;
    const ExplodedNodeSet &ES = Data.explodedNodes;
    const BinaryOperator *B = i->first;

    if (A == Impossible)
      continue;

    // If the analyzer did not finish, check to see if we can still emit this
    // warning.
    if (Eng.hasWorkRemaining()) {
      const CFGStmtMap *CBM = CFGStmtMap::Build(AC->getCFG(),
                                                &AC->getParentMap());
      // Only warn if we can trace the path back completely.
      bool CanTraceBack = PathWasCompletelyAnalyzed(AC->getCFG(),
                                                    CBM->getBlock(B), CBM,
                                                    Eng.getCoreEngine());
      // Delete the map on every path; previously it leaked on 'continue'.
      delete CBM;
      if (!CanTraceBack)
        continue;
    }

    // Select the error message and SourceRanges to report.
    llvm::SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    bool LHSRelevant = false, RHSRelevant = false;
    switch (A) {
    case Equal:
      LHSRelevant = true;
      RHSRelevant = true;
      if (B->getOpcode() == BO_Assign)
        os << "Assigned value is always the same as the existing value";
      else
        os << "Both operands to '" << B->getOpcodeStr()
           << "' always have the same value";
      break;
    case LHSis1:
      LHSRelevant = true;
      os << "The left operand to '" << B->getOpcodeStr() << "' is always 1";
      break;
    case RHSis1:
      RHSRelevant = true;
      os << "The right operand to '" << B->getOpcodeStr() << "' is always 1";
      break;
    case LHSis0:
      LHSRelevant = true;
      os << "The left operand to '" << B->getOpcodeStr() << "' is always 0";
      break;
    case RHSis0:
      RHSRelevant = true;
      os << "The right operand to '" << B->getOpcodeStr() << "' is always 0";
      break;
    case Possible:
      llvm_unreachable("Operation was never marked with an assumption");
    case Impossible:
      llvm_unreachable(0);
    }

    // Add a report for each ExplodedNode.
    for (ExplodedNodeSet::iterator I = ES.begin(), E = ES.end(); I != E; ++I) {
      EnhancedBugReport *report = new EnhancedBugReport(*BT, os.str(), *I);

      // Add source ranges and visitor hooks.
      if (LHSRelevant) {
        const Expr *LHS = i->first->getLHS();
        report->addRange(LHS->getSourceRange());
        report->addVisitorCreator(bugreporter::registerVarDeclsLastStore, LHS);
      }
      if (RHSRelevant) {
        const Expr *RHS = i->first->getRHS();
        report->addRange(RHS->getSourceRange());
        report->addVisitorCreator(bugreporter::registerVarDeclsLastStore, RHS);
      }

      BR.EmitReport(report);
    }
  }
}
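// Hypothetical inputs matching the assumptions reported above:
void idempotent_examples(int x, int y) {
  x = x;      // Equal: assigned value is always the same as the existing value
  y = x + 0;  // RHSis0: the right operand to '+' is always 0
  y = x * 1;  // RHSis1: the right operand to '*' is always 1
}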
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph)
    return;
  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
  if (!D)
    return;  // Ignore anonymous functions for now.
  if (D->getAttr<NoThreadSafetyAnalysisAttr>())
    return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector? Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  if (!SortedGraph.empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph.begin();
    Lockset &InitialLockset = EntryLocksets[FirstBlock->getBlockID()];
    const AttrVec &ArgAttrs = D->getAttrs();
    for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
      Attr *Attr = ArgAttrs[i];
      SourceLocation AttrLoc = Attr->getLocation();
      if (SharedLocksRequiredAttr *SLRAttr
            = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
        for (SharedLocksRequiredAttr::args_iterator
               SLRIter = SLRAttr->args_begin(),
               SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *SLRIter, D, LK_Shared, AttrLoc);
      } else if (ExclusiveLocksRequiredAttr *ELRAttr
                   = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
        for (ExclusiveLocksRequiredAttr::args_iterator
               ELRIter = ELRAttr->args_begin(),
               ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *ELRIter, D, LK_Exclusive, AttrLoc);
      }
    }
  }

  for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
       E = SortedGraph.end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();

    VisitedBlocks.insert(CurrBlock);

    // Use the default initial lockset in case there are no predecessors.
    Lockset &Entryset = EntryLocksets[CurrBlockID];
    Lockset &Exitset = ExitLocksets[CurrBlockID];

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same.  We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union.  We
    // may want to also keep the union in future.  As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M.  At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths.  Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // Skip if *PI -> CurrBlock is a back edge.
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      if (!LocksetInitialized) {
        Entryset = ExitLocksets[PrevBlockID];
        LocksetInitialized = true;
      } else {
        Entryset = intersectAndWarn(Handler, Entryset,
                                    ExitLocksets[PrevBlockID], LocksetFactory,
                                    LEK_LockedSomePredecessors);
      }
    }

    BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
        LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
    }
    Exitset = LocksetBuilder.getLockset();

    // For every back edge from CurrBlock (the end of the loop) to another
    // block (FirstLoopBlock) we need to check that the exit lockset of
    // CurrBlock is equal to the one held at the beginning of FirstLoopBlock.
    // We can look up the lockset held at the beginning of FirstLoopBlock in
    // the EntryLocksets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // Skip if CurrBlock -> *SI is *not* a back edge.
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
      Lockset LoopEnd = ExitLocksets[CurrBlockID];
      intersectAndWarn(Handler, LoopEnd, PreLoop, LocksetFactory,
                       LEK_LockedSomeLoopIterations);
    }
  }

  Lockset InitialLockset = EntryLocksets[CFGraph->getEntry().getBlockID()];
  Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];

  // FIXME: Should we call this function for all blocks which exit the
  // function?
  intersectAndWarn(Handler, InitialLockset, FinalLockset, LocksetFactory,
                   LEK_LockedAtEndOfFunction);
}
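// A hypothetical input that trips the predecessor intersection above.  The
// attribute spellings are assumptions matching the era of the
// SharedLocksRequiredAttr/ExclusiveLocksRequiredAttr handling.
struct __attribute__((lockable)) Mutex {
  void Lock() __attribute__((exclusive_lock_function));
  void Unlock() __attribute__((unlock_function));
};

Mutex mu;
int data __attribute__((guarded_by(mu)));

void joins_with_mismatched_locksets(bool b) {
  if (b)
    mu.Lock();
  // Join point: 'mu' is held on only one predecessor, so the entry lockset
  // here is the (empty) intersection of the predecessor exit locksets and
  // intersectAndWarn fires with LEK_LockedSomePredecessors.
  data = 1;
  if (b)
    mu.Unlock();
}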
/// CheckFallThrough - Check that we don't fall off the end of a
/// statement that should return a value.
///
/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
/// MaybeFallThrough iff we might or might not fall off the end,
/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
/// return.  We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return.  We assume that functions not marked noreturn
/// will return.
static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
  CFG *cfg = AC.getCFG();
  if (cfg == 0)
    return UnknownFallThrough;

  // The CFG leaves in dead things, and we don't want the dead code paths to
  // confuse us, so we mark all live things first.
  llvm::BitVector live(cfg->getNumBlockIDs());
  unsigned count = reachable_code::ScanReachableFromBlock(cfg->getEntry(),
                                                          live);

  bool AddEHEdges = AC.getAddEHEdges();
  if (!AddEHEdges && count != cfg->getNumBlockIDs())
    // When there are things remaining dead, and we didn't add EH edges
    // from CallExprs to the catch clauses, we have to go back and
    // mark them as live.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!live[b.getBlockID()]) {
        if (b.pred_begin() == b.pred_end()) {
          if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
            // When not adding EH edges from calls, catch clauses
            // can otherwise seem dead.  Avoid noting them as dead.
            count += reachable_code::ScanReachableFromBlock(b, live);
          continue;
        }
      }
    }

  // Now that we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
         I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock &B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Destructors can appear after the 'return' in the CFG.  This is
    // normal.  We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
    bool hasNoReturnDtor = false;

    for ( ; ri != re ; ++ri) {
      CFGElement CE = *ri;

      // FIXME: The right solution is to just sever the edges in the
      // CFG itself.
      if (const CFGImplicitDtor *iDtor = ri->getAs<CFGImplicitDtor>())
        if (iDtor->isNoReturn(AC.getASTContext())) {
          hasNoReturnDtor = true;
          HasFakeEdge = true;
          break;
        }

      if (isa<CFGStmt>(CE))
        break;
    }

    if (hasNoReturnDtor)
      continue;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = cast<CFGStmt>(*ri);
    Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
      if (AS->isMSAsm()) {
        HasFakeEdge = true;
        HasLiveReturn = true;
        continue;
      }
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }

    bool NoReturnEdge = false;
    if (CallExpr *C = dyn_cast<CallExpr>(S)) {
      if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
            == B.succ_end()) {
        HasAbnormalEdge = true;
        continue;
      }
      Expr *CEE = C->getCallee()->IgnoreParenCasts();

      QualType calleeType = CEE->getType();
      if (calleeType == AC.getASTContext().BoundMemberTy) {
        calleeType = Expr::findBoundMemberType(CEE);
        assert(!calleeType.isNull() && "analyzing unresolved call?");
      }

      if (getFunctionExtInfo(calleeType).getNoReturn()) {
        NoReturnEdge = true;
        HasFakeEdge = true;
      } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
        ValueDecl *VD = DRE->getDecl();
        if (VD->hasAttr<NoReturnAttr>()) {
          NoReturnEdge = true;
          HasFakeEdge = true;
        }
      }
    }
    // FIXME: Add noreturn message sends.
    if (NoReturnEdge == false)
      HasPlainEdge = true;
  }

  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return.  If people would like this warning to be
  // more accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}
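// A hypothetical input showing the effect of IgnoreDefaultsWithCoveredEnums:
// the only fall-through edge into the exit block comes from the implicit
// 'default' of a switch that covers every enumerator, so it is filtered out
// and no fall-through is reported.
enum Color { Red, Green, Blue };

int color_index(Color c) {
  switch (c) {
  case Red:   return 0;
  case Green: return 1;
  case Blue:  return 2;
  }
}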
void FindUnreachableCode(AnalysisContext &AC, Callback &CB) {
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return;

  // Scan for reachable blocks.
  llvm::BitVector reachable(cfg->getNumBlockIDs());
  unsigned numReachable = ScanReachableFromBlock(cfg->getEntry(), reachable);

  // If there are no unreachable blocks, we're done.
  if (numReachable == cfg->getNumBlockIDs())
    return;

  SourceRange R1, R2;

  llvm::SmallVector<ErrLoc, 24> lines;
  bool AddEHEdges = AC.getAddEHEdges();

  // First, give warnings for blocks with no predecessors, as they
  // can't be part of a loop.
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    CFGBlock &b = **I;
    if (!reachable[b.getBlockID()]) {
      if (b.pred_empty()) {
        if (!AddEHEdges &&
            dyn_cast_or_null<CXXTryStmt>(b.getTerminator().getStmt())) {
          // When not adding EH edges from calls, catch clauses
          // can otherwise seem dead.  Avoid noting them as dead.
          numReachable += ScanReachableFromBlock(b, reachable);
          continue;
        }
        SourceLocation c = GetUnreachableLoc(b, R1, R2);
        if (!c.isValid()) {
          // Blocks without a location can't produce a warning, so don't mark
          // reachable blocks from here as live.
          reachable.set(b.getBlockID());
          ++numReachable;
          continue;
        }
        lines.push_back(ErrLoc(c, R1, R2));
        // Avoid excessive errors by marking everything reachable from here.
        numReachable += ScanReachableFromBlock(b, reachable);
      }
    }
  }

  if (numReachable < cfg->getNumBlockIDs()) {
    // And then give warnings for the tops of loops.
    for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
      CFGBlock &b = **I;
      if (!reachable[b.getBlockID()])
        // Avoid excessive errors by marking everything reachable from here.
        lines.push_back(ErrLoc(MarkLiveTop(&b, reachable,
                                 AC.getASTContext().getSourceManager()),
                               SourceRange(), SourceRange()));
    }
  }

  llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp);

  for (llvm::SmallVectorImpl<ErrLoc>::iterator I = lines.begin(),
       E = lines.end(); I != E; ++I)
    if (I->Loc.isValid())
      CB.HandleUnreachable(I->Loc, I->R1, I->R2);
}
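// A hypothetical input needing the 'tops of loops' pass above: the dead
// loop's condition and body are each other's only predecessors, so no block
// in the cycle is pred_empty() and the first pass never fires; MarkLiveTop
// reports the loop head instead.
void dead_cycle(bool b) {
  goto end;
  while (b)
    b = !b;
end:
  return;
}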