void Graph::dumpSauto(raw_ostream& os, int tabn) {
  LangOptions LO;
  LO.CPlusPlus = true;
  PrintingPolicy Policy(LO);

  for (Graph::adjMapType::iterator it = _adjList.begin(), eit = _adjList.end();
       it != eit; ++it) {
    Graph::twoIntPairType p = it->first;
    Edge *e = it->second;
    os << " Edge (" << p.first << ", " << p.second << ")\n";
    os << " Transition code \n";

    vector<CFGBlock*> codeBlocks = e->getPreStmtBlks();
    for (size_t i = 0; i < codeBlocks.size(); ++i) {
      CFGBlock* currBlock = codeBlocks.at(i);
      // Renamed the inner iterator so it no longer shadows the outer 'it'.
      for (CFGBlock::iterator bit = currBlock->begin(), beit = currBlock->end();
           bit != beit; ++bit) {
        if (Optional<CFGStmt> cfgStmt = bit->getAs<CFGStmt>()) {
          const Stmt* stmt = cfgStmt->getStmt();
          // Print to 'os' rather than llvm::errs(), so everything goes to
          // the stream the caller supplied.
          stmt->printPretty(os, 0, Policy, 0);
          os << "\n";
        }
      }
    }
  }
}
void Dominator::printBlockSet(BlkSetTy &blkSet) {
  OS << "{ ";
  for (BlkSetTy::iterator I = blkSet.begin(), E = blkSet.end(); I != E; ++I) {
    CFGBlock *block = *I;
    if (I != blkSet.begin())
      OS << ", ";
    OS << "B" << block->getBlockID();
  }
  OS << " }";
}
PathDiagnosticPiece* VisitNode(const ExplodedNode *N,
                               const ExplodedNode *PrevN,
                               BugReporterContext& BRC) {
  if (isSatisfied)
    return NULL;

  // Check if in the previous state it was feasible for this constraint
  // to *not* be true.
  if (PrevN->getState()->Assume(Constraint, !Assumption)) {
    isSatisfied = true;

    // As a sanity check, make sure that the negation of the constraint
    // was infeasible in the current state.  If it is feasible, we somehow
    // missed the transition point.
    if (N->getState()->Assume(Constraint, !Assumption))
      return NULL;

    // We found the transition point for the constraint.  We now need to
    // pretty-print the constraint. (work-in-progress)
    std::string sbuf;
    llvm::raw_string_ostream os(sbuf);

    if (isa<Loc>(Constraint)) {
      os << "Assuming pointer value is ";
      os << (Assumption ? "non-null" : "null");
    }

    if (os.str().empty())
      return NULL;

    // FIXME: Refactor this into BugReporterContext.
    const Stmt *S = 0;
    ProgramPoint P = N->getLocation();

    if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
      CFGBlock *BSrc = BE->getSrc();
      S = BSrc->getTerminatorCondition();
    }
    else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
      S = PS->getStmt();
    }

    if (!S)
      return NULL;

    // Construct a new PathDiagnosticPiece.
    PathDiagnosticLocation L(S, BRC.getSourceManager());
    return new PathDiagnosticEventPiece(L, os.str());
  }

  return NULL;
}
CFGBlock* add_block(int ip, bool loop=false) {
  CFGBlock* blk = find_block(ip);
  if(blk) return blk;

  blk = new CFGBlock(ip, loop);

  // Inherit the current exception handler
  blk->set_exception_handler(current_->exception_handler());

  set_block(ip, blk);
  return blk;
}
/// ScanReachableFromBlock - Mark all blocks reachable from Start.
/// Returns the total number of blocks that were marked reachable.
unsigned ScanReachableFromBlock(const CFGBlock &Start,
                                llvm::BitVector &Reachable) {
  unsigned count = 0;
  llvm::SmallVector<const CFGBlock*, 32> WL;

  // Prep work queue
  Reachable.set(Start.getBlockID());
  ++count;
  WL.push_back(&Start);

  // Find the reachable blocks from 'Start'.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  while (!WL.empty()) {
    const CFGBlock *item = WL.back();
    WL.pop_back();

    // Look at the successors and mark them reachable.
    for (CFGBlock::filtered_succ_iterator I =
           item->filtered_succ_start_end(FO); I.hasMore(); ++I)
      if (const CFGBlock *B = *I) {
        unsigned blockID = B->getBlockID();
        if (!Reachable[blockID]) {
          Reachable.set(blockID);
          ++count;
          WL.push_back(B);
        }
      }
  }
  return count;
}
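// The function above is a standard depth-first worklist scan specialized to
// Clang's CFG. As a reference point, here is a minimal standalone sketch of
// the same reachability logic over a plain adjacency list -- illustrative
// only, not part of any of these codebases.

#include <vector>

// Sketch: mark every node reachable from 'start' in a graph given as an
// adjacency list; returns how many nodes were marked. Mirrors
// ScanReachableFromBlock, minus the CFG successor filtering.
unsigned scanReachable(const std::vector<std::vector<unsigned>> &succs,
                       unsigned start, std::vector<bool> &reachable) {
  unsigned count = 0;
  std::vector<unsigned> worklist;

  reachable[start] = true;
  ++count;
  worklist.push_back(start);

  while (!worklist.empty()) {
    unsigned node = worklist.back();
    worklist.pop_back();
    for (unsigned succ : succs[node]) {
      if (!reachable[succ]) {
        reachable[succ] = true;
        ++count;
        worklist.push_back(succ);
      }
    }
  }
  return count;
}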
void Dominator::printDomMap() {
  OS << "=== Dominators ===\n";
  for (BlkToBlkSetTy::reverse_iterator I = DomMap.rbegin(), E = DomMap.rend();
       I != E; ++I) {
    CFGBlock *cfgBlock = (*I).first;
    OS << " [ B" << cfgBlock->getBlockID();
    if (cfgBlock == &cfg.getEntry())
      OS << " (ENTRY)";
    else if (cfgBlock == &cfg.getExit())
      OS << " (EXIT)";
    OS << " ]\n";

    BlkSetTy &doms = (*I).second;
    OS << "   Dominators (" << doms.size() << "): ";
    printBlockSet(doms);
    OS << "\n";
  }
  OS << "\n";
}
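// DomMap, as printed above, holds for each block the set of blocks that
// dominate it. A hedged, standalone sketch of the classic iterative dataflow
// computation that produces such a map, over integer node IDs rather than
// CFGBlock pointers (the helper name computeDominators is illustrative):

#include <set>
#include <vector>

// Sketch: iterate
//   Dom(entry) = {entry}
//   Dom(n)     = {n} union (intersection of Dom(p) over all predecessors p)
// until a fixed point is reached. Unreachable nodes keep the full set in
// this simplified version.
std::vector<std::set<unsigned>>
computeDominators(const std::vector<std::vector<unsigned>> &preds,
                  unsigned entry) {
  unsigned n = preds.size();
  std::set<unsigned> all;
  for (unsigned i = 0; i < n; ++i)
    all.insert(i);

  std::vector<std::set<unsigned>> dom(n, all);
  dom[entry] = {entry};

  bool changed = true;
  while (changed) {
    changed = false;
    for (unsigned b = 0; b < n; ++b) {
      if (b == entry)
        continue;
      std::set<unsigned> newDom = all;
      for (unsigned p : preds[b]) {
        // Intersect newDom with dom[p].
        std::set<unsigned> tmp;
        for (unsigned d : newDom)
          if (dom[p].count(d))
            tmp.insert(d);
        newDom = tmp;
      }
      newDom.insert(b); // every block dominates itself
      if (newDom != dom[b]) {
        dom[b] = newDom;
        changed = true;
      }
    }
  }
  return dom;
}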
AST_expr* remapIfExp(AST_IfExp* node) {
    std::string rtn_name = nodeName(node);
    AST_expr* test = remapExpr(node->test);
    CFGBlock* starting_block = curblock;

    AST_Branch* br = new AST_Branch();
    br->col_offset = node->col_offset;
    br->lineno = node->lineno;
    // Branch on the remapped test; using node->test here would discard the
    // remapping and leave 'test' unused.
    br->test = test;
    push_back(br);

    CFGBlock* iftrue = cfg->addBlock();
    iftrue->info = "iftrue";
    br->iftrue = iftrue;
    starting_block->connectTo(iftrue);
    curblock = iftrue;
    push_back(makeAssign(rtn_name, remapExpr(node->body)));
    AST_Jump* jtrue = new AST_Jump();
    push_back(jtrue);
    CFGBlock* endtrue = curblock;

    CFGBlock* iffalse = cfg->addBlock();
    iffalse->info = "iffalse";
    br->iffalse = iffalse;
    starting_block->connectTo(iffalse);
    curblock = iffalse;
    push_back(makeAssign(rtn_name, remapExpr(node->orelse)));
    AST_Jump* jfalse = new AST_Jump();
    push_back(jfalse);
    CFGBlock* endfalse = curblock;

    CFGBlock* exit_block = cfg->addBlock();
    jtrue->target = exit_block;
    endtrue->connectTo(exit_block);
    jfalse->target = exit_block;
    endfalse->connectTo(exit_block);
    curblock = exit_block;

    return makeName(rtn_name, AST_TYPE::Load);
}
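// For reference, the block structure this builds for `x if test else y` is
// the usual diamond; the diagram below is illustrative, not generated output
// (names follow the code above):
//
//          starting_block   (evaluates test, ends in Branch)
//            /          \
//      iftrue:          iffalse:
//        rtn = <body>     rtn = <orelse>
//        Jump             Jump
//            \          /
//           exit_block   (rtn_name read back as the expression value)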
void Dominator::printDominatorTree() {
  OS << "=== Dominator Tree ===\n";
  for (CFG::iterator I = cfg.begin(), E = cfg.end(); I != E; ++I) {
    CFGBlock *cfgBlock = *I;
    OS << " [ B" << cfgBlock->getBlockID();
    if (cfgBlock == &cfg.getEntry())
      OS << " (ENTRY)";
    else if (cfgBlock == &cfg.getExit())
      OS << " (EXIT)";
    OS << " ]\n";

    OS << "   Immediate dominator: ";
    if (ImmediateDomMap[cfgBlock])
      OS << "B" << ImmediateDomMap[cfgBlock]->getBlockID();
    OS << "\n";

    BlkSetTy &children = ChildrenMap[cfgBlock];
    OS << "   Children (" << children.size() << "): ";
    printBlockSet(children);
    OS << "\n";
  }
  OS << "\n";
}
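// Given dominator sets like the ones computeDominators (above) produces, a
// map like ImmediateDomMap can be derived: the immediate dominator of b is
// the strict dominator of b that every other strict dominator of b also
// dominates. A standalone sketch of that derivation, again over integer IDs
// (the helper name is illustrative, not a real member of this class):

#include <map>
#include <set>
#include <vector>

std::map<unsigned, unsigned>
computeImmediateDominators(const std::vector<std::set<unsigned>> &dom,
                           unsigned entry) {
  std::map<unsigned, unsigned> idom;
  for (unsigned b = 0; b < dom.size(); ++b) {
    if (b == entry)
      continue; // the entry has no immediate dominator
    for (unsigned cand : dom[b]) {
      if (cand == b)
        continue; // only strict dominators qualify
      bool dominatedByAllOthers = true;
      for (unsigned other : dom[b]) {
        if (other == b || other == cand)
          continue;
        if (!dom[cand].count(other)) { // 'other' must dominate 'cand'
          dominatedByAllOthers = false;
          break;
        }
      }
      if (dominatedByAllOthers) {
        idom[b] = cand; // closest strict dominator found
        break;
      }
    }
  }
  return idom;
}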
void MallocOverflowSecurityChecker::checkASTCodeBody(const Decl *D,
                                                     AnalysisManager &mgr,
                                                     BugReporter &BR) const {
  CFG *cfg = mgr.getCFG(D);
  if (!cfg)
    return;

  // A list of variables referenced in possibly overflowing malloc operands.
  llvm::SmallVector<MallocOverflowCheck, 2> PossibleMallocOverflows;

  for (CFG::iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
    CFGBlock *block = *it;
    for (CFGBlock::iterator bi = block->begin(), be = block->end();
         bi != be; ++bi) {
      if (const CFGStmt *CS = bi->getAs<CFGStmt>()) {
        if (const CallExpr *TheCall = dyn_cast<CallExpr>(CS->getStmt())) {
          // Get the callee.
          const FunctionDecl *FD = TheCall->getDirectCallee();
          if (!FD)
            return;

          // Get the name of the callee. If it's a builtin, strip off the
          // prefix.
          IdentifierInfo *FnInfo = FD->getIdentifier();
          if (!FnInfo)
            return;

          if (FnInfo->isStr("malloc") || FnInfo->isStr("_MALLOC")) {
            if (TheCall->getNumArgs() == 1)
              CheckMallocArgument(PossibleMallocOverflows, TheCall->getArg(0),
                                  mgr.getASTContext());
          }
        }
      }
    }
  }

  OutputPossibleOverflows(PossibleMallocOverflows, D, BR, mgr);
}
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
  if (G->num_roots() == 0) { // Initialize the analysis by constructing
    // the root if none exists.

    CFGBlock *Entry = &(L->getCFG()->getEntry());

    assert(Entry->empty() && "Entry block must be empty.");
    assert(Entry->succ_size() == 1 && "Entry block must have 1 successor.");

    // Get the solitary successor.
    CFGBlock *Succ = *(Entry->succ_begin());

    // Construct an edge representing the
    // starting location in the function.
    BlockEdge StartLoc(Entry, Succ, L);

    // Set the current block counter to being empty.
    WList->setBlockCounter(BCounterFactory.GetEmptyCounter());

    // Generate the root.
    GenerateNode(StartLoc, getInitialState(L), 0);
  }

  while (Steps && WList->hasWork()) {
    --Steps;

    const GRWorkListUnit &WU = WList->Dequeue();

    // Set the current block counter.
    WList->setBlockCounter(WU.getBlockCounter());

    // Retrieve the node.
    ExplodedNode *Node = WU.getNode();

    // Dispatch on the location type.
    switch (Node->getLocation().getKind()) {
      case ProgramPoint::BlockEdgeKind:
        HandleBlockEdge(cast<BlockEdge>(Node->getLocation()), Node);
        break;

      case ProgramPoint::BlockEntranceKind:
        HandleBlockEntrance(cast<BlockEntrance>(Node->getLocation()), Node);
        break;

      case ProgramPoint::BlockExitKind:
        assert(false && "BlockExit locations never occur in forward analysis.");
        break;

      default:
        assert(isa<PostStmt>(Node->getLocation()));
        HandlePostStmt(cast<PostStmt>(Node->getLocation()), WU.getBlock(),
                       WU.getIndex(), Node);
        break;
    }
  }

  return WList->hasWork();
}
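// Stripped of the analyzer specifics, the engine above is a bounded worklist
// loop: dequeue a unit of work, dispatch on its kind, and stop when either
// the queue drains or the step budget runs out. A minimal standalone sketch
// of that shape (WorkItem and its fields are illustrative stand-ins):

#include <deque>
#include <functional>

struct WorkItem {
  enum Kind { Edge, Entrance, Stmt } kind;
  int payload; // stand-in for node/block/index state
};

// Returns true if work remains, i.e. the step budget ran out first --
// matching the `return WList->hasWork();` convention above.
bool executeWorkList(std::deque<WorkItem> &worklist, unsigned steps,
                     const std::function<void(const WorkItem &)> &handle) {
  while (steps && !worklist.empty()) {
    --steps;
    WorkItem item = worklist.front();
    worklist.pop_front();
    handle(item); // handlers may push new items onto 'worklist'
  }
  return !worklist.empty();
}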
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void runThreadSafetyAnalysis(AnalysisContext &AC,
                             ThreadSafetyHandler &Handler) {
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph) return;
  const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
  if (!D) return;  // Ignore anonymous functions for now.
  if (D->getAttr<NoThreadSafetyAnalysisAttr>()) return;

  Lockset::Factory LocksetFactory;

  // FIXME: Switch to SmallVector? Otherwise improve performance impact?
  std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
                                     LocksetFactory.getEmptyMap());
  std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
                                    LocksetFactory.getEmptyMap());

  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  TopologicallySortedCFG SortedGraph(CFGraph);
  CFGBlockSet VisitedBlocks(CFGraph);

  if (!SortedGraph.empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph.begin();
    Lockset &InitialLockset = EntryLocksets[FirstBlock->getBlockID()];
    const AttrVec &ArgAttrs = D->getAttrs();
    for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
      Attr *Attr = ArgAttrs[i];
      SourceLocation AttrLoc = Attr->getLocation();
      if (SharedLocksRequiredAttr *SLRAttr
            = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
        for (SharedLocksRequiredAttr::args_iterator
               SLRIter = SLRAttr->args_begin(),
               SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *SLRIter, D, LK_Shared, AttrLoc);
      } else if (ExclusiveLocksRequiredAttr *ELRAttr
                   = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
        for (ExclusiveLocksRequiredAttr::args_iterator
               ELRIter = ELRAttr->args_begin(),
               ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
          InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
                                   *ELRIter, D, LK_Exclusive, AttrLoc);
      }
    }
  }

  for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
         E = SortedGraph.end(); I != E; ++I) {
    const CFGBlock *CurrBlock = *I;
    int CurrBlockID = CurrBlock->getBlockID();

    VisitedBlocks.insert(CurrBlock);

    // Use the default initial lockset in case there are no predecessors.
    Lockset &Entryset = EntryLocksets[CurrBlockID];
    Lockset &Exitset = ExitLocksets[CurrBlockID];

    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {

      // Skip a null predecessor, or one not yet visited -- in the latter
      // case *PI -> CurrBlock is a back edge, handled separately below.
      if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
        continue;

      int PrevBlockID = (*PI)->getBlockID();
      if (!LocksetInitialized) {
        Entryset = ExitLocksets[PrevBlockID];
        LocksetInitialized = true;
      } else {
        Entryset = intersectAndWarn(Handler, Entryset,
                                    ExitLocksets[PrevBlockID], LocksetFactory,
                                    LEK_LockedSomePredecessors);
      }
    }

    BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
        LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
    }
    Exitset = LocksetBuilder.getLockset();

    // For every back edge from CurrBlock (the end of the loop) to another
    // block (FirstLoopBlock) we need to check that the Lockset of Block is
    // equal to the one held at the beginning of FirstLoopBlock. We can look
    // up the Lockset held at the beginning of FirstLoopBlock in the
    // EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {

      // Skip unless CurrBlock -> *SI is a back edge (i.e. *SI was already
      // visited).
      if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
      Lockset LoopEnd = ExitLocksets[CurrBlockID];
      intersectAndWarn(Handler, LoopEnd, PreLoop, LocksetFactory,
                       LEK_LockedSomeLoopIterations);
    }
  }

  Lockset InitialLockset = EntryLocksets[CFGraph->getEntry().getBlockID()];
  Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];

  // FIXME: Should we call this function for all blocks which exit the
  // function?
  intersectAndWarn(Handler, InitialLockset, FinalLockset, LocksetFactory,
                   LEK_LockedAtEndOfFunction);
}
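// The central dataflow operation here is intersectAndWarn: merging two
// locksets keeps only the locks held on both paths and reports the
// asymmetric ones. A standalone sketch of that merge using std::map in place
// of the analyzer's immutable Lockset (the toy types and the warn callback
// are illustrative):

#include <functional>
#include <map>
#include <string>

using ToyLockset = std::map<std::string, bool>; // lock name -> exclusive?

// Sketch: intersect two locksets; any lock present in exactly one of them is
// reported, since it is only held on some of the merging paths.
ToyLockset intersectAndWarnToy(
    const ToyLockset &a, const ToyLockset &b,
    const std::function<void(const std::string &)> &warn) {
  ToyLockset out;
  for (const auto &entry : a) {
    if (b.count(entry.first))
      out.insert(entry);  // held on both paths: keep it
    else
      warn(entry.first);  // held on only one path: warn
  }
  for (const auto &entry : b)
    if (!a.count(entry.first))
      warn(entry.first);
  return out;
}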
void build() {
  find_backward_gotos();

  // Construct the root block specially.
  if(blocks_[0]) {
    root_ = blocks_[0];
  } else {
    root_ = new CFGBlock(0);
    blocks_[0] = root_;
  }

  current_ = root_;

  VMMethod::Iterator iter(stream_, stream_size_);
  for(;;) {
    if(CFGBlock* next_block = find_block(iter.position())) {
      if(next_block->loop_p()) {
        // The handler wasn't setup originally, so we have to set it now.
        next_block->set_exception_handler(current_->exception_handler());

        close_current(iter, next_block);
      } else {
        current_ = next_block;
      }
    }

    switch(iter.op()) {
    case InstructionSequence::insn_goto:
    case InstructionSequence::insn_goto_if_true:
    case InstructionSequence::insn_goto_if_false:
      if(iter.operand1() > iter.position()) {
        current_->add_child(add_block(iter.operand1()));
        start_new_block(iter);
      } else {
#ifndef NDEBUG
        CFGBlock* loop_header = find_block(iter.operand1());
        assert(loop_header);
        assert(loop_header->exception_handler() ==
               current_->exception_handler());
#endif
      }
      break;
    case InstructionSequence::insn_setup_unwind: {
      assert(iter.operand1() > iter.position());

      CFGBlock* handler = add_block(iter.operand1());
      handler->set_exception_type(iter.operand2());

      current_->add_child(handler);

      CFGBlock* body = start_new_block(iter);
      assert(body); // make sure it's not at the end.

      body->set_exception_handler(handler);
      break;
    }
    case InstructionSequence::insn_pop_unwind: {
      assert(current_->exception_handler());

      CFGBlock* cont = start_new_block(iter);

      CFGBlock* current_handler = cont->exception_handler();
      assert(current_handler);

      // Effectively pop the current handler by setting the
      // block's handler (and thus all blocks after it) to the current
      // handler's handler.
      cont->set_exception_handler(current_handler->exception_handler());
      break;
    }
    case InstructionSequence::insn_ensure_return:
    case InstructionSequence::insn_raise_exc:
    case InstructionSequence::insn_raise_return:
    case InstructionSequence::insn_raise_break:
    case InstructionSequence::insn_reraise:
    case InstructionSequence::insn_ret:
      start_new_block(iter);
      break;
    }

    if(!iter.advance()) break;
  }

  current_->set_end_ip(iter.position());
}
void close_current(VMMethod::Iterator& iter, CFGBlock* next) {
  current_->set_end_ip(iter.position());
  current_->add_child(next);
  current_ = next;
}
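// build() above relies on find_backward_gotos having pre-created a block at
// every backward-branch target, so loop headers already exist when the
// linear scan reaches them. A standalone sketch of that pre-pass over a toy
// instruction stream -- the types and the single Goto opcode are
// illustrative, not Rubinius API:

#include <map>

struct ToyInsn {
  enum Op { Plain, Goto } op;
  int target; // jump destination ip, meaningful only when op == Goto
};

// Sketch: scan the stream once and record a block start at every
// backward-branch target, marking it as a loop header.
std::map<int, bool> findBackwardGotos(const ToyInsn *stream, int size) {
  std::map<int, bool> blockStarts; // ip -> is loop header
  for (int ip = 0; ip < size; ++ip) {
    const ToyInsn &insn = stream[ip];
    if (insn.op == ToyInsn::Goto && insn.target <= ip)
      blockStarts[insn.target] = true; // backward edge: target heads a loop
  }
  return blockStarts;
}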
static SourceLocation GetUnreachableLoc(const CFGBlock &b, SourceRange &R1,
                                        SourceRange &R2) {
  const Stmt *S = 0;
  unsigned sn = 0;
  R1 = R2 = SourceRange();

  if (sn < b.size()) {
    const CFGStmt *CS = b[sn].getAs<CFGStmt>();
    if (!CS)
      return SourceLocation();
    S = CS->getStmt();
  } else if (b.getTerminator())
    S = b.getTerminator();
  else
    return SourceLocation();

  if (const Expr *Ex = dyn_cast<Expr>(S))
    S = Ex->IgnoreParenImpCasts();

  switch (S->getStmtClass()) {
    case Expr::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(S);
      if (BO->getOpcode() == BO_Comma) {
        if (sn+1 < b.size())
          return b[sn+1].getAs<CFGStmt>()->getStmt()->getLocStart();
        const CFGBlock *n = &b;
        while (1) {
          if (n->getTerminator())
            return n->getTerminator()->getLocStart();
          if (n->succ_size() != 1)
            return SourceLocation();
          n = n->succ_begin()[0];
          if (n->pred_size() != 1)
            return SourceLocation();
          if (!n->empty())
            return (*n)[0].getAs<CFGStmt>()->getStmt()->getLocStart();
        }
      }
      R1 = BO->getLHS()->getSourceRange();
      R2 = BO->getRHS()->getSourceRange();
      return BO->getOperatorLoc();
    }
    case Expr::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(S);
      R1 = UO->getSubExpr()->getSourceRange();
      return UO->getOperatorLoc();
    }
    case Expr::CompoundAssignOperatorClass: {
      const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S);
      R1 = CAO->getLHS()->getSourceRange();
      R2 = CAO->getRHS()->getSourceRange();
      return CAO->getOperatorLoc();
    }
    case Expr::BinaryConditionalOperatorClass:
    case Expr::ConditionalOperatorClass: {
      const AbstractConditionalOperator *CO =
        cast<AbstractConditionalOperator>(S);
      return CO->getQuestionLoc();
    }
    case Expr::MemberExprClass: {
      const MemberExpr *ME = cast<MemberExpr>(S);
      R1 = ME->getSourceRange();
      return ME->getMemberLoc();
    }
    case Expr::ArraySubscriptExprClass: {
      const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S);
      R1 = ASE->getLHS()->getSourceRange();
      R2 = ASE->getRHS()->getSourceRange();
      return ASE->getRBracketLoc();
    }
    case Expr::CStyleCastExprClass: {
      const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S);
      R1 = CSC->getSubExpr()->getSourceRange();
      return CSC->getLParenLoc();
    }
    case Expr::CXXFunctionalCastExprClass: {
      const CXXFunctionalCastExpr *CE = cast<CXXFunctionalCastExpr>(S);
      R1 = CE->getSubExpr()->getSourceRange();
      return CE->getTypeBeginLoc();
    }
    case Stmt::CXXTryStmtClass: {
      return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
    }
    default: ;
  }

  R1 = S->getSourceRange();
  return S->getLocStart();
}