// The actual analysis for a function call starting at a function call return point and terminating each path whenever we reach // another function call. Try to figure out if we ever read from EAX without first writing to it and return true if we do. static bool returnValueUsed(const Cfg &cfg, CfgVertex startVertex, const RegisterDictionary *regdict) { BaseSemantics::SValuePtr protoval = NullSemantics::SValue::instance(); RegisterStatePtr registers = RegisterState::instance(protoval, regdict); BaseSemantics::MemoryStatePtr memory = BaseSemantics::MemoryCellList::instance(protoval, protoval); BaseSemantics::StatePtr state = BaseSemantics::State::instance(registers, memory); BaseSemantics::RiscOperatorsPtr ops = NullSemantics::RiscOperators::instance(state); size_t addrWidth = regdict->findLargestRegister(x86_regclass_gpr, x86_gpr_sp).get_nbits(); BaseSemantics::DispatcherPtr dispatcher = DispatcherX86::instance(ops, addrWidth); WorkList<CfgVertex> worklist(true); Map<CfgVertex, size_t> seen; worklist.push(startVertex); while (!worklist.empty()) { CfgVertex v = worklist.pop(); seen[v] = 1; SgAsmBlock *bb = get_ast_node(cfg, v); std::vector<SgAsmInstruction*> insns = SageInterface::querySubTree<SgAsmInstruction>(bb); // "Run" the basic block bool failed = false; BOOST_FOREACH (SgAsmInstruction *insn, insns) { try { dispatcher->processInstruction(insn); if (registers->readUninitialized()) return true; } catch (...) { failed = true; break; } } if (failed) continue; // Add new vertices to the work list, but only if none of the outgoing edges are function calls. bool isCall = false; CfgOutEdgeIterator ei, ei_end; for (boost::tie(ei, ei_end)=out_edges(v, cfg); ei!=ei_end && !isCall; ++ei) isCall = isFunctionCall(cfg, *ei); if (!isCall) { for (boost::tie(ei, ei_end)=out_edges(v, cfg); ei!=ei_end && !isCall; ++ei) { if (!seen.exists(v)) worklist.push(v); } } } return false; }
//------------------------------expand_macro_nodes---------------------- // Returns true if a failure occurred. bool PhaseMacroExpand::expand_macro_nodes() { ResourceArea*a=Thread::current()->resource_area(); VectorSet visited(a); Node_List worklist(a); expand_recur(C->top(),visited,worklist); // Make sure expansion will not cause node limit to be exceeded. Worst case is a // macro node gets expanded into about 50 nodes. Allow 50% more for optimization if(C->check_node_count(worklist.size()*10,"out of nodes before macro expansion")) return true; // expand "macro" nodes // nodes are removed from the macro list as they are processed while( worklist.size() ) { Node * n = worklist.pop(); if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top()) ) { continue; } switch (n->class_id()) { //We use millicode instead of inline allocation case Node::Class_Allocate: expand_allocate(n->as_Allocate()); break; case Node::Class_AllocateArray: expand_allocate(n->as_AllocateArray()); break; case Node::Class_Lock: expand_lock_node(n->as_Lock()); break; case Node::Class_Unlock: expand_unlock_node(n->as_Unlock()); break; case Node::Class_SafePoint: expand_safepoint_node(n->as_SafePoint()); break; // case Node::Class_GetKlass: expand_klass(n); break; default: assert(false, "unknown node type in macro list"); } if (C->failing()) return true; } _igvn.optimize(); return false; }
// Grow the value-numbering hash table to (2 * old_size + 1) buckets and
// rehash every surviving entry into the new bucket array.  Entries whose
// value has been killed are dropped during the rehash.
void ValueMap::increase_table_size() {
  int old_size = size();
  int new_size = old_size * 2 + 1;

  // Temporary stack used to stage one old bucket's chain before re-linking;
  // push-then-pop reverses the chain order relative to the old bucket.
  ValueMapEntryList worklist(8);
  ValueMapEntryArray new_entries(new_size, NULL);
  int new_entry_count = 0;

  TRACE_VALUE_NUMBERING(tty->print_cr("increasing table size from %d to %d", old_size, new_size));

  for (int i = old_size - 1; i >= 0; i--) {
    ValueMapEntry* entry;
    // Stage all live entries of this bucket; killed values are filtered out here.
    for (entry = entry_at(i); entry != NULL; entry = entry->next()) {
      if (!is_killed(entry->value())) {
        worklist.push(entry);
      }
    }

    while (!worklist.is_empty()) {
      entry = worklist.pop();
      int new_index = entry_index(entry->hash(), new_size);

      if (entry->nesting() != nesting() && new_entries.at(new_index) != entry->next()) {
        // changing entries with a lower nesting than the current nesting of the table
        // is not allowed because then the same entry is contained in multiple value maps.
        // clone entry when next-pointer must be changed
        entry = new ValueMapEntry(entry->hash(), entry->value(), entry->nesting(), NULL);
      }
      // Prepend the (possibly cloned) entry onto its new bucket's chain.
      entry->set_next(new_entries.at(new_index));
      new_entries.at_put(new_index, entry);
      new_entry_count++;
    }
  }
  _entries = new_entries;
  _entry_count = new_entry_count;
}
// Run the dataflow analysis over the CFG of declaration D until a fixed
// point is reached or the block analysis reports an error, then print the
// per-block results.  Exits the process if no CFG can be built for D.
static void analyze(clang::Decl *D) {
  variables.init(D);

  clang::AnalysisDeclContext ac(/* AnalysisDeclContextManager */ nullptr, D);
  clang::CFG *cfg = ac.getCFG();
  if (cfg == nullptr)
    exit(1);
  VisualizeCfg(&ac, cfg);

  // Register every block of the CFG with the analysis before iterating.
  BlockAnalysis blockAnalysis;
  for (const clang::CFGBlock *cfgBlock : *cfg)
    blockAnalysis.add(cfgBlock);

  clang::ForwardDataflowWorklist worklist(*cfg, ac);
  worklist.enqueueBlock(&cfg->getEntry());

  // Standard worklist iteration: re-enqueue successors of any block whose
  // state changed, until the worklist drains or an error is detected.
  bool sawError = false;
  while (const clang::CFGBlock *cfgBlock = worklist.dequeue()) {
    const bool stateChanged = blockAnalysis.runOnBlock(cfgBlock);
    if (blockAnalysis.foundError()) {
      sawError = true;
      break;
    }
    if (stateChanged)
      worklist.enqueueSuccessors(cfgBlock);
  }

  if (!sawError)
    printf("Fixed point reached\n");
  blockAnalysis.print();
}
// Iterate over the method's basic blocks with a worklist, propagating
// argument/escape state until a fixed point.  All per-block StateInfo
// records plus one scratch "state" are backed by a single arena slab.
// NOTE(review): this definition is truncated in the visible source; the
// tail of the worklist loop and the closing braces are not shown here.
void BCEscapeAnalyzer::iterate_blocks(Arena *arena) {
  int numblocks = _methodBlocks->num_blocks();
  int stkSize   = _method->max_stack();
  int numLocals = _method->max_locals();
  StateInfo state;

  // One (vars + stack) slice per block, plus one extra slice for the
  // scratch "state" used while processing the current block.
  int datacount = (numblocks + 1) * (stkSize + numLocals);
  int datasize = datacount * sizeof(ArgumentMap);
  StateInfo *blockstates = (StateInfo *) arena->Amalloc(_methodBlocks->num_blocks() * sizeof(StateInfo));
  ArgumentMap *statedata = (ArgumentMap *) arena->Amalloc(datasize);
  // Placement-new each ArgumentMap since Amalloc returns raw storage.
  for (int i = 0; i < datacount; i++) ::new ((void*)&statedata[i]) ArgumentMap();
  ArgumentMap *dp = statedata;
  // Carve the scratch state's slices out of the slab first...
  state._vars = dp;
  dp += numLocals;
  state._stack = dp;
  dp += stkSize;
  state._initialized = false;
  state._max_stack = stkSize;
  // ...then one slice pair per block.
  for (int i = 0; i < numblocks; i++) {
    blockstates[i]._vars = dp;
    dp += numLocals;
    blockstates[i]._stack = dp;
    dp += stkSize;
    blockstates[i]._initialized = false;
    blockstates[i]._stack_height = 0;
    blockstates[i]._max_stack = stkSize;
  }
  GrowableArray<ciBlock *> worklist(arena, numblocks / 4, 0, NULL);
  GrowableArray<ciBlock *> successors(arena, 4, 0, NULL);

  _methodBlocks->clear_processed();

  // initialize block 0 state from method signature
  ArgumentMap allVars;   // all oop arguments to method
  ciSignature* sig = method()->signature();
  int j = 0;
  if (!method()->is_static()) {
    // record information for "this"
    blockstates[0]._vars[j].set(j);
    allVars.add(j);
    j++;
  }
  for (int i = 0; i < sig->count(); i++) {
    ciType* t = sig->type_at(i);
    // Only non-primitive (oop) arguments participate in escape tracking;
    // j still advances by the argument's slot size either way.
    if (!t->is_primitive_type()) {
      blockstates[0]._vars[j].set(j);
      allVars.add(j);
    }
    j += t->size();
  }
  blockstates[0]._initialized = true;
  assert(j == _arg_size, "just checking");

  ArgumentMap unknown_map;
  unknown_map.add_unknown();

  worklist.push(_methodBlocks->block_containing(0));
  while(worklist.length() > 0) {
    ciBlock *blk = worklist.pop();
    StateInfo *blkState = blockstates+blk->index();
    if (blk->is_handler() || blk->is_ret_target()) {
      // for an exception handler or a target of a ret instruction, we assume the worst case,
      // that any variable or stack slot could contain any argument
      for (int i = 0; i < numLocals; i++) {
        state._vars[i] = allVars;
      }
      if (blk->is_handler()) {
        // Handler entry: exactly the thrown exception is on the stack.
        state._stack_height = 1;
      } else {
        state._stack_height = blkState->_stack_height;
      }
      for (int i = 0; i < state._stack_height; i++) {
        state._stack[i] = allVars;
      }
    } else {
      // Normal block: start from the state recorded for this block.
      for (int i = 0; i < numLocals; i++) {
        state._vars[i] = blkState->_vars[i];
      }
      for (int i = 0; i < blkState->_stack_height; i++) {
        state._stack[i] = blkState->_stack[i];
      }
      state._stack_height = blkState->_stack_height;
    }
    iterate_one_block(blk, state, successors);
    // if this block has any exception handlers, push them
    // onto successor list
    if (blk->has_handler()) {
      DEBUG_ONLY(int handler_count = 0;)
      int blk_start = blk->start_bci();
      int blk_end = blk->limit_bci();
      for (int i = 0; i < numblocks; i++) {
        ciBlock *b = _methodBlocks->block(i);
        if (b->is_handler()) {
          int ex_start = b->ex_start_bci();
          int ex_end = b->ex_limit_bci();
          // A handler is a successor if its protected bci range overlaps
          // this block's bci range.
          if ((ex_start >= blk_start && ex_start < blk_end) ||
              (ex_end > blk_start && ex_end <= blk_end)) {
            successors.push(b);
          }
          DEBUG_ONLY(handler_count++;)
        }
      }
size_t WHtreeProcesser::flattenSelection( const std::vector< size_t > &selection, const bool keepBaseNodes )
{
    // Convenience overload: copy the selected node IDs into the list form
    // expected by the list-based overload and delegate the real work to it.
    std::list< size_t > pending;
    pending.assign( selection.begin(), selection.end() );
    return flattenSelection( pending, keepBaseNodes );
}
// end "flattenSelection()" -----------------------------------------------------------------