// Partitions a method whose prologue is a switch (or an equivalent chain of
// if-* opcodes) by mapping each case key to the block that handles it.
// Populates m_key_to_block; m_prologue_blocks is presumably filled by
// compute_prologue_blocks (defined elsewhere) — TODO confirm.
SwitchMethodPartitioning::SwitchMethodPartitioning(IRCode* code,
                                                  bool verify_default_case)
    : m_code(code) {
  m_code->build_cfg(/* editable */ true);
  auto& cfg = m_code->cfg();
  // Note that a single-case switch can be compiled as either a switch opcode or
  // a series of if-* opcodes. We can use constant propagation to handle these
  // cases uniformly: to determine the case key, we use the inferred value of
  // the operand to the branching opcode in the successor blocks.
  cp::intraprocedural::FixpointIterator fixpoint(
      cfg, cp::ConstantPrimitiveAnalyzer());
  fixpoint.run(ConstantEnvironment());
  // Register whose constant value at each case entry identifies the case key.
  auto determining_reg =
      compute_prologue_blocks(&cfg, fixpoint, verify_default_case);

  // Find all the outgoing edges from the prologue blocks
  std::vector<cfg::Edge*> cases;
  for (const cfg::Block* prologue : m_prologue_blocks) {
    for (cfg::Edge* e : prologue->succs()) {
      // Only edges that leave the prologue region lead to case blocks.
      if (std::find(m_prologue_blocks.begin(), m_prologue_blocks.end(),
                    e->target()) == m_prologue_blocks.end()) {
        cases.push_back(e);
      }
    }
  }

  for (auto edge : cases) {
    auto case_block = edge->target();
    auto env = fixpoint.get_entry_state_at(case_block);
    // The inferred value of the determining register at block entry is the
    // case key for this block.
    auto case_key = env.get<SignedConstantDomain>(*determining_reg);
    if (case_key.is_top() && verify_default_case) {
      // No single key could be inferred: this should be the default case,
      // which is expected to end in a throw.
      auto last_insn_it = case_block->get_last_insn();
      always_assert_log(last_insn_it != case_block->end() &&
                            last_insn_it->insn->opcode() == OPCODE_THROW,
                        "Could not determine key for block that does not look "
                        "like it throws an IllegalArgumentException: %d in %s",
                        case_block->id(), SHOW(cfg));
    } else if (!case_key.is_top()) {
      const auto& c = case_key.get_constant();
      if (c != boost::none) {
        m_key_to_block[*c] = case_block;
      } else {
        // handle multiple case keys that map to a single block
        always_assert(edge->type() == cfg::EDGE_BRANCH);
        const auto& edge_case_key = edge->case_key();
        always_assert(edge_case_key != boost::none);
        m_key_to_block[*edge_case_key] = case_block;
      }
    }
  }
}
// Returns the caller's vframe, or NULL when there is no Java sender frame
// (i.e. this is the outermost frame of the thread).
vframe* vframe::sender() const {
  RegisterMap map_copy = *register_map();
  assert(is_top(), "just checking");
  // An entry frame that is also the first frame has no sender.
  if (_fr.is_entry_frame() && _fr.is_first_frame()) {
    return NULL;
  }
  frame caller = _fr.real_sender(&map_copy);
  return caller.is_first_frame() ? NULL
                                 : vframe::new_vframe(&caller, &map_copy, thread());
}
// Inserts a timeout for `key`, or adjusts the existing one, keeping the
// heap ordered; refreshes the pending alarm whenever the earliest entry
// may have changed.
void MultiTimeout::set_timeout_at(int64 key, double timeout) {
  LOG(DEBUG) << "Set timeout for " << key << " in " << timeout - Time::now();
  auto emplace_result = items_.emplace(key);
  auto *node = static_cast<HeapNode *>(const_cast<Item *>(&*emplace_result.first));
  if (!node->in_heap()) {
    // Fresh entry: it must have been newly inserted into the item set.
    CHECK(emplace_result.second);
    timeout_queue_.insert(timeout, node);
    if (node->is_top()) {
      update_timeout();
    }
    return;
  }
  // Existing entry: adjust its position in the heap.
  CHECK(!emplace_result.second);
  bool was_top = node->is_top();
  timeout_queue_.fix(timeout, node);
  // The alarm must be refreshed if the earliest deadline changed, i.e. the
  // node left the top slot or moved into it.
  if (was_top || node->is_top()) {
    update_timeout();
  }
}
// Prints this scope (decode offsets in WizardMode, then locals, expression
// stack, monitors, and recursively the sender scope) on the given stream.
//
// Fix: the "Locals" header was printed to the global `tty` stream while
// every other line of the dump goes to `st`, splitting the output across
// two streams whenever `st != tty`. It now goes to `st` like the rest.
void ScopeDesc::print_on(outputStream* st) const {
  // header
  st->print("ScopeDesc[%d]@0x%lx ", _decode_offset, _code->instructions_begin());
  print_value_on(st);
  // decode offsets
  if (WizardMode) {
    st->print_cr("offset:     %d", _decode_offset);
    st->print_cr("bci:        %d", bci());
    st->print_cr("locals:     %d", _locals_decode_offset);
    st->print_cr("stack:      %d", _expressions_decode_offset);
    st->print_cr("monitor:    %d", _monitors_decode_offset);
    st->print_cr("sender:     %d", _sender_decode_offset);
  }
  // locals
  {
    GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->locals();
    if (l != NULL) {
      st->print_cr("Locals");  // was tty->print_cr, bypassing the target stream
      for (int index = 0; index < l->length(); index++) {
        st->print(" - l%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // expressions
  {
    GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->expressions();
    if (l != NULL) {
      st->print_cr("Expression stack");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // monitors
  {
    GrowableArray<MonitorValue*>* l = ((ScopeDesc*) this)->monitors();
    if (l != NULL) {
      st->print_cr("Monitor stack");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // Recurse into the caller scope, if any.
  if (!is_top()) {
    st->print_cr("Sender:");
    sender()->print_on(st);
  }
}
// Removes the timeout for `key`, if present, and refreshes the pending
// alarm when the removed entry was the earliest one.
void MultiTimeout::cancel_timeout(int64 key) {
  LOG(DEBUG) << "Cancel timeout for " << key;
  auto it = items_.find(Item(key));
  if (it == items_.end()) {
    return;  // nothing scheduled for this key
  }
  auto *node = static_cast<HeapNode *>(const_cast<Item *>(&*it));
  CHECK(node->in_heap());
  // Remember whether this was the earliest deadline before erasing it.
  bool was_top = node->is_top();
  timeout_queue_.erase(node);
  items_.erase(it);
  if (was_top) {
    update_timeout();
  }
}
// Backward transfer function for the used-vars (dead code) analysis: updates
// the set of used registers/pointers when stepping backwards over `insn`.
void UsedVarsFixpointIterator::analyze_instruction(
    const IRInstruction* insn, UsedVarsSet* used_vars) const {
  TRACE(DEAD_CODE, 5, "Before %s : %s : %s\n", SHOW(insn), SHOW(*used_vars),
        show_subset(m_insn_env_map.at(insn), insn).c_str());
  // Decide first whether the instruction is required, based on the used set
  // *after* it (we are iterating backwards).
  bool required = is_required(insn, *used_vars);
  auto op = insn->opcode();
  // An allocation defines its object: before this point (going backwards)
  // the allocated object cannot be used.
  if (ptrs::is_alloc_opcode(op)) {
    used_vars->remove(insn);
  }
  // Kill the destination register, or the special RESULT_REGISTER for
  // instructions whose value arrives via a subsequent move-result.
  if (insn->dests_size()) {
    used_vars->remove(insn->dest());
  } else if (insn->has_move_result()) {
    used_vars->remove(RESULT_REGISTER);
  }
  if (required) {
    const auto& env = m_insn_env_map.at(insn);
    if (env.is_bottom()) {
      // Unreachable program point; nothing to propagate.
      return;
    }
    // Any heap object this instruction may read from becomes used.
    for (auto reg : object_read_registers(insn)) {
      auto pointers = env.get_pointers(reg);
      // XXX: We should never encounter this case since we explicitly bind all
      // potential pointer-containing registers to non-Top values in our
      // environment. If we did encounter Top here, however, we should treat
      // all local allocations as potentially used -- a read from
      // PointerSet::top() must be treated like a read from every possible
      // heap location.
      always_assert(!pointers.is_top());
      for (auto pointer : pointers.elements()) {
        used_vars->add(pointer);
      }
    }
    // All source registers of a required instruction are used.
    for (size_t i = 0; i < insn->srcs_size(); ++i) {
      used_vars->add(insn->src(i));
    }
    // A (pseudo-)move-result consumes the RESULT_REGISTER of the preceding
    // instruction.
    if (is_move_result(op) || opcode::is_move_result_pseudo(op)) {
      used_vars->add(RESULT_REGISTER);
    }
  }
  TRACE(DEAD_CODE, 5, "After: %s\n", SHOW(*used_vars));
}
// Prints this scope on the given stream; when `pd` is non-NULL the pc/offset
// header is printed first. Dumps decode offsets (WizardMode), locals,
// expression stack, monitors and, under COMPILER2 with escape analysis,
// the scalar-replaced objects of the top scope.
//
// Fix: several lines (the pc header, the " Locals" header, and the whole
// Objects section) were printed to the global `tty` stream instead of the
// `st` parameter, splitting the dump across streams when `st != tty`.
// Everything now goes to `st`.
void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
  // header
  if (pd != NULL) {
    st->print_cr("ScopeDesc(pc=" PTR_FORMAT " offset=%x):", pd->real_pc(_code), pd->pc_offset());
  }
  print_value_on(st);
  // decode offsets
  if (WizardMode) {
    st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->content_begin());
    st->print_cr(" offset:     %d", _decode_offset);
    st->print_cr(" bci:        %d", bci());
    st->print_cr(" reexecute:  %s", should_reexecute() ? "true" : "false");
    st->print_cr(" locals:     %d", _locals_decode_offset);
    st->print_cr(" stack:      %d", _expressions_decode_offset);
    st->print_cr(" monitor:    %d", _monitors_decode_offset);
    st->print_cr(" sender:     %d", _sender_decode_offset);
  }
  // locals
  {
    GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->locals();
    if (l != NULL) {
      st->print_cr(" Locals");  // was tty->print_cr, bypassing the target stream
      for (int index = 0; index < l->length(); index++) {
        st->print("  - l%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // expressions
  {
    GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->expressions();
    if (l != NULL) {
      st->print_cr(" Expression stack");
      for (int index = 0; index < l->length(); index++) {
        st->print("  - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // monitors
  {
    GrowableArray<MonitorValue*>* l = ((ScopeDesc*) this)->monitors();
    if (l != NULL) {
      st->print_cr(" Monitor stack");
      for (int index = 0; index < l->length(); index++) {
        st->print("  - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
#ifdef COMPILER2
  if (DoEscapeAnalysis && is_top() && _objects != NULL) {
    // was tty->... throughout this section; now honors the target stream
    st->print_cr("Objects");
    for (int i = 0; i < _objects->length(); i++) {
      ObjectValue* sv = (ObjectValue*) _objects->at(i);
      st->print(" - %d: ", sv->id());
      sv->print_fields_on(st);
      st->cr();
    }
  }
#endif // COMPILER2
}
// Returns a freshly allocated ScopeDesc describing the caller's scope, or
// NULL when this is already the outermost (top) scope.
ScopeDesc* ScopeDesc::sender() const {
  return is_top() ? NULL : new ScopeDesc(this);
}
// Returns a freshly allocated ScopeDesc decoded at the sender's offset, or
// NULL when this is already the outermost (top) scope.
ScopeDesc* ScopeDesc::sender() const {
  if (is_top()) {
    return NULL;
  }
  return new ScopeDesc(_code, _sender_decode_offset);
}
// A type can be treated as a boolean when it is Top, a primitive, or a
// reference (same predicate order as before: Top, primitive, then ref).
bool Type::is_boolifiable() const {
  if (is_top() || is_primitive()) {
    return true;
  }
  return is_ref();
}