void TraceBuilder::appendInstruction(IRInstruction* inst) {
  if (m_curWhere) {
    // We have a specific position to insert instructions.
    assert(!inst->isBlockEnd());
    auto& it = m_curWhere.get();
    it = m_curBlock->insert(it, inst);
    ++it;
    return;
  }

  Block* block = m_curTrace->back();
  if (!block->empty()) {
    IRInstruction* prev = block->back();
    if (prev->isBlockEnd()) {
      // start a new block
      Block* next = m_irFactory.defBlock(m_curFunc->getValFunc());
      m_curTrace->push_back(next);
      if (!prev->isTerminal()) {
        // new block is reachable from old block so link it.
        block->setNext(next);
      }
      block = next;
    }
  }
  appendInstruction(inst, block);
  updateTrackedState(inst);
}
// If main trace ends with an unconditional jump, and the target is not
// reached by any other branch, then copy the target of the jump to the
// end of the trace
static void elimUnconditionalJump(Trace* trace, IRFactory* irFactory) {
  boost::dynamic_bitset<> isJoin(irFactory->numLabels());
  boost::dynamic_bitset<> havePred(irFactory->numLabels());
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction* inst : instList) {
    if (inst->isControlFlowInstruction()) {
      auto id = inst->getLabel()->getLabelId();
      isJoin[id] = havePred[id];
      havePred[id] = 1;
    }
  }
  IRInstruction::Iterator lastInst = instList.end();
  --lastInst; // go back to the last instruction
  IRInstruction* jmp = *lastInst;
  if (jmp->getOpcode() == Jmp_ && !isJoin[jmp->getLabel()->getLabelId()]) {
    Trace* targetTrace = jmp->getLabel()->getParent();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator instIter = targetInstList.begin();
    instIter++; // skip over label
    // update the parent trace of the moved instructions
    for (IRInstruction::Iterator it = instIter;
         it != targetInstList.end();
         ++it) {
      (*it)->setParent(trace);
    }
    instList.splice(lastInst, targetInstList, instIter, targetInstList.end());
    // delete the jump instruction
    instList.erase(lastInst);
  }
}
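// A minimal standalone sketch (plain std::list of ints rather than HHVM's
// instruction lists -- an assumption for illustration) of the splice-then-erase
// mechanics the pass above relies on: splice moves a range in O(1) without
// copying nodes, and the jump is erased afterwards because splice inserts
// *before* the given position.
#include <iostream>
#include <iterator>
#include <list>

int main() {
  std::list<int> trace  = {1, 2, 99};    // 99 stands in for the trailing Jmp_
  std::list<int> target = {0, 3, 4};     // 0 stands in for the label

  auto jmp  = std::prev(trace.end());    // position of the "jump"
  auto body = std::next(target.begin()); // skip over the "label"

  trace.splice(jmp, target, body, target.end()); // move 3,4 before the jump
  trace.erase(jmp);                              // drop the jump itself

  for (int v : trace) std::cout << v << ' ';     // prints: 1 2 3 4
  std::cout << '\n';
}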
/*
 * Insert asserts at various points in the IR.
 * TODO: t2137231 Insert DbgAssertPtr at points that use or produce a GenPtr
 */
static void insertAsserts(IRTrace* trace, IRFactory& factory) {
  forEachTraceBlock(trace, [&](Block* block) {
    for (auto it = block->begin(), end = block->end(); it != end; ) {
      IRInstruction& inst = *it;
      ++it;
      if (inst.op() == SpillStack) {
        insertSpillStackAsserts(inst, factory);
        continue;
      }
      if (inst.op() == Call) {
        SSATmp* sp = inst.dst();
        IRInstruction* addr = factory.gen(LdStackAddr,
                                          inst.marker(),
                                          Type::PtrToGen,
                                          StackOffset(0),
                                          sp);
        insertAfter(&inst, addr);
        insertAfter(addr, factory.gen(DbgAssertPtr, inst.marker(),
                                      addr->dst()));
        continue;
      }
      if (!inst.isBlockEnd()) insertRefCountAsserts(inst, factory);
    }
  });
}
/**
 * Called to clear out the tracked local values at a call site.
 * Calls kill all registers, so we don't want to keep locals in
 * registers across calls. We do continue tracking the types in
 * locals, however.
 */
void TraceBuilder::killLocalsForCall() {
  auto doKill = [&](smart::vector<LocalState>& locals) {
    for (auto& loc : locals) {
      SSATmp* t = loc.value;
      // should not kill DefConst, and LdConst should be replaced by DefConst
      if (!t || t->inst()->op() == DefConst) continue;

      if (t->inst()->op() == LdConst) {
        // make the new DefConst instruction
        IRInstruction* clone = t->inst()->clone(&m_irFactory);
        clone->setOpcode(DefConst);
        loc.value = clone->dst();
        continue;
      }
      assert(!t->isConst());
      loc.unsafe = true;
    }
  };

  doKill(m_locals);
  m_callerAvailableValues.clear();

  for (auto& state : m_inlineSavedStates) {
    doKill(state->locals);
    state->callerAvailableValues.clear();
  }
}
void LinearScan::collectInfo(BlockList::iterator it, IRTrace* trace) {
  m_natives.clear();
  m_uses.reset(); // TODO(#2536764): serious time sink

  while (it != m_blocks.end()) {
    Block* block = *it++;
    bool offTrace = block->trace() != trace;
    if (offTrace) {
      if (!trace->isMain()) return;
      int lastId = block->trace()->data();
      for (IRInstruction& inst : *block) {
        for (auto* src : inst.srcs()) {
          if (lastId > m_uses[src].lastUse) {
            m_uses[src].lastUse = lastId;
          }
        }
      }
    } else {
      for (IRInstruction& inst : *block) {
        for (auto* src : inst.srcs()) {
          m_uses[src].lastUse = m_linear[inst];
        }
        if (inst.isNative()) m_natives.push_back(&inst);
      }

      IRInstruction* jmp = block->back();
      if (jmp->op() == Jmp_ && jmp->numSrcs() != 0) {
        for (SSATmp* src : jmp->srcs()) {
          m_jmps[src].push_back(jmp);
        }
      }
    }
  }
}
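// The core bookkeeping above is "record the position of each value's last use
// in linearized order". A minimal sketch with plain maps and integer value
// ids (an assumption -- not LinearScan's real data structures): overwriting
// the entry on every use while walking in linear order leaves the final use
// position behind, which is what tells a linear-scan allocator when a
// register becomes free.
#include <cstdio>
#include <unordered_map>
#include <vector>

int main() {
  // Each "instruction" is just the list of value ids it reads.
  std::vector<std::vector<int>> insts = {{0}, {0, 1}, {1}, {0}};
  std::unordered_map<int, int> lastUse;

  for (int pos = 0; pos < (int)insts.size(); ++pos) {
    for (int src : insts[pos]) lastUse[src] = pos; // later uses overwrite
  }

  for (auto& [val, pos] : lastUse) {
    std::printf("value %d last used at %d\n", val, pos); // v0 -> 3, v1 -> 2
  }
}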
static void genBlock(IRUnit& unit, CodeBlock& cb, CodeBlock& stubsCode,
                     MCGenerator* mcg, CodegenState& state, Block* block,
                     std::vector<TransBCMapping>* bcMap) {
  FTRACE(6, "genBlock: {}\n", block->id());
  std::unique_ptr<CodeGenerator> cg(
    mcg->backEnd().newCodeGenerator(unit, cb, stubsCode, mcg, state));
  BCMarker prevMarker;
  for (IRInstruction& instr : *block) {
    IRInstruction* inst = &instr;
    // If we're on the first instruction of the block or we have a new
    // marker since the last instruction, update the bc mapping.
    if ((!prevMarker.valid() || inst->marker() != prevMarker) &&
        (mcg->tx().isTransDBEnabled() ||
         RuntimeOption::EvalJitUseVtuneAPI) && bcMap) {
      bcMap->push_back(TransBCMapping{inst->marker().func()->unit()->md5(),
                                      inst->marker().bcOff(),
                                      cb.frontier(),
                                      stubsCode.frontier()});
      prevMarker = inst->marker();
    }
    auto* addr = cg->cgInst(inst);
    if (state.asmInfo && addr) {
      state.asmInfo->updateForInstruction(inst, addr, cb.frontier());
    }
  }
}
static void genBlock(IRUnit& unit, CodeBlock& cb, CodeBlock& coldCode,
                     CodeBlock& frozenCode, CodegenState& state, Block* block,
                     std::vector<TransBCMapping>* bcMap) {
  FTRACE(6, "genBlock: {}\n", block->id());
  std::unique_ptr<CodeGenerator> cg(
    mcg->backEnd().newCodeGenerator(unit, cb, coldCode, frozenCode, state));
  for (IRInstruction& instr : *block) {
    IRInstruction* inst = &instr;
    if (instr.is(EndGuards)) state.pastGuards = true;
    if (bcMap && state.pastGuards &&
        (mcg->tx().isTransDBEnabled() || RuntimeOption::EvalJitUseVtuneAPI)) {
      // Don't insert an entry in bcMap if the marker corresponds to the last
      // entry already in there.
      if (bcMap->empty() ||
          bcMap->back().md5 != inst->marker().func()->unit()->md5() ||
          bcMap->back().bcStart != inst->marker().bcOff()) {
        bcMap->push_back(TransBCMapping{
          inst->marker().func()->unit()->md5(),
          inst->marker().bcOff(),
          mcg->cgFixups().m_tletMain->frontier(),
          mcg->cgFixups().m_tletCold->frontier(),
          mcg->cgFixups().m_tletFrozen->frontier()});
      }
    }
    auto* start = cb.frontier();
    cg->cgInst(inst);
    if (state.asmInfo && start < cb.frontier()) {
      state.asmInfo->updateForInstruction(inst, start, cb.frontier());
    }
  }
}
void TraceBuilder::appendInstruction(IRInstruction* inst) {
  if (m_curWhere) {
    // We have a specific position to insert instructions.
    assert(!inst->isBlockEnd());
    auto& it = m_curWhere.get();
    it = m_curBlock->insert(it, inst);
    ++it;
    return;
  }

  Block* block = m_curTrace->back();
  if (!block->empty()) {
    IRInstruction* prev = &block->back();
    if (prev->isBlockEnd()) {
      // start a new block
      Block* next = m_unit.defBlock();
      FTRACE(2, "lazily adding B{}\n", next->id());
      m_curTrace->push_back(next);
      if (!prev->isTerminal()) {
        // new block is reachable from old block so link it.
        block->setNext(next);
        next->setHint(block->hint());
      }
      block = next;
    }
  }
  appendInstruction(inst, block);
  if (m_savedTraces.empty()) {
    // We don't track state on non-main traces for now. t2982555
    m_state.update(inst);
  }
}
void LinearScan::removeUnusedSpillsAux(Trace* trace) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end(); ) {
    IRInstruction::Iterator next = it; ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill && inst->getDst()->getUseCount() == 0) {
      instList.erase(it);
      SSATmp* src = inst->getSrc(0);
      if (src->decUseCount() == 0) {
        Opcode srcOpc = src->getInstruction()->getOpcode();
        // Not all instructions are able to take noreg as their dest
        // reg. We pick LdLoc and IncRef because they occur often.
        if (srcOpc == IncRef || srcOpc == LdLoc) {
          for (int locIndex = 0;
               locIndex < src->numNeededRegs();
               ++locIndex) {
            src->setReg(InvalidReg, locIndex);
          }
        }
      }
    }
    it = next;
  }
}
/*
 * Insert a DbgAssertRefCount instruction after each place we produce
 * a refcounted value. The value must be something we can safely dereference
 * to check the _count field.
 */
static void insertRefCountAsserts(IRInstruction& inst, IRUnit& unit) {
  for (SSATmp& dst : inst.dsts()) {
    Type t = dst.type();
    if (t <= (Type::Counted | Type::StaticStr | Type::StaticArr)) {
      insertAfter(&inst, unit.gen(DbgAssertRefCount, inst.marker(), &dst));
    }
  }
}
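// The guard `t <= (Type::Counted | Type::StaticStr | Type::StaticArr)` is a
// lattice subtype test: "could every value of t be inside the union?". A
// minimal standalone sketch with plain bitmasks standing in for HHVM's Type
// class (an assumption for illustration): union is bitwise-or, and subtype
// is the subset relation on bits.
#include <cassert>
#include <cstdint>

using Ty = uint32_t;                    // one bit per primitive type
constexpr Ty Counted   = 1u << 0;
constexpr Ty StaticStr = 1u << 1;
constexpr Ty StaticArr = 1u << 2;
constexpr Ty Int       = 1u << 3;

// t1 <= t2 iff every bit of t1 is also set in t2.
constexpr bool subtypeOf(Ty t1, Ty t2) { return (t1 & ~t2) == 0; }

int main() {
  constexpr Ty assertable = Counted | StaticStr | StaticArr;
  static_assert(subtypeOf(StaticStr, assertable), "str is in the union");
  static_assert(!subtypeOf(Int | Counted, assertable), "ints are not");
  assert(subtypeOf(Counted | StaticArr, assertable));
  return 0;
}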
MethodBlock* MethodBlock::if_else_testz(IROpcode if_op,
                                        Location test,
                                        MethodBlock** true_block) {
  always_assert(OPCODE_IF_EQZ <= if_op && if_op <= OPCODE_IF_LEZ);
  IRInstruction* op = new IRInstruction(if_op);
  op->set_src(0, test.get_reg());
  return make_if_else_block(op, true_block);
}
void MethodBlock::load_null(Location& loc) {
  always_assert(!loc.is_wide());
  IRInstruction* load = new IRInstruction(OPCODE_CONST);
  load->set_dest(loc.get_reg());
  load->set_literal(0);
  loc.type = get_object_type();
  push_instruction(load);
}
void MethodBlock::load_const(Location& loc, double value) {
  always_assert(loc.is_wide());
  IRInstruction* load = new IRInstruction(OPCODE_CONST_WIDE);
  load->set_dest(loc.get_reg());
  load->set_literal(value);
  loc.type = get_double_type();
  push_instruction(load);
}
/*
 * Insert a DbgAssertRefCount instruction after each place we produce
 * a refcounted value. The value must be something we can safely dereference
 * to check the _count field.
 */
static void insertRefCountAsserts(IRInstruction& inst, IRFactory& factory) {
  for (SSATmp& dst : inst.dsts()) {
    Type t = dst.type();
    if (t.subtypeOf(Type::Counted | Type::StaticStr | Type::StaticArr)) {
      insertAfter(&inst, factory.gen(DbgAssertRefCount, inst.marker(), &dst));
    }
  }
}
MethodBlock* MethodBlock::if_test(IROpcode if_op,
                                  Location first,
                                  Location second) {
  always_assert(OPCODE_IF_EQ <= if_op && if_op <= OPCODE_IF_LE);
  IRInstruction* op = new IRInstruction(if_op);
  op->set_src(0, first.get_reg());
  op->set_src(1, second.get_reg());
  return make_if_block(op);
}
void MethodBlock::instance_of(Location& obj, Location& dst, DexType* type) {
  always_assert(obj.is_ref());
  always_assert(dst.type == get_boolean_type());
  IRInstruction* insn = new IRInstruction(OPCODE_INSTANCE_OF);
  insn->set_src(0, obj.get_reg());
  insn->set_type(type);
  push_instruction(insn);
  push_instruction(
      (new IRInstruction(IOPCODE_MOVE_RESULT_PSEUDO))
          ->set_dest(dst.get_reg()));
}
void MethodBlock::load_const(Location& loc, DexType* value) {
  always_assert(!loc.is_wide());
  IRInstruction* load = new IRInstruction(OPCODE_CONST_CLASS);
  load->set_type(value);
  push_instruction(load);
  IRInstruction* move_result_pseudo =
      new IRInstruction(IOPCODE_MOVE_RESULT_PSEUDO_OBJECT);
  loc.type = get_class_type();
  move_result_pseudo->set_dest(loc.get_reg());
  push_instruction(move_result_pseudo);
}
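// A hypothetical usage sketch (not from the original sources) tying the
// MethodBlock builders above together: test an object against a type and
// branch on the boolean result. The helper name emit_instance_check and the
// pre-made obj/res Locations are assumptions for illustration; only calls
// demonstrated in the snippets above are used.
void emit_instance_check(MethodBlock* block,
                         Location obj,      // a reference-typed local
                         Location res,      // a boolean-typed local
                         DexType* wanted) {
  block->instance_of(obj, res, wanted); // res = (obj instanceof wanted)
  MethodBlock* true_block = nullptr;
  // Going by the out-parameter's name, *true_block is the path taken when the
  // test register is non-zero; the returned block is assumed to be the other
  // path (make_if_else_block's convention isn't shown in these snippets).
  MethodBlock* else_block =
      block->if_else_testz(OPCODE_IF_NEZ, res, &true_block);
  (void)else_block;
  (void)true_block; // append per-branch instructions to each block here
}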
IRInstruction* IRUnit::defLabel(unsigned numDst, BCMarker marker) {
  IRInstruction inst(DefLabel, marker);
  IRInstruction* label = cloneInstruction(&inst);
  if (numDst > 0) {
    SSATmp* dsts = (SSATmp*) m_arena.alloc(numDst * sizeof(SSATmp));
    for (unsigned i = 0; i < numDst; ++i) {
      new (&dsts[i]) SSATmp(m_nextOpndId++, label);
    }
    label->setDsts(numDst, dsts);
  }
  return label;
}
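// defLabel constructs its SSATmps with placement new into raw arena storage:
// alloc() hands back uninitialized bytes, and each object is then constructed
// in place. A minimal standalone sketch of that idiom with malloc standing in
// for the arena (an assumption -- HHVM's arena frees everything at once and
// never runs destructors; here we destroy and free explicitly to keep the
// demo self-contained):
#include <cstdio>
#include <cstdlib>
#include <new>

struct Tmp {
  unsigned id;
  explicit Tmp(unsigned i) : id(i) { std::printf("construct %u\n", id); }
};

int main() {
  unsigned n = 3;
  // Raw, uninitialized storage -- no Tmp constructors have run yet.
  Tmp* tmps = static_cast<Tmp*>(std::malloc(n * sizeof(Tmp)));
  for (unsigned i = 0; i < n; ++i) {
    new (&tmps[i]) Tmp(i); // construct each object in place
  }
  for (unsigned i = 0; i < n; ++i) tmps[i].~Tmp(); // manual destruction
  std::free(tmps);
}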
void MethodBlock::binop_lit8(IROpcode op,
                             const Location& dest,
                             const Location& src,
                             int8_t literal) {
  always_assert(OPCODE_ADD_INT_LIT8 <= op && op <= OPCODE_USHR_INT_LIT8);
  always_assert(dest.type == src.type);
  always_assert(dest.type == get_int_type());
  IRInstruction* insn = new IRInstruction(op);
  insn->set_dest(dest.get_reg());
  insn->set_src(0, src.get_reg());
  insn->set_literal(literal);
  push_instruction(insn);
}
// Create a spill slot for <tmp>.
uint32_t LinearScan::createSpillSlot(SSATmp* tmp) {
  uint32_t slotId = m_slots.size();
  tmp->setSpillSlot(slotId);
  IRInstruction* spillInst = m_irFactory->gen(Spill, tmp);
  SSATmp* spillTmp = spillInst->getDst();
  SlotInfo si;
  si.m_spillTmp = spillTmp;
  si.m_latestReload = tmp;
  m_slots.push_back(si);
  // The spill slot inherits the last use ID of the spilled tmp.
  si.m_spillTmp->setLastUseId(tmp->getLastUseId());
  return slotId;
}
SSATmp* Simplifier::simplifyCall(IRInstruction* inst) {
  auto spillVals = inst->getSrcs().subpiece(3);
  IRInstruction* spillStack = m_tb->getSp()->getInstruction();
  if (spillStack->getOpcode() != SpillStack) {
    return nullptr;
  }

  SSATmp* sp = spillStack->getSrc(0);
  int baseOffset = spillStack->getSrc(1)->getValInt() -
                   spillValueCells(spillStack);
  auto const numSpillSrcs = spillVals.size();
  for (int32_t i = 0; i < numSpillSrcs; i++) {
    const int64_t offset = -(i + 1) + baseOffset;
    assert(spillVals[i]->getType() != Type::ActRec);
    IRInstruction* srcInst = spillVals[i]->getInstruction();
    // If our value came from a LdStack on the same sp and offset,
    // we don't need to spill it.
    if (srcInst->getOpcode() == LdStack && srcInst->getSrc(0) == sp &&
        srcInst->getSrc(1)->getValInt() == offset) {
      spillVals[i] = m_tb->genDefNone();
    }
  }

  // Note: although the instruction might have been modified above, we still
  // need to return nullptr so that it gets cloned later if it's stack-allocated
  return nullptr;
}
TEST(IRInstruction, InvokeSourceIsWideBasic) {
  using namespace dex_asm;
  g_redex = new RedexContext();

  DexMethodRef* m = DexMethod::make_method("Lfoo;", "baz", "V", {"J"});
  IRInstruction* insn = new IRInstruction(OPCODE_INVOKE_STATIC);
  insn->set_arg_word_count(1);
  insn->set_src(0, 0);
  insn->set_method(m);

  EXPECT_TRUE(insn->invoke_src_is_wide(0));

  delete g_redex;
}
IRInstruction* IRUnit::defLabel(unsigned numDst, BCMarker marker,
                                const jit::vector<uint32_t>& producedRefs) {
  IRInstruction inst(DefLabel, marker);
  IRInstruction* label = cloneInstruction(&inst);
  always_assert(producedRefs.size() == numDst);
  m_labelRefs[label] = producedRefs;
  if (numDst > 0) {
    SSATmp* dsts = (SSATmp*) m_arena.alloc(numDst * sizeof(SSATmp));
    for (unsigned i = 0; i < numDst; ++i) {
      new (&dsts[i]) SSATmp(m_nextOpndId++, label);
    }
    label->setDsts(numDst, dsts);
  }
  return label;
}
static void insertRefCountAssertsAux(Trace* trace, IRFactory* factory) {
  IRInstruction::List& instructions = trace->getInstructionList();
  IRInstruction::Iterator it;
  for (it = instructions.begin(); it != instructions.end(); ) {
    IRInstruction* inst = *it;
    it++;
    SSATmp* dst = inst->getDst();
    if (dst &&
        Type::isStaticallyKnown(dst->getType()) &&
        Type::isRefCounted(dst->getType())) {
      auto* assertInst = factory->gen(DbgAssertRefCount, dst);
      assertInst->setParent(trace);
      instructions.insert(it, assertInst);
    }
  }
}
MemEffects memory_effects(const IRInstruction& inst) {
  auto const ret = memory_effects_impl(inst);
  if (debug) {
    // In debug let's do some type checking in case people move instruction
    // argument numbers.
    auto const fp = match<SSATmp*>(
      ret,
      [&] (UnknownEffects)    { return nullptr; },
      [&] (IrrelevantEffects) { return nullptr; },
      [&] (ReadAllLocals)     { return nullptr; },
      [&] (KillFrameLocals l) { return l.fp; },
      [&] (ReadLocal l)       { return l.fp; },
      [&] (ReadLocal2 l)      { return l.fp; },
      [&] (StoreLocal l)      { return l.fp; },
      [&] (StoreLocalNT l)    { return l.fp; }
    );
    if (fp != nullptr) {
      always_assert_flog(
        fp->type() <= Type::FramePtr,
        "Non frame pointer in memory effects:\n inst: {}\n effects: {}",
        inst.toString(), show(ret)
      );
    }
  }
  return ret;
}
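// `match<SSATmp*>(ret, ...)` is sum-type visitation: one lambda per
// alternative, all coerced to a common result type. A minimal sketch of the
// same pattern using std::variant/std::visit in place of HHVM's match helper
// (the simplified effect structs here are assumptions for illustration):
#include <cstdio>
#include <variant>

struct IrrelevantEffects {};
struct ReadLocal  { int fp; };
struct StoreLocal { int fp; };
using Effects = std::variant<IrrelevantEffects, ReadLocal, StoreLocal>;

// Classic overload set so std::visit can pick one lambda per alternative.
template <class... Fs> struct overloaded : Fs... { using Fs::operator()...; };
template <class... Fs> overloaded(Fs...) -> overloaded<Fs...>;

int fpOf(const Effects& e) {
  return std::visit(overloaded{
    [](IrrelevantEffects) { return -1; },   // no frame pointer involved
    [](ReadLocal l)       { return l.fp; },
    [](StoreLocal l)      { return l.fp; }
  }, e);
}

int main() {
  std::printf("%d\n", fpOf(Effects{ReadLocal{7}}));        // 7
  std::printf("%d\n", fpOf(Effects{IrrelevantEffects{}})); // -1
}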
/*
 * Check that each destination register or spill slot is unique, and that
 * each source has no more operands than its corresponding destination.
 */
bool checkShuffle(const IRInstruction& inst, const RegAllocInfo& regs) {
  auto n = inst.numSrcs();
  assert(n == inst.extra<Shuffle>()->size);
  RegSet destRegs;
  std::bitset<NumPreAllocatedSpillLocs> destSlots;
  auto& inst_regs = regs[inst];
  for (uint32_t i = 0; i < n; ++i) {
    DEBUG_ONLY auto& rs = inst_regs.src(i);
    DEBUG_ONLY auto& rd = inst.extra<Shuffle>()->dests[i];
    if (rd.numAllocated() == 0) continue; // dest was unused; ignore.
    if (rd.spilled()) {
      assert(!rs.spilled()); // no mem-mem copies
    } else {
      // rs could have fewer assigned registers/slots than rd, in these cases:
      // - when rs is empty, because the source is a constant.
      // - when rs has 1 register because it's untagged but rd needs 2 because
      //   it's a more general (tagged) type, because of a phi.
      assert(rs.numWords() <= rd.numWords());
      assert(rs.spilled() || rs.isFullSIMD() == rd.isFullSIMD());
    }
    for (int j = 0; j < rd.numAllocated(); ++j) {
      if (rd.spilled()) {
        assert(!destSlots.test(rd.slot(j)));
        destSlots.set(rd.slot(j));
      } else {
        assert(!destRegs.contains(rd.reg(j))); // no duplicate dests
        destRegs.add(rd.reg(j));
      }
    }
  }
  return true;
}
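// The duplicate-destination check above is the classic test-then-set idiom
// over a bitset: verify a slot hasn't been seen, then mark it. A minimal
// standalone sketch with std::bitset (an assumption -- not HHVM's RegSet or
// spill-slot types):
#include <bitset>
#include <cassert>
#include <cstddef>

constexpr std::size_t kNumSlots = 128;

bool allDestsUnique(const int* slots, std::size_t n) {
  std::bitset<kNumSlots> seen;
  for (std::size_t i = 0; i < n; ++i) {
    if (seen.test(slots[i])) return false; // duplicate destination
    seen.set(slots[i]);
  }
  return true;
}

int main() {
  const int ok[]  = {3, 7, 42};
  const int bad[] = {3, 7, 3};
  assert(allDestsUnique(ok, 3));
  assert(!allDestsUnique(bad, 3));
  return 0;
}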
/*
 * Construct effects for InterpOne, using the information in its extra data.
 *
 * We always consider an InterpOne as potentially doing anything to the heap,
 * potentially re-entering, potentially raising warnings in the current frame,
 * potentially reading any locals, and potentially reading/writing any stack
 * location that isn't below the bottom of the stack.
 *
 * The extra data for the InterpOne comes with some additional information
 * about which local(s) it may modify, which is all we try to be more precise
 * about right now.
 */
GeneralEffects interp_one_effects(const IRInstruction& inst) {
  auto const extra = inst.extra<InterpOne>();
  auto loads  = AHeapAny | AStackAny | AFrameAny;
  auto stores = AHeapAny | AStackAny;
  if (extra->smashesAllLocals) {
    stores = stores | AFrameAny;
  } else {
    for (auto i = uint32_t{0}; i < extra->nChangedLocals; ++i) {
      stores = stores | AFrame { inst.src(1), extra->changedLocals[i].id };
    }
  }

  auto kills = AEmpty;
  if (isMemberBaseOp(extra->opcode)) {
    stores = stores | AMIStateAny;
    kills = kills | AMIStateAny;
  } else if (isMemberDimOp(extra->opcode) || isMemberFinalOp(extra->opcode)) {
    stores = stores | AMIStateAny;
    loads = loads | AMIStateAny;
  } else {
    kills = kills | AMIStateAny;
  }

  return may_raise(inst, may_load_store_kill(loads, stores, kills));
}
void TraceBuilder::appendInstruction(IRInstruction* inst) {
  Block* block = m_trace->back();
  IRInstruction* prev = block->back();
  if (prev->isBlockEnd()) {
    // start a new block
    Block* next = m_irFactory.defBlock(m_curFunc->getValFunc());
    m_trace->push_back(next);
    if (!prev->isTerminal()) {
      // new block is reachable from old block so link it.
      block->setNext(next);
    }
    block = next;
  }
  appendInstruction(inst, block);
  updateTrackedState(inst);
}
void LinearScan::allocRegsToTraceAux(Trace* trace) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  IRInstruction::Iterator it;
  for (it = instructionList.begin();
       it != instructionList.end();
       it++) {
    IRInstruction* inst = *it;
    allocRegToInstruction(trace, it);
    if (RuntimeOption::EvalDumpIR > 3) {
      std::cout << "--- allocated to instruction: ";
      inst->print(std::cout);
      std::cout << "\n";
    }
    if (inst->isControlFlowInstruction()) {
      // This instruction may transfer control to another trace.
      // If this is the last instruction in the trace that can branch
      // to this target trace, then allocate registers to the target
      // trace, effectively linearizing the target trace after inst.
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        allocRegsToTraceAux(label->getTrace());
      }
    }
  }

  // Insert spill instructions.
  // Reload instructions are already added in <allocRegsToTrace>.
  for (it = instructionList.begin(); it != instructionList.end(); ) {
    IRInstruction::Iterator next = it; ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() != Reload) {
      // Reloaded SSATmps needn't be spilled again.
      if (SSATmp* dst = inst->getDst()) {
        int32 slotId = dst->getSpillSlot();
        if (slotId != -1) {
          // If this instruction is marked to be spilled,
          // add a spill right afterwards.
          IRInstruction* spillInst =
            m_slots[slotId].m_slotTmp->getInstruction();
          instructionList.insert(next, spillInst);
          spillInst->setParent(trace);
        }
      }
    }
    it = next;
  }
}
const StringData* findClassName(SSATmp* cls) {
  assert(cls->isA(Type::Cls));

  if (cls->isConst()) {
    return cls->getValClass()->preClass()->name();
  }
  // Try to get the class name from a LdCls
  IRInstruction* clsInst = cls->inst();
  if (clsInst->op() == LdCls || clsInst->op() == LdClsCached) {
    SSATmp* clsName = clsInst->src(0);
    assert(clsName->isA(Type::Str));
    if (clsName->isConst()) {
      return clsName->getValStr();
    }
  }
  return nullptr;
}