// This function attempts to find a pre-coloring hint from two
// different sources: If tmp comes from a DefLabel, it will scan up to
// the SSATmps providing values to incoming Jmp_s to look for a
// hint. If tmp is consumed by a Jmp_, look for other incoming Jmp_s
// to its destination and see if any of them have already been given a
// register. If all of these fail, let normal register allocation
// proceed unhinted.
//
// @param tmp       the SSATmp we want a register hint for
// @param regIndex  which of tmp's needed registers we're coloring
// @param isReload  true when tmp is (about to be) a Reload of a
//                  spilled value rather than the original def
// @return the hinted register, or reg::noreg if no hint was found
RegNumber LinearScan::getJmpPreColor(SSATmp* tmp, uint32_t regIndex,
                                     bool isReload) {
  IRInstruction* srcInst = tmp->inst();
  const JmpList& jmps = m_jmps[tmp];
  if (isReload && (srcInst->op() == DefLabel || !jmps.empty())) {
    // If we're precoloring a Reload of a temp that we'd normally find
    // a hint for, just return the register allocated to the spilled
    // temp.
    auto reg = m_allocInfo[tmp].reg(regIndex);
    assert(reg != reg::noreg);
    return reg;
  }

  if (srcInst->op() == DefLabel) {
    // Figure out which dst of the label is tmp
    for (unsigned i = 0, n = srcInst->numDsts(); i < n; ++i) {
      if (srcInst->dst(i) == tmp) {
        auto reg = findLabelSrcReg(m_allocInfo, srcInst, i, regIndex);
        // Until we handle loops, it's a bug to try and allocate a
        // register to a DefLabel's dest before all of its incoming
        // Jmp_s have had their srcs allocated, unless the incoming
        // block is unreachable.
        const DEBUG_ONLY bool unreachable =
          std::find(m_blocks.begin(), m_blocks.end(),
                    srcInst->block()) == m_blocks.end();
        always_assert(reg != reg::noreg || unreachable);
        return reg;
      }
    }
    // tmp claimed srcInst as its def, so it must be one of its dsts.
    not_reached();
  }

  // If srcInst wasn't a label, check if tmp is used by any Jmp_
  // instructions. If it is, trace to the Jmp_'s label and use the
  // same procedure as above.
  for (unsigned ji = 0, jn = jmps.size(); ji < jn; ++ji) {
    IRInstruction* jmp = jmps[ji];
    IRInstruction* label = jmp->taken()->front();

    // Figure out which src of the Jmp_ is tmp
    for (unsigned si = 0, sn = jmp->numSrcs(); si < sn; ++si) {
      SSATmp* src = jmp->src(si);
      if (tmp == src) {
        // For now, a DefLabel should never have a register assigned
        // to it before any of its incoming Jmp_ instructions.
        always_assert(m_allocInfo[label->dst(si)].reg(regIndex) ==
                      reg::noreg);
        auto reg = findLabelSrcReg(m_allocInfo, label, si, regIndex);
        // Take the first sibling Jmp_ src that already has a register.
        if (reg != reg::noreg) return reg;
      }
    }
  }

  return reg::noreg;
}
/* * Insert a DbgAssertTv instruction for each stack location stored to by * a SpillStack instruction. */ static void insertSpillStackAsserts(IRInstruction& inst, IRFactory* factory) { SSATmp* sp = inst.dst(); auto const vals = inst.srcs().subpiece(2); auto* block = inst.block(); auto pos = block->iteratorTo(&inst); ++pos; for (unsigned i = 0, n = vals.size(); i < n; ++i) { Type t = vals[i]->type(); if (t.subtypeOf(Type::Gen)) { IRInstruction* addr = factory->gen(LdStackAddr, Type::PtrToGen, StackOffset(i), sp); block->insert(pos, addr); IRInstruction* check = factory->gen(DbgAssertPtr, addr->dst()); block->insert(pos, check); } } }
/*
 * Allocate registers for one instruction: reload any spilled srcs,
 * free registers whose live ranges end here, and assign registers to
 * the instruction's dsts (with special-casing for StkPtr/FramePtr
 * dsts that live in ABI registers).
 *
 * Fix: the inner pin-marking loop previously declared `int i`,
 * shadowing the outer source-index `i`; renamed to `ri` to remove the
 * shadowing hazard (behavior unchanged).
 */
void LinearScan::allocRegToInstruction(InstructionList::iterator it) {
  IRInstruction* inst = &*it;
  dumpIR<IRInstruction, kExtraLevel>(inst, "allocating to instruction");

  // Reload all source operands if necessary.
  // Mark registers as unpinned.
  for (int regNo = 0; regNo < kNumRegs; ++regNo) {
    m_regs[regNo].m_pinned = false;
  }
  smart::vector<bool> needsReloading(inst->numSrcs(), true);
  for (uint32_t i = 0; i < inst->numSrcs(); ++i) {
    SSATmp* tmp = inst->src(i);
    int32_t slotId = m_spillSlots[tmp];
    if (slotId == -1) {
      // Not spilled: the src is already in registers.
      needsReloading[i] = false;
    } else if ((tmp = m_slots[slotId].latestReload)) {
      // Spilled but a dominating reload exists: reuse it.
      needsReloading[i] = false;
      inst->setSrc(i, tmp);
    }
    if (!needsReloading[i]) {
      // Pin this src's registers so they aren't stolen while
      // allocating the remaining srcs/dsts of this instruction.
      // (was `int i`, shadowing the outer loop index)
      for (int ri = 0, n = m_allocInfo[tmp].numAllocatedRegs(); ri < n; ++ri) {
        m_regs[int(m_allocInfo[tmp].reg(ri))].m_pinned = true;
      }
    }
  }
  for (uint32_t i = 0; i < inst->numSrcs(); ++i) {
    if (needsReloading[i]) {
      SSATmp* tmp = inst->src(i);
      int32_t slotId = m_spillSlots[tmp];
      // <tmp> is spilled, and not reloaded.
      // Therefore, We need to reload the value into a new SSATmp.

      // Insert the Reload instruction.
      SSATmp* spillTmp = m_slots[slotId].spillTmp;
      IRInstruction* reload = m_unit.gen(Reload, inst->marker(),
                                         spillTmp);
      inst->block()->insert(it, reload);

      // Create <reloadTmp> which inherits <tmp>'s slot ID and
      // <spillTmp>'s last use ID.
      // Replace <tmp> with <reloadTmp> in <inst>.
      SSATmp* reloadTmp = reload->dst();
      m_uses[reloadTmp].lastUse = m_uses[spillTmp].lastUse;
      m_spillSlots[reloadTmp] = slotId;
      inst->setSrc(i, reloadTmp);
      // reloadTmp and tmp share the same type. Since it was spilled, it
      // must be using its entire needed-count of registers.
      assert(reloadTmp->type() == tmp->type());
      for (int locIndex = 0; locIndex < tmp->numNeededRegs();) {
        locIndex += allocRegToTmp(reloadTmp, locIndex);
      }

      // Remember this reload tmp in case we can reuse it in later blocks.
      m_slots[slotId].latestReload = reloadTmp;
      dumpIR<IRInstruction, kExtraLevel>(reload, "created reload");
    }
  }

  // Release registers whose tmps die at this instruction.
  freeRegsAtId(m_linear[inst]);
  // Update next native.
  if (nextNative() == inst) {
    assert(!m_natives.empty());
    m_natives.pop_front();
    computePreColoringHint();
  }

  Range<SSATmp*> dsts = inst->dsts();
  if (dsts.empty()) return;

  Opcode opc = inst->op();
  if (opc == DefMIStateBase) {
    // MInstr state base always lives at rsp.
    assert(dsts[0].isA(Type::PtrToCell));
    assignRegToTmp(&m_regs[int(rsp)], &dsts[0], 0);
    return;
  }

  for (SSATmp& dst : dsts) {
    for (int numAllocated = 0, n = dst.numNeededRegs(); numAllocated < n; ) {
      // LdRaw, loading a generator's embedded AR, is the only time we have a
      // pointer to an AR that is not in rVmFp.
      const bool abnormalFramePtr =
        (opc == LdRaw &&
          inst->src(1)->getValInt() == RawMemSlot::ContARPtr);

      // Note that the point of StashGeneratorSP is to save a StkPtr
      // somewhere other than rVmSp.  (TODO(#2288359): make rbx not
      // special.)
      const bool abnormalStkPtr = opc == StashGeneratorSP;

      if (!abnormalStkPtr && dst.isA(Type::StkPtr)) {
        assert(opc == DefSP ||
               opc == ReDefSP ||
               opc == ReDefGeneratorSP ||
               opc == PassSP ||
               opc == DefInlineSP ||
               opc == Call ||
               opc == CallArray ||
               opc == SpillStack ||
               opc == SpillFrame ||
               opc == CufIterSpillFrame ||
               opc == ExceptionBarrier ||
               opc == RetAdjustStack ||
               opc == InterpOne ||
               opc == InterpOneCF ||
               opc == GenericRetDecRefs ||
               opc == CheckStk ||
               opc == GuardStk ||
               opc == AssertStk ||
               opc == CastStk ||
               opc == CoerceStk ||
               opc == SideExitGuardStk  ||
               MInstrEffects::supported(opc));
        assignRegToTmp(&m_regs[int(rVmSp)], &dst, 0);
        numAllocated++;
        continue;
      }
      if (!abnormalFramePtr && dst.isA(Type::FramePtr)) {
        assignRegToTmp(&m_regs[int(rVmFp)], &dst, 0);
        numAllocated++;
        continue;
      }

      // Generally speaking, StkPtrs are pretty special due to
      // tracelet ABI registers. Keep track here of the allowed uses
      // that don't use the above allocation.
      assert(!dst.isA(Type::FramePtr) || abnormalFramePtr);
      assert(!dst.isA(Type::StkPtr) || abnormalStkPtr);

      if (!RuntimeOption::EvalHHIRDeadCodeElim || m_uses[dst].lastUse != 0) {
        numAllocated += allocRegToTmp(&dst, numAllocated);
      } else {
        // Dead dst under DCE: skip the location without allocating.
        numAllocated++;
      }
    }
  }
  if (!RuntimeOption::EvalHHIRDeadCodeElim) {
    // if any outputs were unused, free regs now.
    freeRegsAtId(m_linear[inst]);
  }
}
/*
 * Allocate registers for every block belonging to the trace that
 * starts at *blockIt, advancing blockIt past the trace.  Allocator
 * state is saved into `etm` at each exit from the main trace and
 * restored from `etm` when an exit trace is later processed, so exit
 * traces start from the state their entry point observed.  After
 * allocation, Spill instructions created on this trace are inserted
 * into the IR.
 */
void LinearScan::allocRegsOneTrace(BlockList::iterator& blockIt,
                                   ExitTraceMap& etm) {
  auto const trace = (*blockIt)->trace();

  collectInfo(blockIt, trace);
  computePreColoringHint();

  auto v = etm.find(*blockIt);
  if (v != etm.end()) {
    // Exit trace: resume from the allocator state saved when the main
    // trace branched here.
    assert(!trace->isMain());
    v->second.restore(this);
  } else {
    // Must be the very first (main) trace; start with all registers free.
    assert(blockIt == m_blocks.begin() && trace->isMain());
    initFreeList();
  }

  // First, visit every instruction, allocating registers as we go,
  // and inserting Reload instructions where necessary.
  bool isMain = trace->isMain();
  // Slots created before this trace; new ones get spills inserted below.
  size_t sz = m_slots.size();
  while (blockIt != m_blocks.end()) {
    Block* block = *blockIt;
    if (block->trace() != trace) {
      if (!isMain) {
        // An exit trace ends at the first foreign block.
        break;
      } else {
        // The main trace skips over interleaved exit-trace blocks.
        ++blockIt;
        continue;
      }
    }
    FTRACE(5, "Block{}: {} ({})\n",
           trace->isMain() ? "" : " (exit trace)",
           (*blockIt)->id(), (*blockIt)->postId());

    // clear remembered reloads that don't dominate this block
    for (SlotInfo& slot : m_slots) {
      if (SSATmp* reload = slot.latestReload) {
        if (!dominates(reload->inst()->block(), block, m_idoms)) {
          slot.latestReload = nullptr;
        }
      }
    }
    for (auto it = block->begin(), end = block->end(); it != end; ++it) {
      allocRegToInstruction(it);
      dumpIR<IRInstruction, kExtraLevel>(&*it, "allocated to instruction ");
    }
    if (isMain) {
      assert(block->trace()->isMain());
      if (block->taken() &&
          !block->taken()->trace()->isMain()) {
        // Snapshot allocator state for the exit trace reached from here.
        etm[block->taken()].save(this);
      }
    }
    ++blockIt;
  }

  // Now that we have visited all instructions on this trace,
  // and inserted Reloads for SSATmps which needed to be spilled,
  // we can go back and insert the spills.
  // On the main trace, insert the spill right after the instruction
  // that generated the value (without traversing everything else).
  // On exit traces, if the instruction that generated the value
  // is on the main trace, insert the spill at the start of the trace,
  // otherwise, after the instruction that generated the value
  size_t begin = sz;
  size_t end = m_slots.size();
  while (begin < end) {
    SlotInfo& slot = m_slots[begin++];
    IRInstruction* spill = slot.spillTmp->inst();
    IRInstruction* inst = spill->src(0)->inst();
    Block* block = inst->block();
    if (!isMain && block->trace()->isMain()) {
      // We're on an exit trace, but the def is on the
      // main trace, so put it at the start of this trace
      if (spill->block()) {
        // its already been inserted in another exit trace
        assert(!spill->block()->trace()->isMain());
        spill = m_unit.cloneInstruction(spill);
      }
      trace->front()->prepend(spill);
    } else if (inst->isBlockEnd()) {
      // Can't insert after a block-ending instruction; put the spill
      // at the top of the fallthrough block instead.
      block->next()->prepend(spill);
    } else {
      auto pos = block->iteratorTo(inst);
      block->insert(++pos, spill);
    }
  }
}