SSATmp* Simplifier::simplifyCall(IRInstruction* inst) {
  auto spillVals = inst->getSrcs().subpiece(3);
  IRInstruction* spillStack = m_tb->getSp()->getInstruction();
  if (spillStack->getOpcode() != SpillStack) {
    return nullptr;
  }

  SSATmp* sp = spillStack->getSrc(0);
  int baseOffset = spillStack->getSrc(1)->getValInt() -
                   spillValueCells(spillStack);
  auto const numSpillSrcs = spillVals.size();
  for (int32_t i = 0; i < numSpillSrcs; i++) {
    const int64_t offset = -(i + 1) + baseOffset;
    assert(spillVals[i]->getType() != Type::ActRec);
    IRInstruction* srcInst = spillVals[i]->getInstruction();
    // If our value came from a LdStack on the same sp and offset,
    // we don't need to spill it.
    if (srcInst->getOpcode() == LdStack && srcInst->getSrc(0) == sp &&
        srcInst->getSrc(1)->getValInt() == offset) {
      spillVals[i] = m_tb->genDefNone();
    }
  }

  // Note: although the instruction might have been modified above, we still
  // need to return nullptr so that it gets cloned later if it's
  // stack-allocated.
  return nullptr;
}
// If main trace ends with a conditional jump with no side-effects on exit,
// hook it to the exitTrace and make it a TraceExitType::NormalCc.
static void hoistConditionalJumps(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator tail = instList.end();
  IRInstruction* jccInst = nullptr;
  IRInstruction* exitInst = nullptr;
  IRInstruction* exitCcInst = nullptr;
  Opcode opc = OpAdd;
  // Normally Jcc comes before a Marker.
  for (int idx = 3; idx >= 0; idx--) {
    tail--; // go back to the previous instruction
    IRInstruction* inst = *tail;
    opc = inst->getOpcode();
    if (opc == ExitTrace) {
      exitInst = inst;
      continue;
    }
    if (opc == Marker) {
      continue;
    }
    if (jccCanBeDirectExit(opc)) {
      jccInst = inst;
      break;
    }
    break;
  }
  if (jccCanBeDirectExit(opc)) {
    SSATmp* dst = jccInst->getDst();
    Trace* targetTrace = jccInst->getLabel()->getParent();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator targetInstIter = targetInstList.begin();
    targetInstIter++; // skip over label
    // Check for a NormalCc exit with no side effects.
    for (IRInstruction::Iterator it = targetInstIter;
         it != targetInstList.end();
         ++it) {
      IRInstruction* instr = (*it);
      // Extend to support ExitSlow, ExitSlowNoProgress, ...
      Opcode opc = instr->getOpcode();
      if (opc == ExitTraceCc) {
        exitCcInst = instr;
        break;
      } else if (opc == Marker) {
        continue;
      } else {
        // Do not optimize if there are other instructions.
        break;
      }
    }
    if (exitInst && exitCcInst) {
      // Found both exits; link them to Jcc for codegen.
      assert(dst);
      exitCcInst->appendSrc(irFactory->arena(), dst);
      exitInst->appendSrc(irFactory->arena(), dst);
      // Set flag so Jcc and exits know this is active.
      dst->setTCA(kIRDirectJccJmpActive);
    }
  }
}
void MemMap::sinkStores(StoreList& stores) {
  // Sink dead stores into exit edges that occur between the dead store and
  // the next store.
  StoreList::reverse_iterator it, end;
  for (it = stores.rbegin(), end = stores.rend(); it != end; ++it) {
    IRInstruction* store = it->first;
    if (store->getId() != DEAD) {
      continue;
    }
    std::vector<IRInstruction*>::iterator i, e;
    for (i = it->second.begin(), e = it->second.end(); i != e; ++i) {
      IRInstruction* guard = *i;
      IRInstruction* clone = store->clone(factory);
      if (store->getDst() != NULL) {
        factory->getSSATmp(clone);
      }
      guard->getLabel()->getParent()->prependInstruction(clone);
    }
    // StRefs cannot simply be removed; they have to be converted into Movs,
    // because the destination of the StRef still has the DecRef attached
    // to it.
    if (store->getOpcode() == StRef || store->getOpcode() == StRefNT) {
      store->setOpcode(Mov);
      store->setSrc(1, NULL);
      store->setNumSrcs(1);
      store->setId(LIVE);
    }
  }
}
void initInstructions(Trace* trace, IRInstruction::List& wl) {
  IRInstruction::List& instructions = trace->getInstructionList();
  IRInstruction::Iterator it;
  bool unreachable = false;
  TRACE(5, "DCE:vvvvvvvvvvvvvvvvvvvv\n");
  for (it = instructions.begin(); it != instructions.end(); it++) {
    IRInstruction* inst = *it;
    ASSERT(inst->getParent() == trace);
    Simplifier::copyProp(inst);
    // If this is a load that does not generate a guard, then get rid of its
    // label so that it's not an essential control-flow instruction.
    if (isUnguardedLoad(inst)) {
      // LdStack and LdLoc instructions that produce generic types, and
      // LdStack instructions that produce Cell types, will not generate
      // guards, so remove the label from this instruction so that it's no
      // longer an essential control-flow instruction.
      inst->setLabel(NULL);
    }
    Opcode opc = inst->getOpcode();
    // A DecRef of anything that isn't refcounted is a nop.
    if ((opc == DecRef || opc == DecRefNZ) && !isRefCounted(inst->getSrc(0))) {
      inst->setId(DEAD);
      continue;
    }
    if (!unreachable && inst->isControlFlowInstruction()) {
      // Mark the destination label so that the destination trace is marked
      // reachable.
      inst->getLabel()->setId(LIVE);
    }
    if (!unreachable && isEssential(inst)) {
      inst->setId(LIVE);
      wl.push_back(inst);
    } else {
      if (moduleEnabled(HPHP::Trace::hhir, 5)) {
        std::ostringstream ss1;
        inst->printSrcs(ss1);
        TRACE(5, "DCE: %s\n", ss1.str().c_str());
        std::ostringstream ss2;
        inst->print(ss2);
        TRACE(5, "DCE: %s\n", ss2.str().c_str());
      }
      inst->setId(DEAD);
    }
    if (inst->getOpcode() == Jmp_) {
      unreachable = true;
    }
  }
  TRACE(5, "DCE:^^^^^^^^^^^^^^^^^^^^\n");
}
SSATmp* Simplifier::simplifyNot(SSATmp* src) {
  // Const XORs are handled in simplifyXor().
  assert(!src->isConst());
  assert(src->getType() == Type::Bool);
  IRInstruction* inst = src->getInstruction()->getSrc(0)->getInstruction();
  Opcode op = inst->getOpcode();
  // TODO: Add more algebraic simplification rules for NOT.
  switch (op) {
    case OpXor: {
      // !!X --> bool(X)
      if (isNotInst(inst->getSrc(0))) {
        return m_tb->genConvToBool(inst->getSrc(0));
      }
      break;
    }
    // !(X cmp Y) --> X opposite_cmp Y
    case OpLt:
    case OpLte:
    case OpGt:
    case OpGte:
    case OpEq:
    case OpNeq:
    case OpSame:
    case OpNSame:
      return m_tb->genCmp(negateQueryOp(op), inst->getSrc(0), inst->getSrc(1));
    // TODO: !(X | non_zero) --> 0
    default:
      (void)op;
  }
  return NULL;
}
// If main trace ends with an unconditional jump, and the target is not
// reached by any other branch, then copy the target of the jump to the
// end of the trace.
static void elimUnconditionalJump(Trace* trace, IRFactory* irFactory) {
  boost::dynamic_bitset<> isJoin(irFactory->numLabels());
  boost::dynamic_bitset<> havePred(irFactory->numLabels());
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction* inst : instList) {
    if (inst->isControlFlowInstruction()) {
      auto id = inst->getLabel()->getLabelId();
      isJoin[id] = havePred[id];
      havePred[id] = 1;
    }
  }
  IRInstruction::Iterator lastInst = instList.end();
  --lastInst; // go back to the last instruction
  IRInstruction* jmp = *lastInst;
  if (jmp->getOpcode() == Jmp_ && !isJoin[jmp->getLabel()->getLabelId()]) {
    Trace* targetTrace = jmp->getLabel()->getParent();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator instIter = targetInstList.begin();
    instIter++; // skip over label
    // Update the parent trace of the moved instructions.
    for (IRInstruction::Iterator it = instIter;
         it != targetInstList.end();
         ++it) {
      (*it)->setParent(trace);
    }
    instList.splice(lastInst, targetInstList, instIter, targetInstList.end());
    // Delete the jump instruction.
    instList.erase(lastInst);
  }
}
void LinearScan::removeUnusedSpillsAux(Trace* trace) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end(); ) {
    IRInstruction::Iterator next = it;
    ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill && inst->getDst()->getUseCount() == 0) {
      instList.erase(it);
      SSATmp* src = inst->getSrc(0);
      if (src->decUseCount() == 0) {
        Opcode srcOpc = src->getInstruction()->getOpcode();
        // Not all instructions are able to take noreg as their dest reg.
        // We pick LdLoc and IncRef because they occur often.
        if (srcOpc == IncRef || srcOpc == LdLoc) {
          for (int locIndex = 0; locIndex < src->numNeededRegs(); ++locIndex) {
            src->setReg(InvalidReg, locIndex);
          }
        }
      }
    }
    it = next;
  }
}
/**
 * If main trace ends with a conditional jump with no side-effects on exit,
 * hook it to the exitTrace and make it a TraceExitType::NormalCc.
 *
 * This function essentially looks for the following code pattern:
 *
 * Main Trace:
 * -----------
 * L1:                 // jccBlock
 *    ...
 *    Jcc ... -> L3
 * L2:                 // lastBlock
 *    DefLabel
 *    [Marker]
 *    ExitTrace
 *
 * Exit Trace:
 * -----------
 * L3:                 // targetBlock
 *    DefLabel
 *    [Marker]
 *    ExitTraceCc
 *
 * If the pattern is found, Jcc's dst operand is linked to the ExitTrace and
 * ExitTraceCc instructions and it's flagged with kIRDirectJccJmpActive. This
 * then triggers CodeGenerator to emit a REQ_BIND_JMPCC_FIRST service request.
 */
static void hoistConditionalJumps(Trace* trace, IRFactory* irFactory) {
  IRInstruction* exitInst = nullptr;
  IRInstruction* exitCcInst = nullptr;
  Opcode opc = OpAdd;
  auto& blocks = trace->getBlocks();
  if (blocks.size() < 2) return;
  auto it = blocks.end();
  Block* lastBlock = *(--it);
  Block* jccBlock = *(--it);
  IRInstruction& jccInst = *(jccBlock->back());
  if (!jccCanBeDirectExit(jccInst.getOpcode())) return;
  for (auto it = lastBlock->skipLabel(), end = lastBlock->end();
       it != end;
       it++) {
    IRInstruction& inst = *it;
    opc = inst.getOpcode();
    if (opc == ExitTrace) {
      exitInst = &inst;
      break;
    }
    if (opc != Marker) {
      // Found a real instruction on the last block.
      return;
    }
  }
  if (exitInst) {
    SSATmp* dst = jccInst.getDst();
    Block* targetBlock = jccInst.getTaken();
    auto targetInstIter = targetBlock->skipLabel();
    // Check for a NormalCc exit with no side effects.
    for (auto it = targetInstIter, end = targetBlock->end(); it != end; ++it) {
      IRInstruction* instr = &*it;
      // Extend to support ExitSlow, ExitSlowNoProgress, ...
      Opcode opc = instr->getOpcode();
      if (opc == ExitTraceCc) {
        exitCcInst = instr;
        break;
      } else if (opc != Marker) {
        // Do not optimize if there are other instructions.
        break;
      }
    }
    if (exitCcInst) {
      // Found both exits; link them to Jcc for codegen.
      assert(dst);
      exitCcInst->appendSrc(irFactory->arena(), dst);
      exitInst->appendSrc(irFactory->arena(), dst);
      // Set flag so Jcc and exits know this is active.
      dst->setTCA(kIRDirectJccJmpActive);
    }
  }
}
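// For reference, a minimal sketch of the predicate used above. Its body here
// is an assumption (the real definition lives with the IR opcode tables): it
// presumably accepts only the side-effect-free query jumps that codegen can
// fuse with a trace exit.
static bool jccCanBeDirectExit(Opcode opc) {
  switch (opc) {
    case JmpGt:  case JmpGte:  case JmpLt:   case JmpLte:
    case JmpEq:  case JmpNeq:  case JmpSame: case JmpNSame:
      return true;
    default:
      return false;
  }
}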
void MemMap::sinkStores(StoreList& stores) {
  // Sink dead stores into exit edges that occur between the dead store and
  // the next store.
  StoreList::reverse_iterator it, end;
  for (it = stores.rbegin(), end = stores.rend(); it != end; ++it) {
    IRInstruction* store = it->first;
    if (isLive(store)) continue;
    for (IRInstruction* guard : it->second) {
      Block* exit = guard->getTaken();
      exit->prepend(store->clone(m_factory));
    }
    // StRefs cannot simply be removed; they have to be converted into Movs,
    // because the destination of the StRef still has the DecRef attached
    // to it.
    if (store->getOpcode() == StRef || store->getOpcode() == StRefNT) {
      store->setOpcode(Mov);
      store->setSrc(1, nullptr);
      store->setNumSrcs(1);
      setLive(*store, true);
    }
  }
}
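// A hypothetical sketch of the liveness helpers the loop above relies on.
// The assumption is that MemMap tracks liveness through the instruction id
// field, mirroring the LIVE/DEAD ids used by the other passes shown here.
bool MemMap::isLive(IRInstruction* inst) const {
  return inst->getId() == LIVE;
}
void MemMap::setLive(IRInstruction& inst, bool live) {
  inst.setId(live ? LIVE : DEAD);
}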
// Perform the following transformations:
// 1) Change all unconsumed IncRefs to Mov.
// 2) Mark a conditionally dead DecRefNZ as live if its corresponding IncRef
//    cannot be eliminated.
void optimizeRefCount(Trace* trace) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (inst->getOpcode() == IncRef &&
        inst->getId() != REFCOUNT_CONSUMED &&
        inst->getId() != REFCOUNT_CONSUMED_OFF_TRACE) {
      inst->setOpcode(Mov);
      inst->setId(DEAD);
    }
    if (inst->getOpcode() == DecRefNZ) {
      IRInstruction* srcInst = inst->getSrc(0)->getInstruction();
      if (srcInst->getId() == REFCOUNT_CONSUMED ||
          srcInst->getId() == REFCOUNT_CONSUMED_OFF_TRACE) {
        inst->setId(LIVE);
      }
    }
    // Do copyProp last: when processing DecRefNZs, we still need to look at
    // their sources, which should not be trampled over.
    Simplifier::copyProp(inst);
  }
}
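// Illustrative before/after for transformation (1), in hypothetical HHIR:
//
//   t2 = IncRef t1        ; never consumed on any path
//   ...uses of t2...
//
// becomes
//
//   t2 = Mov t1           ; marked DEAD; copyProp rewrites uses of t2 to t1
//                         ; and the Mov is removed by the later DCE sweep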
SSATmp* Simplifier::simplifyNot(SSATmp* src) {
  IRInstruction* inst = src->getInstruction();
  Opcode op = inst->getOpcode();
  // TODO: Add more algebraic simplification rules for NOT.
  switch (op) {
    case ConvToBool:
      return simplifyNot(inst->getSrc(0));
    case OpXor: {
      // !!X --> bool(X)
      if (isNotInst(inst->getSrc(0))) {
        return m_tb->genConvToBool(inst->getSrc(0));
      }
      break;
    }
    // !(X cmp Y) --> X opposite_cmp Y
    case OpLt:
    case OpLte:
    case OpGt:
    case OpGte:
    case OpEq:
    case OpNeq:
    case OpSame:
    case OpNSame:
      // XXX: this could technically be losing a ConvToBool, except that we
      // kinda know "not" instructions (Xor with 1) are always going to be
      // followed by ConvToBool.
      //
      // TODO(#2058865): This would make more sense with a real Not
      // instruction and allowing boolean output types for query ops.
      return m_tb->genCmp(negateQueryOp(op), inst->getSrc(0), inst->getSrc(1));
    case InstanceOf:
    case NInstanceOf:
    case InstanceOfBitmask:
    case NInstanceOfBitmask:
      // TODO: combine this with the above check and use isQueryOp or add an
      // isNegatable.
      return m_tb->gen(negateQueryOp(op),
                       inst->getNumSrcs(),
                       inst->getSrcs().begin());
    // TODO: !(X | non_zero) --> 0
    default:
      (void)op;
  }
  return nullptr;
}
void LinearScan::allocRegsToTraceAux(Trace* trace) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  IRInstruction::Iterator it;
  for (it = instructionList.begin(); it != instructionList.end(); it++) {
    IRInstruction* inst = *it;
    allocRegToInstruction(trace, it);
    if (RuntimeOption::EvalDumpIR > 3) {
      std::cout << "--- allocated to instruction: ";
      inst->print(std::cout);
      std::cout << "\n";
    }
    if (inst->isControlFlowInstruction()) {
      // This instruction may transfer control to another trace.
      // If this is the last instruction in the trace that can branch to
      // this target trace, then allocate registers to the target trace,
      // effectively linearizing the target trace after inst.
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        allocRegsToTraceAux(label->getTrace());
      }
    }
  }
  // Insert spill instructions.
  // Reload instructions are already added in <allocRegsToTrace>.
  for (it = instructionList.begin(); it != instructionList.end(); ) {
    IRInstruction::Iterator next = it;
    ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() != Reload) {
      // Reloaded SSATmps needn't be spilled again.
      if (SSATmp* dst = inst->getDst()) {
        int32 slotId = dst->getSpillSlot();
        if (slotId != -1) {
          // If this instruction is marked to be spilled, add a spill right
          // afterwards.
          IRInstruction* spillInst =
            m_slots[slotId].m_slotTmp->getInstruction();
          instructionList.insert(next, spillInst);
          spillInst->setParent(trace);
        }
      }
    }
    it = next;
  }
}
/*
 * Looks for whether the value in tmp was defined by a load, and if
 * so, changes that load into a load that guards on the given
 * type. Returns true if it succeeds.
 */
static bool hoistGuardToLoad(SSATmp* tmp, Type type) {
  IRInstruction* inst = tmp->getInstruction();
  switch (inst->getOpcode()) {
    case Mov:
    case IncRef: {
      // If inst is an IncRef or Mov, then chase down its src.
      if (hoistGuardToLoad(inst->getSrc(0), type)) {
        // The guard was successfully attached to a load instruction, so
        // refine the type of this Mov/IncRef.
        // Note: We can also further simplify IncRefs here if type is not
        // refcounted.
        tmp->setType(type);
        inst->setTypeParam(type);
        return true;
      }
      break;
    }
    case LdLoc:
    case LdStack:
    case LdMem:
    case LdProp:
    case LdRef:
    case LdClsCns: {
      if (!inst->getTaken()) {
        // Not a control-flow instruction, so we can't give it check
        // semantics.
        break;
      }
      Type instType = tmp->getType();
      if (instType == Type::Gen ||
          (instType == Type::Cell && !type.isBoxed())) {
        tmp->setType(type);
        inst->setTypeParam(type);
        return true;
      }
      break;
    }
    default:
      break;
  }
  return false;
}
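// A minimal usage sketch. The caller below is hypothetical (the real call
// sites are in the guard-simplification code): if the guarded value came
// from a load, fold the check into that load, making the standalone guard
// redundant.
SSATmp* Simplifier::simplifyGuardType(IRInstruction* inst) {
  SSATmp* src = inst->getSrc(0);
  Type type = inst->getTypeParam();
  if (hoistGuardToLoad(src, type)) {
    return src; // the load now performs the type check itself
  }
  return nullptr;
}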
// If main trace starts with guards, have them generate a patchable jump
// to the anchor trace.
static void hoistGuardJumps(Trace* trace, IRFactory* irFactory) {
  LabelInstruction* guardLabel = nullptr;
  IRInstruction::List& instList = trace->getInstructionList();
  // Check the beginning of the trace for guards.
  for (IRInstruction* inst : instList) {
    Opcode opc = inst->getOpcode();
    if (inst->getLabel() &&
        (opc == LdLoc || opc == LdStack ||
         opc == GuardLoc || opc == GuardStk)) {
      LabelInstruction* exitLabel = inst->getLabel();
      // Find the GuardFailure's label and confirm this branches there.
      if (!guardLabel && exitLabel->getParent() != trace) {
        Trace* exitTrace = exitLabel->getParent();
        IRInstruction::List& xList = exitTrace->getInstructionList();
        IRInstruction::Iterator instIter = xList.begin();
        instIter++; // skip over label
        // Confirm this is a GuardExit.
        for (IRInstruction::Iterator it = instIter; it != xList.end(); ++it) {
          IRInstruction* i = *it;
          Opcode op = i->getOpcode();
          if (op == Marker) {
            continue;
          }
          if (op == ExitGuardFailure) {
            guardLabel = exitLabel;
          }
          // Do not optimize if other instructions are on the exit trace.
          break;
        }
      }
      if (exitLabel == guardLabel) {
        inst->setTCA(kIRDirectGuardActive);
        continue;
      }
      break;
    }
    if (opc == Marker || opc == DefLabel || opc == DefSP || opc == DefFP ||
        opc == LdStack) {
      continue;
    }
    break;
  }
}
// If main trace starts with guards, have them generate a patchable jump
// to the anchor trace.
static void hoistGuardJumps(Trace* trace, IRFactory* irFactory) {
  Block* guardLabel = nullptr;
  // Check the beginning of the trace for guards.
  for (Block* block : trace->getBlocks()) {
    for (IRInstruction& instr : *block) {
      IRInstruction* inst = &instr;
      Opcode opc = inst->getOpcode();
      if (inst->getTaken() &&
          (opc == LdLoc || opc == LdStack ||
           opc == GuardLoc || opc == GuardStk)) {
        Block* exitLabel = inst->getTaken();
        // Find the GuardFailure's label and confirm this branches there.
        if (!guardLabel && exitLabel->getTrace() != trace) {
          auto instIter = exitLabel->skipLabel();
          // Confirm this is a GuardExit.
          for (auto it = instIter, end = exitLabel->end(); it != end; ++it) {
            Opcode op = it->getOpcode();
            if (op == Marker) {
              continue;
            }
            if (op == ExitGuardFailure) {
              guardLabel = exitLabel;
            }
            // Do not optimize if other instructions are on the exit trace.
            break;
          }
        }
        if (exitLabel == guardLabel) {
          inst->setTCA(kIRDirectGuardActive);
          continue;
        }
        return; // terminate search
      }
      if (opc == Marker || opc == DefLabel || opc == DefSP || opc == DefFP ||
          opc == LdStack) {
        continue;
      }
      return; // terminate search
    }
  }
}
void LinearScan::preAllocSpillLocAux(Trace* trace, uint32 numSpillLocs) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      for (int index = 0; index < dst->numNeededRegs(); ++index) {
        ASSERT(!dst->hasReg(index));
        if (dst->getSpillInfo(index).type() == SpillInfo::Memory) {
          uint32 spillLoc = dst->getSpillInfo(index).mem();
          // Native stack layout:
          // |               |
          // +---------------+
          // |               |  <-- spill[5..]
          // | pre allocated |  <-- spill[4]
          // |  (16 slots)   |  <-- spill[3]
          // +---------------+
          // |  return addr  |
          // +---------------+
          // |    extra      |  <-- spill[2]
          // |    spill      |  <-- spill[1]
          // |  locations    |  <-- spill[0]
          // +---------------+  <-- %rsp
          // If a spill location falls into the pre-allocated region, we
          // need to increase its index by 1 to avoid overwriting the
          // return address.
          if (spillLoc + NumPreAllocatedSpillLocs >= numSpillLocs) {
            dst->setSpillInfo(index, SpillInfo(spillLoc + 1));
          }
        }
      }
    }
  }
}
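// A worked example with hypothetical numbers: suppose numSpillLocs = 18 and
// NumPreAllocatedSpillLocs = 16. Slots 0 and 1 are the "extra" locations
// below the return address and keep their indices, while every slot >= 2
// satisfies spillLoc + 16 >= 18 and is therefore shifted up by one so that
// it steps over the return address into the pre-allocated region.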
void LinearScan::insertAllocFreeSpillAux(Trace* trace,
                                         uint32 numExtraSpillLocs) {
  SSATmp* tmp = m_irFactory->getSSATmp(
    m_irFactory->defConst((int64)numExtraSpillLocs));
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end(); ) {
    IRInstruction::Iterator next = it;
    ++next;
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    if (opc == Call) {
      // Insert FreeSpill and AllocSpill around each Call.
      IRInstruction* allocSpill = m_irFactory->allocSpill(tmp);
      IRInstruction* freeSpill = m_irFactory->freeSpill(tmp);
      instList.insert(it, freeSpill);
      freeSpill->setParent(trace);
      instList.insert(next, allocSpill);
      allocSpill->setParent(trace);
    } else if (opc == ExitTrace || opc == ExitSlow ||
               opc == ExitSlowNoProgress || opc == ExitGuardFailure ||
               opc == RetCtrl) {
      // Insert FreeSpill at trace exits.
      IRInstruction* freeSpill = m_irFactory->freeSpill(tmp);
      instList.insert(it, freeSpill);
      freeSpill->setParent(trace);
    }
    it = next;
  }
  // Insert AllocSpill at the start of the main trace.
  if (trace->isMain()) {
    IRInstruction* allocSpill = m_irFactory->allocSpill(tmp);
    trace->prependInstruction(allocSpill);
  }
}
void LinearScan::rematerializeAux(Trace* trace,
                                  SSATmp* curSp,
                                  SSATmp* curFp,
                                  std::vector<SSATmp*> localValues) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    SSATmp* dst = inst->getDst();
    if (opc == DefFP || opc == FreeActRec) {
      curFp = dst;
      ASSERT(dst && dst->getReg() == rVmFp);
    }
    if (opc == Reload) {
      // s = Spill t0
      // t = Reload s
      SSATmp* spilledTmp = getSpilledTmp(dst);
      IRInstruction* spilledInst = spilledTmp->getInstruction();
      IRInstruction* newInst = NULL;
      if (spilledInst->isRematerializable() ||
          (spilledInst->getOpcode() == LdStack &&
           spilledInst->getSrc(0) == curSp)) {
        // XXX: could change <newInst> to the non-check version.
        // Rematerialize those rematerializable instructions (i.e.,
        // isRematerializable returns true) and LdStack.
        newInst = spilledInst->clone(m_irFactory);
        // The new instruction needn't have an exit label, because it is
        // always dominated by the original instruction.
        newInst->setLabel(NULL);
      } else {
        // Rematerialize LdLoc.
        std::vector<SSATmp*>::iterator pos =
          std::find(localValues.begin(),
                    localValues.end(),
                    canonicalize(spilledTmp));
        // Search for a local that stores the value of <spilledTmp>.
        if (pos != localValues.end()) {
          size_t locId = pos - localValues.begin();
          ASSERT(curFp != NULL);
          ConstInstruction constInst(curFp, Local(locId));
          IRInstruction* ldHomeInst =
            m_irFactory->cloneInstruction(&constInst);
          newInst = m_irFactory->ldLoc(m_irFactory->getSSATmp(ldHomeInst),
                                       dst->getType(),
                                       NULL);
        }
      }
      if (newInst) {
        newInst->setDst(dst);
        newInst->getDst()->setInstruction(newInst);
        *it = newInst;
        newInst->setParent(trace);
      }
    }

    // Updating <curSp> and <localValues>.
    if (dst && dst->getReg() == rVmSp) {
      // <inst> modifies the stack pointer.
      curSp = dst;
    }
    if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
      // dst = LdLoc home
      // StLoc/StLocNT home, src
      int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
      SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
      if (int(localValues.size()) < locId + 1) {
        localValues.resize(locId + 1);
      }
      localValues[locId] = canonicalize(localValue);
    }

    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        rematerializeAux(label->getTrace(), curSp, curFp, localValues);
      }
    }
  }
}
// XXX: to be refactored
// This function repeats the logic in cg to pre-color tmps that are
// going to be used in the next native.
void LinearScan::computePreColoringHint() {
  m_preColoringHint.clear();
  IRInstruction* nextNative = getNextNative();
  if (nextNative == NULL) {
    return;
  }
  auto normalHint = [&](int count, int srcBase = 0, int argBase = 0) {
    for (int i = 0; i < count; ++i) {
      m_preColoringHint.add(nextNative->getSrc(i + srcBase), 0, i + argBase);
    }
  };
  switch (nextNative->getOpcode()) {
    case Box:
      if (nextNative->getSrc(0)->getType() == Type::Cell) {
        m_preColoringHint.add(nextNative->getSrc(0), 1, 0);
      }
      m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
      break;
    case LdObjMethod:
      m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
      m_preColoringHint.add(nextNative->getSrc(0), 0, 2);
      break;
    case LdFunc:
      m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
      break;
    case NativeImpl:
      m_preColoringHint.add(nextNative->getSrc(1), 0, 0);
      break;
    case Print:
      m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
      break;
    case AddElem:
      if (nextNative->getSrc(1)->getType() == Type::Int &&
          nextNative->getSrc(2)->getType() == Type::Int) {
        normalHint(3, 0, 1);
      } else {
        m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
        m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
        m_preColoringHint.add(nextNative->getSrc(2), 0, 2);
        m_preColoringHint.add(nextNative->getSrc(2), 1, 3);
      }
      break;
    case AddNewElem:
      m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
      m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
      m_preColoringHint.add(nextNative->getSrc(1), 1, 2);
      break;
    case Concat: {
      Type::Tag lType = nextNative->getSrc(0)->getType();
      Type::Tag rType = nextNative->getSrc(1)->getType();
      if ((Type::isString(lType) && Type::isString(rType)) ||
          (Type::isString(lType) && rType == Type::Int) ||
          (lType == Type::Int && Type::isString(rType))) {
        m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
        m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
      } else {
        m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
        m_preColoringHint.add(nextNative->getSrc(1), 0, 3);
      }
      break;
    }
    case ArrayAdd:
      normalHint(2);
      break;
    case DefFunc:
      normalHint(1);
      break;
    case CreateCont:
      normalHint(4);
      break;
    case FillContLocals:
      normalHint(4);
      break;
    case OpEq:
    case OpNeq:
    case OpSame:
    case OpNSame: {
      auto src1 = nextNative->getSrc(0);
      auto src2 = nextNative->getSrc(1);
      auto type1 = src1->getType();
      auto type2 = src2->getType();
      if ((type1 == Type::Arr && type2 == Type::Arr) ||
          (Type::isString(type1) && Type::isString(type2)) ||
          (Type::isString(type1) && !src1->isConst()) ||
          (type1 == Type::Obj && type2 == Type::Obj)) {
        m_preColoringHint.add(src1, 0, 0);
        m_preColoringHint.add(src2, 0, 1);
      }
      break;
    }
    case Conv: {
      SSATmp* src = nextNative->getSrc(0);
      Type::Tag toType = nextNative->getType();
      Type::Tag fromType = src->getType();
      if (toType == Type::Bool) {
        switch (fromType) {
          case Type::Cell:
            m_preColoringHint.add(src, 0, 0);
            m_preColoringHint.add(src, 1, 1);
            break;
          case Type::Str:
          case Type::StaticStr:
          case Type::Arr:
          case Type::Obj:
            m_preColoringHint.add(src, 0, 0);
            break;
          default:
            break;
        }
      } else if (Type::isString(toType)) {
        if (fromType == Type::Int) {
          m_preColoringHint.add(src, 0, 0);
        }
      } else if (Type::isString(fromType) && toType == Type::Int) {
        m_preColoringHint.add(src, 0, 0);
      }
      break;
    }
    default:
      break;
  }
}
void LinearScan::computePreColoringHint() {
  m_preColoringHint.clear();
  IRInstruction* inst = getNextNative();
  if (inst == nullptr) {
    return;
  }
  Opcode opc = inst->getOpcode();
  using namespace NativeCalls;
  if (CallMap::hasInfo(opc)) {
    unsigned reg = 0;
    for (auto const& arg : CallMap::getInfo(opc).args) {
      switch (arg.type) {
        case SSA:
          m_preColoringHint.add(inst->getSrc(arg.srcIdx), 0, reg++);
          break;
        case TV:
        case VecKeyS:
        case VecKeyIS:
          m_preColoringHint.add(inst->getSrc(arg.srcIdx), 0, reg++);
          m_preColoringHint.add(inst->getSrc(arg.srcIdx), 1, reg++);
          break;
      }
    }
    return;
  }
  // For instructions that want to hint a continuous increasing range of
  // sources to a continuous increasing range of argument registers.
  auto normalHint = [&](int count, int srcBase = 0, int argBase = 0) {
    for (int i = 0; i < count; ++i) {
      m_preColoringHint.add(inst->getSrc(i + srcBase), 0, i + argBase);
    }
  };
  switch (opc) {
    case LdFunc:
      m_preColoringHint.add(inst->getSrc(0), 0, 1);
      break;
    case NativeImpl:
      m_preColoringHint.add(inst->getSrc(1), 0, 0);
      break;
    case Concat: {
      Type lType = inst->getSrc(0)->getType();
      Type rType = inst->getSrc(1)->getType();
      if ((lType.isString() && rType.isString()) ||
          (lType.isString() && rType == Type::Int) ||
          (lType == Type::Int && rType.isString())) {
        m_preColoringHint.add(inst->getSrc(0), 0, 0);
        m_preColoringHint.add(inst->getSrc(1), 0, 1);
      } else {
        m_preColoringHint.add(inst->getSrc(0), 0, 1);
        m_preColoringHint.add(inst->getSrc(1), 0, 3);
      }
      break;
    }
    case AKExists:
      normalHint(2);
      break;
    case DefFunc:
      normalHint(1);
      break;
    case OpEq:
    case OpNeq:
    case OpSame:
    case OpNSame: {
      auto src1 = inst->getSrc(0);
      auto src2 = inst->getSrc(1);
      auto type1 = src1->getType();
      auto type2 = src2->getType();
      if ((type1.isArray() && type2.isArray()) ||
          (type1.isString() && type2.isString()) ||
          (type1.isString() && !src1->isConst()) ||
          (type1 == Type::Obj && type2 == Type::Obj)) {
        m_preColoringHint.add(src1, 0, 0);
        m_preColoringHint.add(src2, 0, 1);
      }
      break;
    }
    case IterInit:
      m_preColoringHint.add(inst->getSrc(0), 0, 1);
      break;
    case ConvToArr:
      break;
    case ConvToBool: {
      SSATmp* src = inst->getSrc(0);
      Type fromType = src->getType();
      if (fromType == Type::Cell) {
        m_preColoringHint.add(src, 0, 0);
        m_preColoringHint.add(src, 1, 1);
      } else if (fromType == Type::Str || fromType == Type::StaticStr ||
                 fromType.isArray() || fromType == Type::Obj) {
        m_preColoringHint.add(src, 0, 0);
      }
      break;
    }
    case ConvToDbl:
      break;
    case ConvToInt: {
      SSATmp* src = inst->getSrc(0);
      Type fromType = src->getType();
      if (fromType.isString()) {
        m_preColoringHint.add(src, 0, 0);
      }
      break;
    }
    case ConvToObj:
      break;
    case ConvToStr:
      break;
    case InstanceOf:
    case NInstanceOf:
    case JmpInstanceOf:
    case JmpNInstanceOf:
      normalHint(2);
      break;
    case LdSSwitchDestFast:
      normalHint(1);
      break;
    case LdSSwitchDestSlow:
      normalHint(1);
      break;
    case LdGblAddr:
    case LdGblAddrDef:
      normalHint(1);
      break;
    case LdClsPropAddr:
      normalHint(3);
      break;
    case LdCls:
      m_preColoringHint.add(inst->getSrc(0), 0, 1);
      break;
    case BoxPtr:
      normalHint(1);
      break;
    default:
      break;
  }
}
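// For context, a hypothetical sketch of the shape of a CallMap entry that
// the loop above consumes (the real table lives in NativeCalls; these rows
// are illustrative assumptions, not its actual contents). An SSA argument
// claims one argument register, while a TV argument claims two, one for the
// type word and one for the data word:
//
//   { AKExists, { { SSA, 0 }, { SSA, 1 } } }  // srcs 0,1 -> arg regs 0,1
//   { SomeOp,   { { SSA, 0 }, { TV, 1 } } }   // src 1 -> arg regs 1 and 2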
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List wl; // worklist of live instructions
  Trace::List& exitTraces = trace->getExitTraces();
  // First mark all exit traces as unreachable by setting the id on their
  // labels to 0.
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    Trace* trace = *it;
    trace->getLabel()->setId(DEAD);
  }

  // Mark the essential instructions and add them to the initial work list;
  // also mark the exit traces that are reachable by any control flow
  // instruction in the main trace.
  initInstructions(trace, wl);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    // Only process those exit traces that are reachable from the main trace.
    Trace* trace = *it;
    if (trace->getLabel()->getId() != DEAD) {
      initInstructions(trace, wl);
    }
  }

  // Process the worklist.
  while (!wl.empty()) {
    IRInstruction* inst = wl.front();
    wl.pop_front();
    for (uint32 i = 0; i < inst->getNumSrcs(); i++) {
      SSATmp* src = inst->getSrc(i);
      if (src->getInstruction()->isDefConst()) {
        continue;
      }
      IRInstruction* srcInst = src->getInstruction();
      if (srcInst->getId() == DEAD) {
        srcInst->setId(LIVE);
        wl.push_back(srcInst);
      }
      // <inst> consumes <srcInst> which is an IncRef, so we mark <srcInst>
      // as REFCOUNT_CONSUMED.
      if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
        if (inst->getParent()->isMain() || !srcInst->getParent()->isMain()) {
          // <srcInst> is consumed from its own trace.
          srcInst->setId(REFCOUNT_CONSUMED);
        } else {
          // <srcInst> is consumed off trace.
          if (srcInst->getId() != REFCOUNT_CONSUMED) {
            // Mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
            // also consumed from its own trace.
            srcInst->setId(REFCOUNT_CONSUMED_OFF_TRACE);
          }
        }
      }
    }
  }

  // Optimize IncRefs and DecRefs.
  optimizeRefCount(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); ++it) {
    optimizeRefCount(*it);
  }

  if (RuntimeOption::EvalHHIREnableSinking) {
    // Sink IncRefs consumed off trace.
    IRInstruction::List toSink;
    sinkIncRefs(trace, irFactory, toSink);
  }

  // Now remove instructions whose id == DEAD.
  removeDeadInstructions(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    removeDeadInstructions(*it);
  }
}
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List wl; // worklist of live instructions
  Trace::List& exitTraces = trace->getExitTraces();
  // First mark all exit traces as unreachable by setting the id on their
  // labels to 0.
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    Trace* trace = *it;
    trace->getLabel()->setId(DEAD);
  }

  // Mark the essential instructions and add them to the initial work list;
  // also mark the exit traces that are reachable by any control flow
  // instruction in the main trace.
  initInstructions(trace, wl);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    // Only process those exit traces that are reachable from the main trace.
    Trace* trace = *it;
    if (trace->getLabel()->getId() != DEAD) {
      initInstructions(trace, wl);
    }
  }

  // Process the worklist.
  while (!wl.empty()) {
    IRInstruction* inst = wl.front();
    wl.pop_front();
    for (uint32 i = 0; i < inst->getNumSrcs(); i++) {
      SSATmp* src = inst->getSrc(i);
      if (src->getInstruction()->isDefConst()) {
        continue;
      }
      IRInstruction* srcInst = src->getInstruction();
      if (srcInst->getId() == DEAD) {
        srcInst->setId(LIVE);
        wl.push_back(srcInst);
      }
      // <inst> consumes <srcInst> which is an IncRef, so we mark <srcInst>
      // as REFCOUNT_CONSUMED.
      if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
        if (inst->getParent()->isMain() || !srcInst->getParent()->isMain()) {
          // <srcInst> is consumed from its own trace.
          srcInst->setId(REFCOUNT_CONSUMED);
        } else {
          // <srcInst> is consumed off trace.
          if (srcInst->getId() != REFCOUNT_CONSUMED) {
            // Mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
            // also consumed from its own trace.
            srcInst->setId(REFCOUNT_CONSUMED_OFF_TRACE);
          }
        }
      }
    }
  }

  // Optimize IncRefs and DecRefs.
  optimizeRefCount(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); ++it) {
    optimizeRefCount(*it);
  }

  if (RuntimeOption::EvalHHIREnableSinking) {
    // Sink IncRefs consumed off trace.
    IRInstruction::List toSink;
    sinkIncRefs(trace, irFactory, toSink);
  }

  // Now remove instructions whose id == DEAD.
  removeDeadInstructions(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    removeDeadInstructions(*it);
  }

  // If main trace ends with an unconditional jump, copy the target of the
  // jump to the end of the trace.
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator lastInst = instList.end();
  lastInst--; // go back to the last instruction
  IRInstruction* jmpInst = *lastInst;
  if (jmpInst->getOpcode() == Jmp_) {
    Trace* targetTrace = jmpInst->getLabel()->getTrace();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator instIter = targetInstList.begin();
    instIter++; // skip over label
    // Update the parent trace of the moved instructions.
    for (IRInstruction::Iterator it = instIter;
         it != targetInstList.end();
         ++it) {
      (*it)->setParent(trace);
    }
    instList.splice(lastInst, targetInstList, instIter, targetInstList.end());
    // Delete the jump instruction.
    instList.erase(lastInst);
  }

  // If main trace ends with a conditional jump with no side-effects on exit,
  // hook it to the exitTrace and make it a TraceExitType::NormalCc.
  if (RuntimeOption::EvalHHIRDirectExit) {
    IRInstruction::List& instList = trace->getInstructionList();
    IRInstruction::Iterator tail = instList.end();
    IRInstruction* jccInst = NULL;
    IRInstruction* exitInst = NULL;
    IRInstruction* exitCcInst = NULL;
    Opcode opc = OpAdd;
    // Normally Jcc comes before a Marker.
    for (int idx = 3; idx >= 0; idx--) {
      tail--; // go back to the previous instruction
      IRInstruction* inst = *tail;
      opc = inst->getOpcode();
      if (opc == ExitTrace) {
        exitInst = *tail;
        continue;
      }
      if (opc == Marker) {
        continue;
      }
      if (jccCanBeDirectExit(opc)) {
        jccInst = inst;
        break;
      }
      break;
    }
    if (jccCanBeDirectExit(opc)) {
      SSATmp* dst = jccInst->getDst();
      Trace* targetTrace = jccInst->getLabel()->getTrace();
      IRInstruction::List& targetInstList = targetTrace->getInstructionList();
      IRInstruction::Iterator targetInstIter = targetInstList.begin();
      targetInstIter++; // skip over label
      // Check for a NormalCc exit with no side effects.
      for (IRInstruction::Iterator it = targetInstIter;
           it != targetInstList.end();
           ++it) {
        IRInstruction* instr = (*it);
        // Extend to support ExitSlow, ExitSlowNoProgress, ...
        Opcode opc = instr->getOpcode();
        if (opc == ExitTraceCc) {
          exitCcInst = instr;
          break;
        } else if (opc == Marker) {
          continue;
        } else {
          // Do not optimize if there are other instructions.
          break;
        }
      }
      if (exitInst && exitCcInst &&
          exitCcInst->getNumSrcs() > NUM_FIXED_SRCS &&
          exitInst->getNumSrcs() > NUM_FIXED_SRCS) {
        // Found both exits; link them to Jcc for codegen.
        ASSERT(dst);
        ExtendedInstruction* exCcInst = (ExtendedInstruction*)exitCcInst;
        exCcInst->appendExtendedSrc(*irFactory, dst);
        ExtendedInstruction* exInst = (ExtendedInstruction*)exitInst;
        exInst->appendExtendedSrc(*irFactory, dst);
        // Set flag so Jcc and exits know this is active.
        dst->setTCA(kIRDirectJccJmpActive);
      }
    }
  }

  // If main trace starts with guards, have them generate a patchable jump
  // to the anchor trace.
  if (RuntimeOption::EvalHHIRDirectExit) {
    LabelInstruction* guardLabel = NULL;
    IRInstruction::List& instList = trace->getInstructionList();
    // Check the beginning of the trace for guards.
    for (IRInstruction::Iterator it = instList.begin();
         it != instList.end();
         ++it) {
      IRInstruction* inst = *it;
      Opcode opc = inst->getOpcode();
      if (inst->getLabel() &&
          (opc == LdLoc || opc == LdStack ||
           opc == GuardLoc || opc == GuardStk)) {
        LabelInstruction* exitLabel = inst->getLabel();
        // Find the GuardFailure's label and confirm this branches there.
        if (guardLabel == NULL) {
          Trace* exitTrace = exitLabel->getTrace();
          IRInstruction::List& xList = exitTrace->getInstructionList();
          IRInstruction::Iterator instIter = xList.begin();
          instIter++; // skip over label
          // Confirm this is a GuardExit.
          for (IRInstruction::Iterator it = instIter;
               it != xList.end();
               ++it) {
            IRInstruction* i = *it;
            Opcode op = i->getOpcode();
            if (op == Marker) {
              continue;
            }
            if (op == ExitGuardFailure) {
              guardLabel = exitLabel;
            }
            // Do not optimize if other instructions are on the exit trace.
            break;
          }
        }
        if (exitLabel == guardLabel) {
          inst->setTCA(kIRDirectGuardActive);
          continue;
        }
        break;
      }
      if (opc == Marker || opc == DefLabel || opc == DefSP || opc == DefFP ||
          opc == LdStack) {
        continue;
      }
      break;
    }
  }
}
// Sink IncRefs consumed off trace.
// When <trace> is an exit trace, <toSink> contains all live IncRefs in the
// main trace that are consumed off trace.
void sinkIncRefs(Trace* trace,
                 IRFactory* irFactory,
                 IRInstruction::List& toSink) {
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator it;
  std::map<SSATmp*, SSATmp*> sunkTmps;
  if (!trace->isMain()) {
    // Sink REFCOUNT_CONSUMED_OFF_TRACE IncRefs before the first non-label
    // instruction, and create a mapping from the original tmps to the sunk
    // tmps so that we can later replace the original ones with the sunk
    // ones.
    for (IRInstruction::ReverseIterator j = toSink.rbegin();
         j != toSink.rend();
         ++j) {
      // prependInstruction inserts an instruction at the beginning, so we
      // iterate through toSink in reverse order.
      IRInstruction* sunkInst = irFactory->incRef((*j)->getSrc(0));
      sunkInst->setId(LIVE);
      trace->prependInstruction(sunkInst);
      ASSERT((*j)->getDst());
      ASSERT(!sunkTmps.count((*j)->getDst()));
      sunkTmps[(*j)->getDst()] = irFactory->getSSATmp(sunkInst);
    }
  }
  // An exit trace may be entered from multiple exit points. We keep track of
  // which exit traces we have already pushed sunk IncRefs to, so that we
  // won't push them multiple times.
  std::set<Trace*> pushedTo;
  for (it = instList.begin(); it != instList.end(); ++it) {
    IRInstruction* inst = *it;
    if (trace->isMain()) {
      if (inst->getOpcode() == IncRef) {
        // Must be REFCOUNT_CONSUMED or REFCOUNT_CONSUMED_OFF_TRACE;
        // otherwise, it should already have been removed in
        // optimizeRefCount.
        ASSERT(inst->getId() == REFCOUNT_CONSUMED ||
               inst->getId() == REFCOUNT_CONSUMED_OFF_TRACE);
        if (inst->getId() == REFCOUNT_CONSUMED_OFF_TRACE) {
          inst->setOpcode(Mov);
          // Mark them as dead so that they'll be removed later.
          inst->setId(DEAD);
          // Put all REFCOUNT_CONSUMED_OFF_TRACE IncRefs on the sinking list.
          toSink.push_back(inst);
        }
      }
      if (inst->getOpcode() == DecRefNZ) {
        IRInstruction* srcInst = inst->getSrc(0)->getInstruction();
        if (srcInst->getId() == DEAD) {
          inst->setId(DEAD);
          // This may take O(I) time, where I is the number of IncRefs in
          // the main trace.
          toSink.remove(srcInst);
        }
      }
      if (LabelInstruction* label = inst->getLabel()) {
        Trace* exitTrace = label->getTrace();
        if (!pushedTo.count(exitTrace)) {
          pushedTo.insert(exitTrace);
          sinkIncRefs(exitTrace, irFactory, toSink);
        }
      }
    } else {
      // Replace the original tmps with the sunk tmps.
      for (uint32 i = 0; i < inst->getNumSrcs(); ++i) {
        SSATmp* src = inst->getSrc(i);
        if (SSATmp* sunkTmp = sunkTmps[src]) {
          inst->setSrc(i, sunkTmp);
        }
      }
    }
  }
  // Do copyProp last, because we need to keep the
  // REFCOUNT_CONSUMED_OFF_TRACE Movs as the prototypes for sunk
  // instructions.
  for (it = instList.begin(); it != instList.end(); ++it) {
    Simplifier::copyProp(*it);
  }
}
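// Illustration of the sinking, in hypothetical HHIR. Before, with t1's
// IncRef consumed only on the exit trace:
//
//   Main:  t2 = IncRef t1        Exit L1:  ...
//          Jcc ...    -> L1                DecRef t2
//
// After, the main-trace IncRef becomes a Mov (marked DEAD and later removed)
// and a fresh IncRef is prepended to the exit trace:
//
//   Main:  t2 = Mov t1           Exit L1:  t3 = IncRef t1
//          Jcc ...    -> L1                ...
//                                          DecRef t3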
void LinearScan::rematerializeAux(Trace* trace,
                                  SSATmp* curSp,
                                  SSATmp* curFp,
                                  std::vector<SSATmp*> localValues) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    SSATmp* dst = inst->getDst();
    if (opc == DefFP || opc == FreeActRec) {
      curFp = dst;
      assert(dst && dst->getReg() == rVmFp);
    }
    if (opc == Reload) {
      // s = Spill t0
      // t = Reload s
      SSATmp* spilledTmp = getSpilledTmp(dst);
      IRInstruction* spilledInst = spilledTmp->getInstruction();
      IRInstruction* newInst = NULL;
      if (spilledInst->isRematerializable() ||
          (spilledInst->getOpcode() == LdStack &&
           spilledInst->getSrc(0) == curSp)) {
        // XXX: could change <newInst> to the non-check version.
        // Rematerialize those rematerializable instructions (i.e.,
        // isRematerializable returns true) and LdStack.
        newInst = spilledInst->clone(m_irFactory);
        // The new instruction needn't have an exit label, because it is
        // always dominated by the original instruction.
        newInst->setLabel(NULL);
      } else {
        // Rematerialize LdLoc.
        std::vector<SSATmp*>::iterator pos =
          std::find(localValues.begin(),
                    localValues.end(),
                    canonicalize(spilledTmp));
        // Search for a local that stores the value of <spilledTmp>.
        if (pos != localValues.end()) {
          size_t locId = pos - localValues.begin();
          assert(curFp != NULL);
          ConstInstruction constInst(curFp, Local(locId));
          IRInstruction* ldHomeInst =
            m_irFactory->cloneInstruction(&constInst);
          newInst = m_irFactory->gen(LdLoc,
                                     dst->getType(),
                                     m_irFactory->getSSATmp(ldHomeInst));
        }
      }
      if (newInst) {
        UNUSED Type::Tag oldType = dst->getType();
        newInst->setDst(dst);
        dst->setInstruction(newInst);
        assert(outputType(newInst) == oldType);
        *it = newInst;
        newInst->setParent(trace);
      }
    }

    // Update <curSp> and <localValues>.
    if (dst && dst->getReg() == rVmSp) {
      // <inst> modifies the stack pointer.
      curSp = dst;
    }
    if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
      // dst = LdLoc home
      // StLoc/StLocNT home, src
      int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
      // Note that when we implement inlining, we will need to deal with the
      // new local id space of the inlined function.
      SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
      if (int(localValues.size()) < locId + 1) {
        localValues.resize(locId + 1);
      }
      localValues[locId] = canonicalize(localValue);
    }
    // Other instructions that may have side effects on locals must kill the
    // local variable values. Guard the indexing: the iterator's locals may
    // never have been loaded or stored, in which case they are not tracked.
    else if (opc == IterInit) {
      int valLocId = inst->getSrc(3)->getConstValAsInt();
      if (valLocId < int(localValues.size())) {
        localValues[valLocId] = NULL;
      }
      if (inst->getNumSrcs() == 5) {
        int keyLocId = inst->getSrc(4)->getConstValAsInt();
        if (keyLocId < int(localValues.size())) {
          localValues[keyLocId] = NULL;
        }
      }
    } else if (opc == IterNext) {
      int valLocId = inst->getSrc(2)->getConstValAsInt();
      if (valLocId < int(localValues.size())) {
        localValues[valLocId] = NULL;
      }
      if (inst->getNumSrcs() == 4) {
        int keyLocId = inst->getSrc(3)->getConstValAsInt();
        if (keyLocId < int(localValues.size())) {
          localValues[keyLocId] = NULL;
        }
      }
    }

    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        rematerializeAux(label->getParent(), curSp, curFp, localValues);
      }
    }
  }
}
void LinearScan::rematerializeAux() {
  struct State {
    SSATmp *sp, *fp;
    std::vector<SSATmp*> values;
  };
  StateVector<Block, State*> states(m_irFactory, nullptr);
  SCOPE_EXIT { for (State* s : states) delete s; };
  SSATmp* curSp = nullptr;
  SSATmp* curFp = nullptr;
  std::vector<SSATmp*> localValues;
  auto killLocal = [&](IRInstruction& inst, unsigned src) {
    if (src < inst.getNumSrcs()) {
      unsigned loc = inst.getSrc(src)->getValInt();
      if (loc < localValues.size()) localValues[loc] = nullptr;
    }
  };
  auto setLocal = [&](unsigned loc, SSATmp* value) {
    // Note that when we implement inlining, we will need to deal with the
    // new local id space of the inlined function.
    if (loc >= localValues.size()) localValues.resize(loc + 1);
    localValues[loc] = canonicalize(value);
  };
  // Search for a local that stores <value>.
  auto findLocal = [&](SSATmp* value) -> int {
    auto pos = std::find(localValues.begin(),
                         localValues.end(),
                         canonicalize(value));
    return pos != localValues.end() ? pos - localValues.begin() : -1;
  };
  // Save the current state for future use by block; merge if necessary.
  auto saveState = [&](Block* block) {
    if (State* state = states[block]) {
      // Merge with the saved state.
      assert(curFp == state->fp);
      if (curSp != state->sp) state->sp = nullptr;
      for (unsigned i = 0; i < state->values.size(); ++i) {
        if (i >= localValues.size() || localValues[i] != state->values[i]) {
          state->values[i] = nullptr;
        }
      }
    } else {
      // Snapshot the state for use at the target.
      state = states[block] = new State;
      state->sp = curSp;
      state->fp = curFp;
      state->values = localValues;
    }
  };
  for (Block* block : m_blocks) {
    if (State* state = states[block]) {
      states[block] = nullptr;
      localValues = state->values;
      curSp = state->sp;
      curFp = state->fp;
      delete state;
    }
    for (auto it = block->begin(); it != block->end(); ++it) {
      IRInstruction& inst = *it;
      Opcode opc = inst.getOpcode();
      if (opc == DefFP || opc == FreeActRec) {
        assert(inst.getDst()->getReg() == rVmFp);
        curFp = inst.getDst();
      } else if (opc == Reload) {
        // s = Spill t0
        // t = Reload s
        SSATmp* dst = inst.getDst();
        SSATmp* spilledTmp = getSpilledTmp(dst);
        IRInstruction* spilledInst = spilledTmp->getInstruction();
        IRInstruction* newInst = NULL;
        if (spilledInst->isRematerializable() ||
            (spilledInst->getOpcode() == LdStack &&
             spilledInst->getSrc(0) == curSp)) {
          // XXX: could change <newInst> to the non-check version.
          // Rematerialize those rematerializable instructions (i.e.,
          // isRematerializable returns true) and LdStack.
          newInst = spilledInst->clone(m_irFactory);
          // The new instruction needn't have an exit label; it must always
          // be dominated by the original instruction because reloads are
          // inserted just before uses, which must be dominated by the
          // original (spilled) def.
          newInst->setTaken(nullptr);
        } else if (curFp) {
          // Rematerialize LdLoc.
          int loc = findLocal(spilledTmp);
          if (loc != -1) {
            LocalId localId(loc);
            newInst = m_irFactory->gen(LdLoc,
                                       dst->getType(),
                                       &localId,
                                       curFp);
          }
        }
        if (newInst) {
          UNUSED Type oldType = dst->getType();
          newInst->setDst(dst);
          dst->setInstruction(newInst);
          assert(outputType(newInst) == oldType);
          auto* block = inst.getBlock();
          auto newIt = block->insert(it, newInst);
          block->erase(it);
          it = newIt;
        }
      }

      // Update curSp and localValues.
      if (inst.hasDst() && inst.getDst()->getReg() == rVmSp) {
        // inst modifies the stack pointer.
        curSp = inst.getDst();
      }
      if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
        setLocal(inst.getExtra<LocalId>()->locId,
                 opc == LdLoc ? inst.getDst() : inst.getSrc(1));
      }
      // Other instructions that may have side effects on locals must kill
      // the local variable values.
      else if (opc == IterInit) {
        killLocal(inst, 3);
      } else if (opc == IterInitK) {
        killLocal(inst, 3);
        killLocal(inst, 4);
      } else if (opc == IterNext) {
        killLocal(inst, 2);
      } else if (opc == IterNextK) {
        killLocal(inst, 2);
        killLocal(inst, 3);
      }
    }
    if (Block* taken = block->getTaken()) saveState(taken);
    if (Block* next = block->getNext()) saveState(next);
  }
}
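// Merge example for saveState, with hypothetical values: if block B is
// reached from two predecessors that disagree on part of the tracked state,
// B's entry state keeps only what both agree on:
//
//   pred 1 state:  sp = t7, values = { t1, t4 }
//   pred 2 state:  sp = t7, values = { t1, t9 }
//   entry of B:    sp = t7, values = { t1, nullptr }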
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
  auto removeEmptyExitTraces = [&] {
    trace->getExitTraces().remove_if([](Trace* exit) {
      return exit->getBlocks().empty();
    });
  };

  // Kill unreachable code and remove any traces that are now empty.
  BlockList blocks = removeUnreachable(trace, irFactory);
  removeEmptyExitTraces();

  // Mark the essential instructions and add them to the initial work list;
  // this will also mark reachable exit traces. All other instructions are
  // marked dead.
  DceState state(irFactory, DceFlags());
  WorkList wl = initInstructions(trace, blocks, state, irFactory);

  // Process the worklist.
  while (!wl.empty()) {
    auto* inst = wl.front();
    wl.pop_front();
    for (uint32_t i = 0; i < inst->getNumSrcs(); i++) {
      SSATmp* src = inst->getSrc(i);
      if (src->getInstruction()->getOpcode() == DefConst) {
        continue;
      }
      IRInstruction* srcInst = src->getInstruction();
      if (state[srcInst].isDead()) {
        state[srcInst].setLive();
        wl.push_back(srcInst);
      }
      // <inst> consumes <srcInst> which is an IncRef, so we mark <srcInst>
      // as REFCOUNT_CONSUMED. If the source instruction is a GuardType and
      // guards to a maybeCounted type, we need to trace through to the
      // source for refcounting purposes.
      while (srcInst->getOpcode() == GuardType &&
             srcInst->getTypeParam().maybeCounted()) {
        srcInst = srcInst->getSrc(0)->getInstruction();
      }
      if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
        if (inst->getTrace()->isMain() || !srcInst->getTrace()->isMain()) {
          // <srcInst> is consumed from its own trace.
          state[srcInst].setCountConsumed();
        } else {
          // <srcInst> is consumed off trace.
          if (!state[srcInst].countConsumed()) {
            // Mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
            // also consumed from its own trace.
            state[srcInst].setCountConsumedOffTrace();
          }
        }
      }
    }
  }

  // Optimize IncRefs and DecRefs.
  forEachTrace(trace, [&](Trace* t) { optimizeRefCount(t, state); });

  if (RuntimeOption::EvalHHIREnableSinking) {
    // Sink IncRefs consumed off trace.
    sinkIncRefs(trace, irFactory, state);
  }

  // Now remove instructions whose id == DEAD...
  removeDeadInstructions(trace, state);
  for (Trace* exit : trace->getExitTraces()) {
    removeDeadInstructions(exit, state);
  }

  // ...and remove empty exit traces.
  removeEmptyExitTraces();
}
uint32 LinearScan::assignSpillLocAux(Trace* trace,
                                     uint32 nextSpillLoc,
                                     uint32 nextMmxReg) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instructionList.begin();
       it != instructionList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (getNextNative() == inst) {
      ASSERT(!m_natives.empty());
      m_natives.pop_front();
    }
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0; locIndex < src->numNeededRegs(); ++locIndex) {
        if (dst->getLastUseId() <= getNextNativeId()) {
          TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
        } else {
          TRACE(3, "[counter] 1 spill a tmp that spans native\n");
        }
        const bool allowMmxSpill =
          RuntimeOption::EvalHHIREnableMmx &&
          // The live range of the spill slot doesn't span native calls,
          // and we still have free MMX registers.
          dst->getLastUseId() <= getNextNativeId() &&
          nextMmxReg < (uint32)NumMmxRegs;
        dst->setSpillInfo(locIndex,
                          allowMmxSpill
                            ? SpillInfo(RegNumber(nextMmxReg++))
                            : SpillInfo(nextSpillLoc++));
        if (allowMmxSpill) {
          TRACE(3, "[counter] 1 spill to mmx\n");
        } else {
          TRACE(3, "[counter] 1 spill to memory\n");
        }
      }
    }
    if (inst->getOpcode() == Reload) {
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0; locIndex < src->numNeededRegs(); ++locIndex) {
        if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
          TRACE(3, "[counter] reload from mmx\n");
        } else {
          TRACE(3, "[counter] reload from memory\n");
        }
      }
    }
    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        nextSpillLoc = assignSpillLocAux(label->getTrace(),
                                         nextSpillLoc,
                                         nextMmxReg);
      }
    }
  }
  return nextSpillLoc;
}
void MemMap::optimizeMemoryAccesses(Trace* trace) {
  StoreList tracking;
  for (IRInstruction* inst : trace->getInstructionList()) {
    // Initialize each instruction as live.
    inst->setId(LIVE);
    int offset = -1;
    Opcode op = inst->getOpcode();
    if (isLoad(op)) {
      if (op == LdProp) {
        offset = inst->getSrc(1)->getConstValAsInt();
      }
      optimizeLoad(inst, offset);
    } else if (isStore(op)) {
      if (op == StProp || op == StPropNT) {
        offset = inst->getSrc(1)->getConstValAsInt();
      }
      // If we see a store, first check whether its last available access is
      // a store; if it is, then the last access is a dead store.
      IRInstruction* access = getLastAccess(inst->getSrc(0), offset);
      if (access != NULL && isStore(access->getOpcode())) {
        // If a dead St* is followed by a St*NT, then the second store now
        // needs to write the type as well, because the first store will be
        // removed.
        if (access->getOpcode() == StProp && op == StPropNT) {
          inst->setOpcode(StProp);
        } else if (access->getOpcode() == StLoc && op == StLocNT) {
          inst->setOpcode(StLoc);
        } else if (access->getOpcode() == StRef && op == StRefNT) {
          inst->setOpcode(StRef);
        }
        access->setId(DEAD);
      }
      // Start tracking the current store.
      tracking.push_back(std::make_pair(inst, std::vector<IRInstruction*>()));
    } else if (inst->mayRaiseError()) {
      // If the function has an exit edge that we don't know anything about
      // (raising an error), then all stores we're currently tracking need
      // to be erased. All stores already declared dead are untouched.
      StoreList::iterator it, end;
      for (it = tracking.begin(), end = tracking.end(); it != end; ) {
        StoreList::iterator copy = it;
        ++it;
        if (copy->first->getId() != DEAD) {
          // XXX: t1779667
          tracking.erase(copy);
        }
      }
    }
    // If the current instruction is guarded, make sure all of our stores
    // that are not yet dead know about it.
    if (inst->getLabel() != NULL) {
      for (auto& entry : tracking) {
        if (entry.first->getId() != DEAD) {
          entry.second.push_back(inst);
        }
      }
    }
    Simplifier::copyProp(inst);
    processInstruction(inst);
  }

  sinkStores(tracking);

  // Kill the dead stores.
  removeDeadInstructions(trace);
}
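// Illustration of the St*NT upgrade above, in hypothetical HHIR:
//
//   StProp   t1, 8, t2    ; overwritten below with no intervening load,
//                         ; so it is marked DEAD
//   StPropNT t1, 8, t3    ; relied on the type written by the first store,
//                         ; so it is upgraded to StProp before the dead
//                         ; store is removed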