// If the main trace starts with guards, have them generate a patchable
// jump to the anchor trace.
static void hoistGuardJumps(Trace* trace, IRFactory* irFactory) {
  LabelInstruction* guardLabel = nullptr;
  IRInstruction::List& instList = trace->getInstructionList();
  // Check the beginning of the trace for guards.
  for (IRInstruction* inst : instList) {
    Opcode opc = inst->getOpcode();
    if (inst->getLabel() &&
        (opc == LdLoc || opc == LdStack ||
         opc == GuardLoc || opc == GuardStk)) {
      LabelInstruction* exitLabel = inst->getLabel();
      // Find the GuardFailure's label and confirm this branches there.
      if (!guardLabel && exitLabel->getParent() != trace) {
        Trace* exitTrace = exitLabel->getParent();
        IRInstruction::List& xList = exitTrace->getInstructionList();
        IRInstruction::Iterator instIter = xList.begin();
        instIter++; // skip over label
        // Confirm this is a GuardExit.
        for (IRInstruction::Iterator it = instIter; it != xList.end(); ++it) {
          IRInstruction* i = *it;
          Opcode op = i->getOpcode();
          if (op == Marker) {
            continue;
          }
          if (op == ExitGuardFailure) {
            guardLabel = exitLabel;
          }
          // Do not optimize if other instructions are on the exit trace.
          break;
        }
      }
      if (exitLabel == guardLabel) {
        inst->setTCA(kIRDirectGuardActive);
        continue;
      }
      break;
    }
    if (opc == Marker || opc == DefLabel || opc == DefSP || opc == DefFP ||
        opc == LdStack) {
      continue;
    }
    break;
  }
}
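// For reference, the only exit-trace shape the scan above treats as a
// guard exit is the following (a hedged illustration in pseudo-IR, not
// real syntax):
//
//   exit:              ; label, skipped
//     Marker ...       ; any number of Markers are skipped
//     ExitGuardFailure ; first real instruction must be ExitGuardFailure
//
// Anything else on the exit trace disables the optimization for that guard.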
void LinearScan::collectNativesAux(Trace* trace) {
  for (IRInstruction* inst : trace->getInstructionList()) {
    if (inst->isNative()) {
      m_natives.push_back(inst);
    }
    if (inst->isControlFlowInstruction()) {
      // Recurse into a target trace that is laid out immediately
      // after this instruction.
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        collectNativesAux(label->getParent());
      }
    }
  }
}
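// The trace walkers in this pass all repeat the same "is the target trace
// laid out right after this branch?" test. A minimal sketch of that test
// as a helper, under the assumption that instruction ids are assigned in
// linear layout order (the helper name is hypothetical; the real code
// inlines the comparison):
static bool isFallThroughTarget(IRInstruction* inst, LabelInstruction* label) {
  // Only the branch that immediately precedes the target trace in layout
  // order sees label id == inst id + 1, so each target trace is visited
  // exactly once.
  return label != NULL && label->getId() == inst->getId() + 1;
}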
void LinearScan::allocRegsToTraceAux(Trace* trace) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  IRInstruction::Iterator it;
  for (it = instructionList.begin(); it != instructionList.end(); it++) {
    IRInstruction* inst = *it;
    allocRegToInstruction(trace, it);
    if (RuntimeOption::EvalDumpIR > 3) {
      std::cout << "--- allocated to instruction: ";
      inst->print(std::cout);
      std::cout << "\n";
    }
    if (inst->isControlFlowInstruction()) {
      // This instruction may transfer control to another trace.
      // If this is the last instruction in the trace that can branch
      // to this target trace, then allocate registers to the target
      // trace, effectively linearizing the target trace after inst.
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        allocRegsToTraceAux(label->getParent());
      }
    }
  }

  // Insert spill instructions.
  // Reload instructions are already added in <allocRegsToTrace>.
  for (it = instructionList.begin(); it != instructionList.end(); ) {
    IRInstruction::Iterator next = it;
    ++next;
    IRInstruction* inst = *it;
    if (inst->getOpcode() != Reload) {
      // Reloaded SSATmps needn't be spilled again.
      if (SSATmp* dst = inst->getDst()) {
        int32 slotId = dst->getSpillSlot();
        if (slotId != -1) {
          // If this instruction is marked to be spilled,
          // add a spill right afterwards.
          IRInstruction* spillInst =
            m_slots[slotId].m_spillTmp->getInstruction();
          instructionList.insert(next, spillInst);
          spillInst->setParent(trace);
        }
      }
    }
    it = next;
  }
}
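// Illustration of the spill-insertion loop above (hedged pseudo-IR, not
// real syntax): if t1 was assigned a spill slot, the trace is rewritten
// from
//     t1 = Add t0, t2
// to
//     t1 = Add t0, t2
//     s1 = Spill t1     ; inserted right after the defining instruction
// Uses that need the value back go through Reload instructions, which
// were inserted earlier in the pass (see the comment above).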
// <localValues> is passed by value on purpose: each recursively-processed
// target trace works on its own copy of the known local values.
void LinearScan::rematerializeAux(Trace* trace,
                                  SSATmp* curSp,
                                  SSATmp* curFp,
                                  std::vector<SSATmp*> localValues) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    SSATmp* dst = inst->getDst();
    if (opc == DefFP || opc == FreeActRec) {
      curFp = dst;
      assert(dst && dst->getReg() == rVmFp);
    }
    if (opc == Reload) {
      // s = Spill t0
      // t = Reload s
      SSATmp* spilledTmp = getSpilledTmp(dst);
      IRInstruction* spilledInst = spilledTmp->getInstruction();
      IRInstruction* newInst = NULL;
      if (spilledInst->isRematerializable() ||
          (spilledInst->getOpcode() == LdStack &&
           spilledInst->getSrc(0) == curSp)) {
        // XXX: could change <newInst> to the non-check version.
        // Rematerialize those rematerializable instructions (i.e.,
        // isRematerializable returns true) and LdStack.
        newInst = spilledInst->clone(m_irFactory);
        // The new instruction needn't have an exit label, because it is
        // always dominated by the original instruction.
        newInst->setLabel(NULL);
      } else {
        // Rematerialize LdLoc.
        // Search for a local that stores the value of <spilledTmp>.
        std::vector<SSATmp*>::iterator pos =
          std::find(localValues.begin(),
                    localValues.end(),
                    canonicalize(spilledTmp));
        if (pos != localValues.end()) {
          size_t locId = pos - localValues.begin();
          assert(curFp != NULL);
          ConstInstruction constInst(curFp, Local(locId));
          IRInstruction* ldHomeInst =
            m_irFactory->cloneInstruction(&constInst);
          newInst = m_irFactory->gen(LdLoc,
                                     dst->getType(),
                                     m_irFactory->getSSATmp(ldHomeInst));
        }
      }
      if (newInst) {
        UNUSED Type::Tag oldType = dst->getType();
        newInst->setDst(dst);
        dst->setInstruction(newInst);
        assert(outputType(newInst) == oldType);
        *it = newInst;
        newInst->setParent(trace);
      }
    }

    // Update <curSp> and <localValues>.
    if (dst && dst->getReg() == rVmSp) {
      // <inst> modifies the stack pointer.
      curSp = dst;
    }
    if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
      // dst = LdLoc home
      // StLoc/StLocNT home, src
      int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
      // Note that when we implement inlining, we will need to deal
      // with the new local id space of the inlined function.
      SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
      if (int(localValues.size()) < locId + 1) {
        localValues.resize(locId + 1);
      }
      localValues[locId] = canonicalize(localValue);
    }
    // Other instructions that may have side effects on locals must
    // kill the known local variable values.
    else if (opc == IterInit) {
      int valLocId = inst->getSrc(3)->getConstValAsInt();
      localValues[valLocId] = NULL;
      if (inst->getNumSrcs() == 5) {
        int keyLocId = inst->getSrc(4)->getConstValAsInt();
        localValues[keyLocId] = NULL;
      }
    } else if (opc == IterNext) {
      int valLocId = inst->getSrc(2)->getConstValAsInt();
      localValues[valLocId] = NULL;
      if (inst->getNumSrcs() == 4) {
        int keyLocId = inst->getSrc(3)->getConstValAsInt();
        localValues[keyLocId] = NULL;
      }
    }

    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        rematerializeAux(label->getParent(), curSp, curFp, localValues);
      }
    }
  }
}
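// rematerializeAux compares values through canonicalize(), which is not
// shown in this section. A minimal sketch of the behavior its call sites
// rely on, assuming Mov/Spill/Reload are the only value-preserving copies
// (the opcode set and the free-function form are assumptions):
static SSATmp* canonicalizeSketch(SSATmp* tmp) {
  while (true) {
    IRInstruction* inst = tmp->getInstruction();
    Opcode opc = inst->getOpcode();
    // Walk copy chains back to the original defining instruction, so two
    // tmps holding the same value compare equal.
    if (opc != Mov && opc != Spill && opc != Reload) {
      return tmp;
    }
    tmp = inst->getSrc(0);
  }
}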
uint32 LinearScan::assignSpillLocAux(Trace* trace,
                                     uint32 nextSpillLoc,
                                     uint32 nextMmxReg) {
  for (IRInstruction* inst : trace->getInstructionList()) {
    if (getNextNative() == inst) {
      assert(!m_natives.empty());
      m_natives.pop_front();
    }
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0; locIndex < src->numNeededRegs(); ++locIndex) {
        if (dst->getLastUseId() <= getNextNativeId()) {
          TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
        } else {
          TRACE(3, "[counter] 1 spill a tmp that spans native\n");
        }
        // Spill to an MMX register only if the live range of the spill
        // slot doesn't span native calls and we still have free MMX
        // registers.
        const bool allowMmxSpill =
          RuntimeOption::EvalHHIREnableMmx &&
          dst->getLastUseId() <= getNextNativeId() &&
          nextMmxReg < (uint32)NumMmxRegs;
        dst->setSpillInfo(locIndex,
                          allowMmxSpill
                            ? SpillInfo(RegNumber(nextMmxReg++))
                            : SpillInfo(nextSpillLoc++));
        if (allowMmxSpill) {
          TRACE(3, "[counter] 1 spill to mmx\n");
        } else {
          TRACE(3, "[counter] 1 spill to memory\n");
        }
      }
    }
    if (inst->getOpcode() == Reload) {
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0; locIndex < src->numNeededRegs(); ++locIndex) {
        if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
          TRACE(3, "[counter] reload from mmx\n");
        } else {
          TRACE(3, "[counter] reload from memory\n");
        }
      }
    }
    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        nextSpillLoc = assignSpillLocAux(label->getParent(),
                                         nextSpillLoc,
                                         nextMmxReg);
      }
    }
  }
  return nextSpillLoc;
}
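// A minimal sketch of the SpillInfo record used above, reconstructed from
// its call sites (the field names and exact layout are assumptions): it
// tags a spilled tmp with either an MMX register number or a memory slot
// index.
class SpillInfoSketch {
 public:
  enum Type { MMX, Memory };
  explicit SpillInfoSketch(RegNumber reg) : m_type(MMX), m_val(int(reg)) {}
  explicit SpillInfoSketch(uint32 slot) : m_type(Memory), m_val(int(slot)) {}
  Type type() const { return m_type; }
  int val() const { return m_val; }
 private:
  Type m_type; // where the value lives while spilled
  int m_val;   // MMX register number or spill-slot index
};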
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List wl; // worklist of live instructions
  Trace::List& exitTraces = trace->getExitTraces();
  // First mark all exit traces as unreachable by setting the id on
  // their labels to DEAD.
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    Trace* exitTrace = *it;
    exitTrace->getLabel()->setId(DEAD);
  }

  // Mark the essential instructions and add them to the initial
  // work list; also mark the exit traces that are reachable by
  // any control flow instruction in the main trace.
  initInstructions(trace, wl);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    // Only process those exit traces that are reachable from the main trace.
    Trace* exitTrace = *it;
    if (exitTrace->getLabel()->getId() != DEAD) {
      initInstructions(exitTrace, wl);
    }
  }

  // Process the worklist.
  while (!wl.empty()) {
    IRInstruction* inst = wl.front();
    wl.pop_front();
    for (uint32 i = 0; i < inst->getNumSrcs(); i++) {
      SSATmp* src = inst->getSrc(i);
      IRInstruction* srcInst = src->getInstruction();
      if (srcInst->isDefConst()) {
        continue;
      }
      if (srcInst->getId() == DEAD) {
        srcInst->setId(LIVE);
        wl.push_back(srcInst);
      }
      // <inst> consumes <srcInst>, which is an IncRef,
      // so we mark <srcInst> as REFCOUNT_CONSUMED.
      if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
        if (inst->getParent()->isMain() || !srcInst->getParent()->isMain()) {
          // <srcInst> is consumed from its own trace.
          srcInst->setId(REFCOUNT_CONSUMED);
        } else {
          // <srcInst> is consumed off trace.
          if (srcInst->getId() != REFCOUNT_CONSUMED) {
            // Mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
            // also consumed from its own trace.
            srcInst->setId(REFCOUNT_CONSUMED_OFF_TRACE);
          }
        }
      }
    }
  }

  // Optimize IncRefs and DecRefs.
  optimizeRefCount(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); ++it) {
    optimizeRefCount(*it);
  }

  if (RuntimeOption::EvalHHIREnableSinking) {
    // Sink IncRefs consumed off trace.
    IRInstruction::List toSink;
    sinkIncRefs(trace, irFactory, toSink);
  }

  // Now remove the instructions whose id == DEAD.
  removeDeadInstructions(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    removeDeadInstructions(*it);
  }

  // If the main trace ends with an unconditional jump, copy the target of
  // the jump to the end of the trace.
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator lastInst = instList.end();
  lastInst--; // go back to the last instruction
  IRInstruction* jmpInst = *lastInst;
  if (jmpInst->getOpcode() == Jmp_) {
    Trace* targetTrace = jmpInst->getLabel()->getParent();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator instIter = targetInstList.begin();
    instIter++; // skip over label
    // Update the parent trace of the moved instructions.
    for (IRInstruction::Iterator it = instIter;
         it != targetInstList.end();
         ++it) {
      (*it)->setParent(trace);
    }
    instList.splice(lastInst, targetInstList, instIter, targetInstList.end());
    // Delete the jump instruction.
    instList.erase(lastInst);
  }

  // If the main trace ends with a conditional jump that has no side
  // effects on exit, hook it to the exit trace and make it a
  // TraceExitType::NormalCc.
  if (RuntimeOption::EvalHHIRDirectExit) {
    IRInstruction::Iterator tail = instList.end();
    IRInstruction* jccInst = NULL;
    IRInstruction* exitInst = NULL;
    IRInstruction* exitCcInst = NULL;
    Opcode opc = OpAdd; // dummy initializer; overwritten in the loop below
    // Normally the Jcc comes right before a Marker and the ExitTrace, so
    // look back at most four instructions for the Jcc/ExitTrace pair.
    for (int idx = 3; idx >= 0; idx--) {
      tail--; // go back to the previous instruction
      IRInstruction* inst = *tail;
      opc = inst->getOpcode();
      if (opc == ExitTrace) {
        exitInst = inst;
        continue;
      }
      if (opc == Marker) {
        continue;
      }
      if (jccCanBeDirectExit(opc)) {
        jccInst = inst;
      }
      break;
    }
    if (jccCanBeDirectExit(opc)) {
      SSATmp* dst = jccInst->getDst();
      Trace* targetTrace = jccInst->getLabel()->getParent();
      IRInstruction::List& targetInstList = targetTrace->getInstructionList();
      IRInstruction::Iterator targetInstIter = targetInstList.begin();
      targetInstIter++; // skip over label
      // Check for a NormalCc exit with no side effects.
      for (IRInstruction::Iterator it = targetInstIter;
           it != targetInstList.end();
           ++it) {
        IRInstruction* instr = *it;
        // TODO: extend this to support ExitSlow, ExitSlowNoProgress, ...
        Opcode instOpc = instr->getOpcode();
        if (instOpc == ExitTraceCc) {
          exitCcInst = instr;
          break;
        } else if (instOpc == Marker) {
          continue;
        } else {
          // Do not optimize if there are other instructions.
          break;
        }
      }
      if (exitInst && exitCcInst) {
        // Found both exits; link them to the Jcc for codegen.
        assert(dst);
        exitCcInst->appendSrc(*irFactory, dst);
        exitInst->appendSrc(*irFactory, dst);
        // Set the flag so the Jcc and the exits know this is active.
        dst->setTCA(kIRDirectJccJmpActive);
      }
    }
  }

  // If the main trace starts with guards, have them generate a patchable
  // jump to the anchor trace. The scan is factored out into
  // hoistGuardJumps() above, so it is not duplicated here.
  if (RuntimeOption::EvalHHIRDirectExit) {
    hoistGuardJumps(trace, irFactory);
  }
}
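// jccCanBeDirectExit() is referenced above but not shown in this section.
// A minimal sketch of the predicate, assuming the conditional-jump opcodes
// named below exist; the exact opcode set is an assumption, and the real
// predicate is whatever the opcode table defines:
static bool jccCanBeDirectExitSketch(Opcode opc) {
  switch (opc) {
    // Conditional jumps whose only effect is the branch itself, so the
    // taken/not-taken paths can be fused into a single NormalCc exit.
    case JmpZero:
    case JmpNZero:
    case JmpSame:
    case JmpNSame:
      return true;
    default:
      return false;
  }
}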