void LinearScan::allocRegToTmp(SSATmp* ssaTmp, uint32_t index) {
  bool preferCallerSaved = true;
  if (RuntimeOption::EvalHHIREnableCalleeSavedOpt) {
    // Prefer caller-saved registers iff <ssaTmp> doesn't span native.
    preferCallerSaved = (ssaTmp->getLastUseId() <= getNextNativeId());
  }

  RegState* reg = NULL;
  if (!preferCallerSaved) {
    reg = getFreeReg(false);
    if (reg->isCallerSaved()) {
      // If we are out of callee-saved registers, fall into the logic of
      // assigning a caller-saved register.
      pushFreeReg(reg);
      // getFreeReg pins the reg. Need to restore it here.
      reg->m_pinned = false;
      reg = NULL;
    }
  }
  if (reg == NULL && RuntimeOption::EvalHHIREnablePreColoring) {
    // Pre-colors ssaTmp if it's used as an argument of the next native.
    // Search for the original tmp instead of <ssaTmp> itself, because
    // the pre-coloring hint is not aware of reloaded tmps.
    RegNumber targetRegNo =
      m_preColoringHint.getPreColoringReg(getOrigTmp(ssaTmp), index);
    if (targetRegNo != reg::noreg) {
      reg = getReg(&m_regs[int(targetRegNo)]);
    }
  }
  if (reg == NULL &&
      RuntimeOption::EvalHHIREnablePreColoring &&
      ssaTmp->getInstruction()->isNative()) {
    // Pre-colors ssaTmp if it's the return value of a native.
    ASSERT(index == 0);
    reg = getReg(&m_regs[int(rax)]);
  }
  if (reg == NULL) {
    // No pre-coloring for this tmp.
    // Pick a regular caller-saved reg.
    reg = getFreeReg(true);
  }

  ASSERT(reg);
  if (!preferCallerSaved && reg->isCallerSaved()) {
    // ssaTmp spans native, but we failed to find a free callee-saved reg.
    // We eagerly add a spill ssaTmp, and update ssaTmp's live range
    // to end at the next native, because we know we have to spill it at
    // the next native.
    // Setting the last use ID to the next native is conservative.
    // Setting it to the last use before the next native would be more
    // precise, but that would be more expensive to compute.
    if (ssaTmp->getSpillSlot() == -1) {
      createSpillSlot(ssaTmp);
    }
    ssaTmp->setLastUseId(getNextNativeId());
  }

  allocRegToTmp(reg, ssaTmp, index);
}
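// Illustrative sketch, not part of the allocator: the preference rule from
// allocRegToTmp() extracted into a standalone form. `TmpInfo`, `RegClass`,
// and the register pools below are hypothetical stand-ins, not HHVM types;
// they exist only to make the heuristic runnable in isolation. The idea: a
// tmp whose last use is at or before the next native call never lives
// across that call, so a caller-saved register can hold it safely; a tmp
// that spans the call should try callee-saved registers first and fall
// back to the other class when its preferred pool is exhausted.
#include <cstdint>
#include <optional>
#include <vector>

namespace sketch {

struct TmpInfo {
  uint32_t lastUseId;     // instruction id of the tmp's last use
  uint32_t nextNativeId;  // id of the next native (helper) call
};

enum class RegClass { CallerSaved, CalleeSaved };

// Same test as `preferCallerSaved` above: the tmp does not span the
// next native call iff its last use comes no later than that call.
inline RegClass preferredClass(const TmpInfo& tmp) {
  return tmp.lastUseId <= tmp.nextNativeId ? RegClass::CallerSaved
                                           : RegClass::CalleeSaved;
}

// Pick a register, trying the preferred class first and falling back to
// the other (mirroring the pushFreeReg / getFreeReg(true) fallback path).
inline std::optional<int> pickReg(const TmpInfo& tmp,
                                  std::vector<int>& callerSaved,
                                  std::vector<int>& calleeSaved) {
  auto takeFrom = [](std::vector<int>& pool) -> std::optional<int> {
    if (pool.empty()) return std::nullopt;
    int r = pool.back();
    pool.pop_back();
    return r;
  };
  if (preferredClass(tmp) == RegClass::CalleeSaved) {
    if (auto r = takeFrom(calleeSaved)) return r;
    return takeFrom(callerSaved);
  }
  if (auto r = takeFrom(callerSaved)) return r;
  return takeFrom(calleeSaved);
}

} // namespace sketch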
uint32 LinearScan::assignSpillLocAux(Trace* trace,
                                     uint32 nextSpillLoc,
                                     uint32 nextMmxReg) {
  IRInstruction::List& instructionList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instructionList.begin();
       it != instructionList.end();
       ++it) {
    IRInstruction* inst = *it;
    if (getNextNative() == inst) {
      ASSERT(!m_natives.empty());
      m_natives.pop_front();
    }
    if (inst->getOpcode() == Spill) {
      SSATmp* dst = inst->getDst();
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0; locIndex < src->numNeededRegs(); ++locIndex) {
        if (dst->getLastUseId() <= getNextNativeId()) {
          TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
        } else {
          TRACE(3, "[counter] 1 spill a tmp that spans native\n");
        }

        // Allow an MMX spill only if the live range of the spill slot
        // doesn't span native calls, and we still have free MMX registers.
        const bool allowMmxSpill = RuntimeOption::EvalHHIREnableMmx &&
          dst->getLastUseId() <= getNextNativeId() &&
          nextMmxReg < (uint32)NumMmxRegs;

        dst->setSpillInfo(locIndex,
          allowMmxSpill
            ? SpillInfo(RegNumber(nextMmxReg++))
            : SpillInfo(nextSpillLoc++)
        );

        if (allowMmxSpill) {
          TRACE(3, "[counter] 1 spill to mmx\n");
        } else {
          TRACE(3, "[counter] 1 spill to memory\n");
        }
      }
    }
    if (inst->getOpcode() == Reload) {
      SSATmp* src = inst->getSrc(0);
      for (int locIndex = 0; locIndex < src->numNeededRegs(); ++locIndex) {
        if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
          TRACE(3, "[counter] reload from mmx\n");
        } else {
          TRACE(3, "[counter] reload from memory\n");
        }
      }
    }
    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        nextSpillLoc = assignSpillLocAux(label->getTrace(),
                                         nextSpillLoc,
                                         nextMmxReg);
      }
    }
  }
  return nextSpillLoc;
}
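// Illustrative sketch, not part of the allocator: the spill-placement rule
// from assignSpillLocAux() in isolation. `SpillSlot` and `SpillAllocator`
// are hypothetical stand-ins, not HHVM types. A slot may live in an MMX
// register only when (a) the MMX option is enabled, (b) the slot's live
// range ends at or before the next native call (native code may clobber
// MMX state), and (c) an MMX register is still free; otherwise the slot
// falls back to a memory location.
#include <cstdint>

namespace sketch {

constexpr uint32_t kNumMmxRegs = 8;  // x86-64 has mm0..mm7

struct SpillSlot {
  bool inMmx;      // true => `index` names an MMX register
  uint32_t index;  // MMX register number or memory slot number
};

struct SpillAllocator {
  bool mmxEnabled;
  uint32_t nextMmxReg = 0;
  uint32_t nextMemSlot = 0;

  // Mirrors the allowMmxSpill decision above: prefer an MMX register
  // for short-lived slots, memory for everything else.
  SpillSlot assign(uint32_t lastUseId, uint32_t nextNativeId) {
    const bool allowMmx = mmxEnabled &&
                          lastUseId <= nextNativeId &&  // doesn't span native
                          nextMmxReg < kNumMmxRegs;     // MMX regs remain
    if (allowMmx) return {true, nextMmxReg++};
    return {false, nextMemSlot++};
  }
};

} // namespace sketch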