// Construct a label symbol associated with a basic block.
// Neither parameter is read here; they exist for the caller's convenience
// and for symmetry with other constructors.
//
// FIX: initialize _directlyTargeted(false), matching the default
// constructor below — the previous version left the member indeterminate,
// so reads of it on this construction path were undefined.
OMR::LabelSymbol::LabelSymbol(TR::CodeGenerator *codeGen, TR::Block *labb) :
   TR::Symbol(),
   _instruction(NULL),
   _codeLocation(NULL),
   _estimatedCodeLocation(0),
   _snippet(NULL),
   _directlyTargeted(false)
   {
   self()->setIsLabel();

   // Register the new label with the debug object (if any) so it gets a
   // stable name in listings.
   TR::Compilation *comp = TR::comp();
   if (comp && comp->getDebug())
      comp->getDebug()->newLabelSymbol(self());
   }
// Default-construct a label symbol: no instruction, no code location,
// no snippet, and not directly targeted by any branch yet.
OMR::LabelSymbol::LabelSymbol() :
   TR::Symbol(),
   _instruction(NULL),
   _codeLocation(NULL),
   _estimatedCodeLocation(0),
   _snippet(NULL),
   _directlyTargeted(false)
   {
   self()->setIsLabel();

   // If a debug object exists for the current compilation, let it know
   // about this label so it can be named in traces.
   if (TR::Compilation *comp = TR::comp())
      {
      if (comp->getDebug())
         comp->getDebug()->newLabelSymbol(self());
      }
   }
// Run instruction selection: lower the IL trees into machine instructions,
// with optional before/after trace dumps, live-register sanity checks in
// debug builds, and an interrupt check afterwards.
void
OMR::CodeGenPhase::performInstructionSelectionPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation *compilation = cg->comp();

   phase->reportPhase(InstructionSelectionPhase);

   // Dump the incoming trees when any of the relevant trace options is on.
   bool tracePreSelection = compilation->getOption(TR_TraceCG)
      || compilation->getOption(TR_TraceTrees)
      || compilation->getOptions()->getTraceCGOption(TR_TraceCGPreInstructionSelection);
   if (tracePreSelection)
      compilation->dumpMethodTrees("Pre Instruction Selection Trees");

   // Memory/time profiling scopes cover the remainder of the phase.
   TR::LexicalMemProfiler mp(phase->getName(), compilation->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), compilation->phaseTimer());

   cg->doInstructionSelection();

   bool tracePostSelection = compilation->getOption(TR_TraceCG)
      || compilation->getOptions()->getTraceCGOption(TR_TraceCGPostInstructionSelection);
   if (tracePostSelection)
      compilation->getDebug()->dumpMethodInstrs(compilation->getOutFile(), "Post Instruction Selection Instructions", false, true);

   // check reference counts
#if defined(DEBUG) || defined(PROD_WITH_ASSUMES)
   for (int kind = 0; kind < NumRegisterKinds; ++kind)
      {
      if (TO_KIND_MASK(kind) & cg->getSupportedLiveRegisterKinds())
         cg->checkForLiveRegisters(cg->getLiveRegisters((TR_RegisterKinds)kind));
      }
#endif

   // check interrupt
   if (compilation->compilationShouldBeInterrupted(AFTER_INSTRUCTION_SELECTION_CONTEXT))
      compilation->failCompilation<TR::CompilationInterrupted>("interrupted after instruction selection");
   }
// Run register assignment (RA): prepare registers, discard stale spill
// temps, assign the non-colourable kinds, and honour interrupt requests.
// Optionally dumps the instruction stream before and after assignment.
//
// FIX: removed the unused local `TR_RegisterKinds colourableKindsToAssign`
// (declared but never written or read) and the stale commented-out
// TR_TraceRAListing condition that preceded the pre-RA dump.
void
OMR::CodeGenPhase::performRegisterAssigningPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation* comp = cg->comp();
   phase->reportPhase(RegisterAssigningPhase);

   if (cg->getDebug())
      cg->getDebug()->roundAddressEnumerationCounters();

      {
      // Profiling scopes cover just the assignment work.
      TR::LexicalMemProfiler mp("RA", comp->phaseMemProfiler());
      LexicalTimer pt("RA", comp->phaseTimer());

      TR_RegisterKinds nonColourableKindsToAssign = cg->prepareRegistersForAssignment();

      // Spill temps used before now may lead to conflicts if also used by register assignment
      cg->jettisonAllSpills();

      // Do local register assignment for non-colourable registers.
      if (cg->getDebug())
         cg->getDebug()->dumpMethodInstrs(comp->getOutFile(),"Before Local RA",false);

      cg->doRegisterAssignment(nonColourableKindsToAssign);

      if (comp->compilationShouldBeInterrupted(AFTER_REGISTER_ASSIGNMENT_CONTEXT))
         comp->failCompilation<TR::CompilationInterrupted>("interrupted after RA");
      }

   if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceCGPostRegisterAssignment))
      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Post Register Assignment Instructions", false, true);
   }
// Emit all out-of-line snippets after the main instruction stream, apply
// OSR bookkeeping when enabled, and optionally trace the binary encoding
// plus a summary of code/snippet memory consumption.
//
// FIX: getBinaryBufferStart()/getCodeStart() return pointers; printing
// them through "%8x" truncates on 64-bit platforms and is undefined
// behaviour for a pointer argument — use "%p" instead.
void
OMR::CodeGenPhase::performEmitSnippetsPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation * comp = cg->comp();
   phase->reportPhase(EmitSnippetsPhase);

   TR::LexicalMemProfiler mp("Emit Snippets", comp->phaseMemProfiler());
   LexicalTimer pt("Emit Snippets", comp->phaseTimer());

   cg->emitSnippets();

   if (comp->getOption(TR_EnableOSR))
      {
      comp->getOSRCompilationData()->checkOSRLimits();
      comp->getOSRCompilationData()->compressInstruction2SharedSlotMap();
      }

   if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceCGPostBinaryEncoding))
      {
      diagnostic("\nbuffer start = %p, code start = %p, buffer length = %d", cg->getBinaryBufferStart(), cg->getCodeStart(), cg->getEstimatedCodeLength());
      diagnostic("\n");
      const char * title = "Post Binary Instructions";
      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), title, false, true);

      traceMsg(comp,"<snippets>");
      comp->getDebug()->print(comp->getOutFile(), cg->getSnippetList());
      traceMsg(comp,"\n</snippets>\n");

      // Walk the snippet list, accumulating each snippet's estimated length
      // starting from the estimated snippet start, to report total snippet size.
      auto iterator = cg->getSnippetList().begin();
      int32_t estimatedSnippetStart = cg->getEstimatedSnippetStart();
      while (iterator != cg->getSnippetList().end())
         {
         estimatedSnippetStart += (*iterator)->getLength(estimatedSnippetStart);
         ++iterator;
         }
      int32_t snippetLength = estimatedSnippetStart - cg->getEstimatedSnippetStart();

      diagnostic("\nAmount of code memory allocated for this function = %d" "\nAmount of code memory consumed for this function = %d" "\nAmount of snippet code memory consumed for this function = %d\n\n", cg->getEstimatedCodeLength(), cg->getCodeLength(), snippetLength);
      }
   }
// Run the peephole optimizer over the generated instruction stream and
// optionally dump the result under TR_TraceCG.
void
OMR::CodeGenPhase::performPeepholePhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation *compilation = cg->comp();

   phase->reportPhase(PeepholePhase);

   // Profile memory and time for the duration of the peephole pass.
   TR::LexicalMemProfiler memProfiler(phase->getName(), compilation->phaseMemProfiler());
   LexicalTimer timer(phase->getName(), compilation->phaseTimer());

   cg->doPeephole();

   if (compilation->getOption(TR_TraceCG))
      compilation->getDebug()->dumpMethodInstrs(compilation->getOutFile(), "Post Peephole Instructions", false);
   }
// Map the method's stack frame: remap GC indices into internal-pointer
// format, let the linkage lay out the stack, then map the automatics.
void
OMR::CodeGenPhase::performMapStackPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation *compilation = cg->comp();

   cg->remapGCIndicesInInternalPtrFormat();

      {
      // Profiling is scoped to the stack-mapping work only.
      TR::LexicalMemProfiler memProfiler("Stackmap", compilation->phaseMemProfiler());
      LexicalTimer timer("Stackmap", compilation->phaseTimer());

      cg->getLinkage()->mapStack(compilation->getJittedMethodSymbol());

      bool traceStackMap = compilation->getOption(TR_TraceCG)
         || compilation->getOptions()->getTraceCGOption(TR_TraceEarlyStackMap);
      if (traceStackMap)
         compilation->getDebug()->dumpMethodInstrs(compilation->getOutFile(), "Post Stack Map", false);
      }

   cg->setMappingAutomatics();
   }
// Evaluate an i2l (int -> long sign extension) node on AMD64.
TR::Register *OMR::X86::AMD64::TreeEvaluator::i2lEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Compilation *comp = cg->comp();
   TR::Node *child = node->getFirstChild();

   if (child->getOpCode().isLoadConst())
      {
      // Constant child: materialize the value directly into a fresh register.
      TR::Register *resultReg = cg->allocateRegister();
      generateRegImmInstruction(MOV8RegImm4, node, resultReg, child->getInt(), cg);
      node->setRegister(resultReg);
      cg->decReferenceCount(child);
      return resultReg;
      }

   // In theory, because iRegStore has chosen to disregard needsSignExtension,
   // we must disregard skipSignExtension here for correctness.
   //
   // However, in fact, it is actually safe to obey skipSignExtension so
   // long as the optimizer only uses it on nodes known to be non-negative
   // when the i2l occurs. We do already have isNonNegative for that
   // purpose, but it may not always be set by the optimizer if a node known
   // to be non-negative at one point in a block is commoned up above the
   // BNDCHK or branch that determines the node's non-negativity. The
   // codegen does set the flag during tree evaluation, but the
   // skipSignExtension flag is set by the optimizer with more global
   // knowledge than the tree evaluator, so we will trust it.
   bool canSkipExtension = node->isNonNegative()
      || (node->skipSignExtension()
          && performTransformation(comp, "TREE EVALUATION: skipping sign extension on node %s despite lack of isNonNegative\n", comp->getDebug()->getName(node)));

   TR_X86OpCodes regMemOpCode;
   TR_X86OpCodes regRegOpCode;
   if (canSkipExtension)
      {
      // We prefer these plain (zero-extending) opcodes because the analyser
      // can often eliminate them.
      regMemOpCode = L4RegMem;
      regRegOpCode = MOVZXReg8Reg4;
      }
   else
      {
      regMemOpCode = MOVSXReg8Mem4;
      regRegOpCode = MOVSXReg8Reg4;
      }

   return TR::TreeEvaluator::conversionAnalyser(node, regMemOpCode, regRegOpCode, cg);
   }
// Satisfy every register dependency in this group at currentInstruction
// during (backwards) register assignment on PPC: handle OOL spill/reload
// bookkeeping, block already-correct assignments to break circular
// dependencies, coerce virtuals into their required real registers, then
// assign the remaining NoReg dependencies. Pass ordering is significant.
void TR_PPCRegisterDependencyGroup::assignRegisters(TR::Instruction *currentInstruction, TR_RegisterKinds kindToBeAssigned, uint32_t numberOfRegisters, TR::CodeGenerator *cg)
   {
   // *this swipeable for debugging purposes
   TR::Machine *machine = cg->machine();
   TR::Register *virtReg;
   TR::RealRegister::RegNum dependentRegNum;
   TR::RealRegister *dependentRealReg, *assignedRegister, *realReg;
   int i, j;
   TR::Compilation *comp = cg->comp();
   int num_gprs = 0;
   int num_fprs = 0;
   int num_vrfs = 0;

   // Use to do lookups using real register numbers
   TR_PPCRegisterDependencyMap map(_dependencies, numberOfRegisters);

   // OOL (out-of-line) pre-pass: reconcile SpilledReg dependencies with the
   // spilled-register list, and release backing storage when leaving an OOL
   // cold section at a label.
   if (!comp->getOption(TR_DisableOOL))
      {
      for (i = 0; i< numberOfRegisters; i++)
         {
         virtReg = _dependencies[i].getRegister();
         dependentRegNum = _dependencies[i].getRealRegister();
         if (dependentRegNum == TR::RealRegister::SpilledReg)
            {
            TR_ASSERT(virtReg->getBackingStorage(),"should have a backing store if dependentRegNum == spillRegIndex()\n");
            if (virtReg->getAssignedRealRegister())
               {
               // this happens when the register was first spilled in main line path then was reverse spilled
               // and assigned to a real register in OOL path. We protected the backing store when doing
               // the reverse spill so we could re-spill to the same slot now
               traceMsg (comp,"\nOOL: Found register spilled in main line and re-assigned inside OOL");
               TR::Node *currentNode = currentInstruction->getNode();
               TR::RealRegister *assignedReg = toRealRegister(virtReg->getAssignedRegister());
               TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(currentNode, (TR::SymbolReference*)virtReg->getBackingStorage()->getSymbolReference(), sizeof(uintptr_t), cg);
               TR::InstOpCode::Mnemonic opCode;
               TR_RegisterKinds rk = virtReg->getKind();
               // Pick a reload opcode matching the register kind (GPR load, or
               // single/double precision FP load).
               switch (rk)
                  {
                  case TR_GPR:
                     opCode = TR::InstOpCode::Op_load;
                     break;
                  case TR_FPR:
                     opCode = virtReg->isSinglePrecision() ? TR::InstOpCode::lfs : TR::InstOpCode::lfd;
                     break;
                  default:
                     TR_ASSERT(0, "\nRegister kind not supported in OOL spill\n");
                     break;
                  }
               // Insert the reload before currentInstruction, then detach the
               // real register so the virtual appears spilled again.
               TR::Instruction *inst = generateTrg1MemInstruction(cg, opCode, currentNode, assignedReg, tempMR, currentInstruction);
               assignedReg->setAssignedRegister(NULL);
               virtReg->setAssignedRegister(NULL);
               assignedReg->setState(TR::RealRegister::Free);
               if (comp->getDebug())
                  cg->traceRegisterAssignment("Generate reload of virt %s due to spillRegIndex dep at inst %p\n",comp->getDebug()->getName(virtReg),currentInstruction);
               cg->traceRAInstruction(inst);
               }
            // Keep the spilled-register list deduplicated.
            if (!(std::find(cg->getSpilledRegisterList()->begin(), cg->getSpilledRegisterList()->end(), virtReg) != cg->getSpilledRegisterList()->end()))
               cg->getSpilledRegisterList()->push_front(virtReg);
            }
         // we also need to free up all locked backing storage if we are exiting the OOL during backwards RA assignment
         else if (currentInstruction->isLabel() && virtReg->getAssignedRealRegister())
            {
            TR::PPCLabelInstruction *labelInstr = (TR::PPCLabelInstruction *)currentInstruction;
            TR_BackingStore * location = virtReg->getBackingStorage();
            TR_RegisterKinds rk = virtReg->getKind();
            int32_t dataSize;
            if (labelInstr->getLabelSymbol()->isStartOfColdInstructionStream() && location)
               {
               traceMsg (comp,"\nOOL: Releasing backing storage (%p)\n", location);
               if (rk == TR_GPR)
                  dataSize = TR::Compiler->om.sizeofReferenceAddress();
               else
                  dataSize = 8;
               location->setMaxSpillDepth(0);
               cg->freeSpill(location,dataSize,0);
               virtReg->setBackingStorage(NULL);
               }
            }
         }
      }

   // Populate the real-register lookup map and count the non-spilled
   // dependencies of each register kind.
   for (i = 0; i < numberOfRegisters; i++)
      {
      map.addDependency(_dependencies[i], i);
      virtReg = _dependencies[i].getRegister();
      dependentRegNum = _dependencies[i].getRealRegister();
      if (dependentRegNum != TR::RealRegister::SpilledReg)
         {
         if (virtReg->getKind() == TR_GPR)
            num_gprs++;
         else if (virtReg->getKind() == TR_FPR)
            num_fprs++;
         else if (virtReg->getKind() == TR_VRF)
            num_vrfs++;
         }
      }

#ifdef DEBUG
   int locked_gprs = 0;
   int locked_fprs = 0;
   int locked_vrfs = 0;
   // count up how many registers are locked for each type
   for(i = TR::RealRegister::FirstGPR; i <= TR::RealRegister::LastGPR; i++)
      {
      realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
      if (realReg->getState() == TR::RealRegister::Locked)
         locked_gprs++;
      }
   for(i = TR::RealRegister::FirstFPR; i <= TR::RealRegister::LastFPR; i++)
      {
      realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
      if (realReg->getState() == TR::RealRegister::Locked)
         locked_fprs++;
      }
   for(i = TR::RealRegister::FirstVRF; i <= TR::RealRegister::LastVRF; i++)
      {
      realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
      if (realReg->getState() == TR::RealRegister::Locked)
         locked_vrfs++;
      }
   TR_ASSERT( locked_gprs == machine->getNumberOfLockedRegisters(TR_GPR),"Inconsistent number of locked GPRs");
   TR_ASSERT( locked_fprs == machine->getNumberOfLockedRegisters(TR_FPR),"Inconsistent number of locked FPRs");
   TR_ASSERT( locked_vrfs == machine->getNumberOfLockedRegisters(TR_VRF), "Inconsistent number of locked VRFs");
#endif

   // To handle circular dependencies, we block a real register if (1) it is already assigned to a correct
   // virtual register and (2) if it is assigned to one register in the list but is required by another.
   // However, if all available registers are requested, we do not block in case (2) to avoid all registers
   // being blocked.
   bool block_gprs = true;
   bool block_fprs = true;
   bool block_vrfs = true;

   TR_ASSERT(num_gprs <= (TR::RealRegister::LastGPR - TR::RealRegister::FirstGPR + 1 - machine->getNumberOfLockedRegisters(TR_GPR)), "Too many GPR dependencies, unable to assign" );
   TR_ASSERT(num_fprs <= (TR::RealRegister::LastFPR - TR::RealRegister::FirstFPR + 1 - machine->getNumberOfLockedRegisters(TR_FPR)), "Too many FPR dependencies, unable to assign" );
   TR_ASSERT(num_vrfs <= (TR::RealRegister::LastVRF - TR::RealRegister::FirstVRF + 1 - machine->getNumberOfLockedRegisters(TR_VRF)), "Too many VRF dependencies, unable to assign" );

   if (num_gprs == (TR::RealRegister::LastGPR - TR::RealRegister::FirstGPR + 1 - machine->getNumberOfLockedRegisters(TR_GPR)))
      block_gprs = false;
   if (num_fprs == (TR::RealRegister::LastFPR - TR::RealRegister::FirstFPR + 1 - machine->getNumberOfLockedRegisters(TR_FPR)))
      block_fprs = false;
   if (num_vrfs == (TR::RealRegister::LastVRF - TR::RealRegister::FirstVRF + 1 - machine->getNumberOfLockedRegisters(TR_VRF)))
      block_vrfs = false;

   // Block virtuals whose current assignment must not be disturbed.
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = _dependencies[i].getRegister();
      if (virtReg->getAssignedRealRegister()!=NULL)
         {
         if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg)
            {
            virtReg->block();
            }
         else
            {
            TR::RealRegister::RegNum assignedRegNum;
            assignedRegNum = toRealRegister(virtReg->getAssignedRealRegister())->getRegisterNumber();
            // always block if required register and assigned register match;
            // block if assigned register is required by other dependency but only if
            // any spare registers are left to avoid blocking all existing registers
            if (_dependencies[i].getRealRegister() == assignedRegNum || (map.getDependencyWithTarget(assignedRegNum) && ((virtReg->getKind() != TR_GPR || block_gprs) && (virtReg->getKind() != TR_FPR || block_fprs) && (virtReg->getKind() != TR_VRF || block_vrfs))))
               {
               virtReg->block();
               }
            }
         }
      }

   // Assign all virtual regs that depend on a specific real reg that is free
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = _dependencies[i].getRegister();
      dependentRegNum = _dependencies[i].getRealRegister();
      dependentRealReg = machine->getPPCRealRegister(dependentRegNum);
      if (dependentRegNum != TR::RealRegister::NoReg && dependentRegNum != TR::RealRegister::SpilledReg && dependentRealReg->getState() == TR::RealRegister::Free)
         {
         assignFreeRegisters(currentInstruction, &_dependencies[i], map, cg);
         }
      }

   // Assign all virtual regs that depend on a specfic real reg that is not free
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = _dependencies[i].getRegister();
      assignedRegister = NULL;
      if (virtReg->getAssignedRealRegister() != NULL)
         {
         assignedRegister = toRealRegister(virtReg->getAssignedRealRegister());
         }
      dependentRegNum = _dependencies[i].getRealRegister();
      dependentRealReg = machine->getPPCRealRegister(dependentRegNum);
      if (dependentRegNum != TR::RealRegister::NoReg && dependentRegNum != TR::RealRegister::SpilledReg && dependentRealReg != assignedRegister)
         {
         bool depsBlocked = false;
         switch (_dependencies[i].getRegister()->getKind())
            {
            case TR_GPR:
               depsBlocked = block_gprs;
               break;
            case TR_FPR:
               depsBlocked = block_fprs;
               break;
            case TR_VRF:
               depsBlocked = block_vrfs;
               break;
            }
         assignContendedRegisters(currentInstruction, &_dependencies[i], map, depsBlocked, cg);
         }
      }

   // Assign all virtual regs that depend on NoReg but exclude gr0
   for (i=0; i<numberOfRegisters; i++)
      {
      if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg && _dependencies[i].getExcludeGPR0())
         {
         TR::RealRegister *realOne;
         virtReg = _dependencies[i].getRegister();
         realOne = virtReg->getAssignedRealRegister();
         // If the virtual ended up in gr0, move it to another register.
         if (realOne!=NULL && toRealRegister(realOne)->getRegisterNumber()==TR::RealRegister::gr0)
            {
            if ((assignedRegister = machine->findBestFreeRegister(currentInstruction, virtReg->getKind(), true, false, virtReg)) == NULL)
               {
               assignedRegister = machine->freeBestRegister(currentInstruction, virtReg, NULL, true);
               }
            machine->coerceRegisterAssignment(currentInstruction, virtReg, assignedRegister->getRegisterNumber());
            }
         else if (realOne == NULL)
            {
            machine->assignOneRegister(currentInstruction, virtReg, true);
            }
         virtReg->block();
         }
      }

   // Assign all virtual regs that depend on NoReg
   for (i=0; i<numberOfRegisters; i++)
      {
      if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg && !_dependencies[i].getExcludeGPR0())
         {
         TR::RealRegister *realOne;
         virtReg = _dependencies[i].getRegister();
         realOne = virtReg->getAssignedRealRegister();
         if (!realOne)
            {
            machine->assignOneRegister(currentInstruction, virtReg, false);
            }
         virtReg->block();
         }
      }

   unblockRegisters(numberOfRegisters);

   // Final pass: record the real register chosen for each NoReg dependency
   // and decrement future-use counts.
   for (i = 0; i < numberOfRegisters; i++)
      {
      TR::Register *dependentRegister = getRegisterDependency(i)->getRegister();
      // dependentRegister->getAssignedRegister() is NULL if the reg has already been spilled due to a spilledReg dep
      if (comp->getOption(TR_DisableOOL) || (!(cg->isOutOfLineColdPath()) && !(cg->isOutOfLineHotPath())))
         {
         TR_ASSERT(dependentRegister->getAssignedRegister(), "assignedRegister can not be NULL");
         }
      if (dependentRegister->getAssignedRegister())
         {
         TR::RealRegister *assignedRegister = dependentRegister->getAssignedRegister()->getRealRegister();
         if (getRegisterDependency(i)->getRealRegister() == TR::RealRegister::NoReg)
            getRegisterDependency(i)->setRealRegister(toRealRegister(assignedRegister)->getRegisterNumber());
         machine->decFutureUseCountAndUnlatch(dependentRegister);
         }
      }
   }
void OMR::CodeGenPhase::performProcessRelocationsPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase) { TR::Compilation * comp = cg->comp(); if (comp->getPersistentInfo()->isRuntimeInstrumentationEnabled()) { // This must be called before relocations to generate the relocation data for the profiled instructions. cg->createHWPRecords(); } phase->reportPhase(ProcessRelocationsPhase); TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler()); LexicalTimer pt(phase->getName(), comp->phaseTimer()); cg->processRelocations(); cg->resizeCodeMemory(); cg->registerAssumptions(); cg->syncCode(cg->getBinaryBufferStart(), cg->getBinaryBufferCursor() - cg->getBinaryBufferStart()); if (comp->getOption(TR_EnableOSR)) { if (comp->getOption(TR_TraceOSR) && !comp->getOption(TR_DisableOSRSharedSlots)) { (*comp) << "OSRCompilationData is " << *comp->getOSRCompilationData() << "\n"; } } if (comp->getOption(TR_AOT) && (comp->getOption(TR_TraceRelocatableDataCG) || comp->getOption(TR_TraceRelocatableDataDetailsCG) || comp->getOption(TR_TraceReloCG))) { traceMsg(comp, "\n<relocatableDataCG>\n"); if (comp->getOption(TR_TraceRelocatableDataDetailsCG)) // verbose output { uint8_t * relocatableMethodCodeStart = (uint8_t *)comp->getRelocatableMethodCodeStart(); traceMsg(comp, "Code start = %8x, Method start pc = %x, Method start pc offset = 0x%x\n", relocatableMethodCodeStart, cg->getCodeStart(), cg->getCodeStart() - relocatableMethodCodeStart); } cg->getAheadOfTimeCompile()->dumpRelocationData(); traceMsg(comp, "</relocatableDataCG>\n"); } if (debug("dumpCodeSizes")) { diagnostic("%08d %s\n", cg->getCodeLength(), comp->signature()); } if (comp->getCurrentMethod() == NULL) { comp->getMethodSymbol()->setMethodAddress(cg->getBinaryBufferStart()); } TR_ASSERT(cg->getCodeLength() <= cg->getEstimatedCodeLength(), "Method length estimate must be conservatively large\n" " codeLength = %d, estimatedCodeLength = %d \n", cg->getCodeLength(), cg->getEstimatedCodeLength() ); // also trace 
the interal stack atlas cg->getStackAtlas()->close(cg); TR::SimpleRegex * regex = comp->getOptions()->getSlipTrap(); if (regex && TR::SimpleRegex::match(regex, comp->getCurrentMethod())) { if (TR::Compiler->target.is64Bit()) { setDllSlip((char*)cg->getCodeStart(),(char*)cg->getCodeStart()+cg->getCodeLength(),"SLIPDLL64", comp); } else { setDllSlip((char*)cg->getCodeStart(),(char*)cg->getCodeStart()+cg->getCodeLength(),"SLIPDLL31", comp); } } if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceCGPostBinaryEncoding)) { const char * title = "Post Relocation Instructions"; comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), title, false, true); traceMsg(comp,"<snippets>"); comp->getDebug()->print(comp->getOutFile(), cg->getSnippetList()); traceMsg(comp,"\n</snippets>\n"); auto iterator = cg->getSnippetList().begin(); int32_t estimatedSnippetStart = cg->getEstimatedSnippetStart(); while (iterator != cg->getSnippetList().end()) { estimatedSnippetStart += (*iterator)->getLength(estimatedSnippetStart); ++iterator; } } }
// Satisfy every register dependency in this group at currentInstruction
// during (backwards) register assignment on ARM: handle OOL spill/reload
// bookkeeping, block virtuals whose assignment must be preserved, coerce
// required real registers to convergence via fixed-point loops, then assign
// the NoReg dependencies. Pass ordering is significant.
void TR_ARMRegisterDependencyGroup::assignRegisters(TR::Instruction *currentInstruction, TR_RegisterKinds kindToBeAssigned, uint32_t numberOfRegisters, TR::CodeGenerator *cg)
   {
   TR::Compilation *comp = cg->comp();
   TR::Machine *machine = cg->machine();
   TR::Register *virtReg;
   TR::RealRegister::RegNum dependentRegNum;
   TR::RealRegister *dependentRealReg, *assignedRegister;
   uint32_t i, j;
   bool changed;

   // OOL (out-of-line) pre-pass: reconcile SpilledReg dependencies with the
   // spilled-register list, and release backing storage when leaving an OOL
   // cold section at a label.
   if (!comp->getOption(TR_DisableOOL))
      {
      for (i = 0; i< numberOfRegisters; i++)
         {
         virtReg = dependencies[i].getRegister();
         dependentRegNum = dependencies[i].getRealRegister();
         if (dependentRegNum == TR::RealRegister::SpilledReg)
            {
            TR_ASSERT(virtReg->getBackingStorage(),"should have a backing store if dependentRegNum == spillRegIndex()\n");
            if (virtReg->getAssignedRealRegister())
               {
               // this happens when the register was first spilled in main line path then was reverse spilled
               // and assigned to a real register in OOL path. We protected the backing store when doing
               // the reverse spill so we could re-spill to the same slot now
               traceMsg (comp,"\nOOL: Found register spilled in main line and re-assigned inside OOL");
               TR::Node *currentNode = currentInstruction->getNode();
               TR::RealRegister *assignedReg = toRealRegister(virtReg->getAssignedRegister());
               TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(currentNode, (TR::SymbolReference*)virtReg->getBackingStorage()->getSymbolReference(), sizeof(uintptr_t), cg);
               TR_ARMOpCodes opCode;
               TR_RegisterKinds rk = virtReg->getKind();
               // Pick a reload opcode matching the register kind (GPR load, or
               // single/double precision FP load).
               switch (rk)
                  {
                  case TR_GPR:
                     opCode = ARMOp_ldr;
                     break;
                  case TR_FPR:
                     opCode = virtReg->isSinglePrecision() ? ARMOp_ldfs : ARMOp_ldfd;
                     break;
                  default:
                     TR_ASSERT(0, "\nRegister kind not supported in OOL spill\n");
                     break;
                  }
               // Insert the reload before currentInstruction, then detach the
               // real register so the virtual appears spilled again.
               TR::Instruction *inst = generateTrg1MemInstruction(cg, opCode, currentNode, assignedReg, tempMR, currentInstruction);
               assignedReg->setAssignedRegister(NULL);
               virtReg->setAssignedRegister(NULL);
               assignedReg->setState(TR::RealRegister::Free);
               if (comp->getDebug())
                  cg->traceRegisterAssignment("Generate reload of virt %s due to spillRegIndex dep at inst %p\n", cg->comp()->getDebug()->getName(virtReg),currentInstruction);
               cg->traceRAInstruction(inst);
               }
            // Keep the spilled-register list deduplicated.
            if (!(std::find(cg->getSpilledRegisterList()->begin(), cg->getSpilledRegisterList()->end(), virtReg) != cg->getSpilledRegisterList()->end()))
               cg->getSpilledRegisterList()->push_front(virtReg);
            }
         // we also need to free up all locked backing storage if we are exiting the OOL during backwards RA assignment
         else if (currentInstruction->isLabel() && virtReg->getAssignedRealRegister())
            {
            TR::ARMLabelInstruction *labelInstr = (TR::ARMLabelInstruction *)currentInstruction;
            TR_BackingStore *location = virtReg->getBackingStorage();
            TR_RegisterKinds rk = virtReg->getKind();
            int32_t dataSize;
            if (labelInstr->getLabelSymbol()->isStartOfColdInstructionStream() && location)
               {
               traceMsg (comp,"\nOOL: Releasing backing storage (%p)\n", location);
               if (rk == TR_GPR)
                  dataSize = TR::Compiler->om.sizeofReferenceAddress();
               else
                  dataSize = 8;
               location->setMaxSpillDepth(0);
               cg->freeSpill(location,dataSize,0);
               virtReg->setBackingStorage(NULL);
               }
            }
         }
      }

   // Block virtuals whose current assignment must not be disturbed: either
   // they have no specific target (NoReg), or their assigned real register is
   // required by some dependency in this group.
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = dependencies[i].getRegister();
      if (virtReg->getAssignedRealRegister()!=NULL)
         {
         if (dependencies[i].getRealRegister() == TR::RealRegister::NoReg)
            {
            virtReg->block();
            }
         else
            {
            dependentRegNum = toRealRegister(virtReg->getAssignedRealRegister())->getRegisterNumber();
            for (j=0; j<numberOfRegisters; j++)
               {
               if (dependentRegNum == dependencies[j].getRealRegister())
                  {
                  virtReg->block();
                  break;
                  }
               }
            }
         }
      }

   // Fixed-point loop 1: repeatedly coerce virtuals into their required real
   // registers while those real registers are Free, until no progress.
   do
      {
      changed = false;
      for (i = 0; i < numberOfRegisters; i++)
         {
         virtReg = dependencies[i].getRegister();
         dependentRegNum = dependencies[i].getRealRegister();
         dependentRealReg = machine->getRealRegister(dependentRegNum);
         if (dependentRegNum != TR::RealRegister::NoReg && dependentRegNum != TR::RealRegister::SpilledReg && dependentRealReg->getState() == TR::RealRegister::Free)
            {
            machine->coerceRegisterAssignment(currentInstruction, virtReg, dependentRegNum);
            virtReg->block();
            changed = true;
            }
         }
      } while (changed == true);

   // Fixed-point loop 2: coerce the remaining virtuals whose required real
   // register is not the one they currently hold, until no progress.
   do
      {
      changed = false;
      for (i = 0; i < numberOfRegisters; i++)
         {
         virtReg = dependencies[i].getRegister();
         assignedRegister = NULL;
         if (virtReg->getAssignedRealRegister() != NULL)
            {
            assignedRegister = toRealRegister(virtReg->getAssignedRealRegister());
            }
         dependentRegNum = dependencies[i].getRealRegister();
         dependentRealReg = machine->getRealRegister(dependentRegNum);
         if (dependentRegNum != TR::RealRegister::NoReg && dependentRegNum != TR::RealRegister::SpilledReg && dependentRealReg != assignedRegister)
            {
            machine->coerceRegisterAssignment(currentInstruction, virtReg, dependentRegNum);
            virtReg->block();
            changed = true;
            }
         }
      } while (changed == true);

   // Assign all virtual regs that depend on NoReg, honouring ExcludeGPR0.
   for (i=0; i<numberOfRegisters; i++)
      {
      if (dependencies[i].getRealRegister() == TR::RealRegister::NoReg)
         {
         bool excludeGPR0 = dependencies[i].getExcludeGPR0()?true:false;
         TR::RealRegister *realOne;
         virtReg = dependencies[i].getRegister();
         realOne = virtReg->getAssignedRealRegister();
         // If the virtual ended up in gr0 but must avoid it, move it elsewhere.
         if (realOne!=NULL && excludeGPR0 && toRealRegister(realOne)->getRegisterNumber()==TR::RealRegister::gr0)
            {
            if ((assignedRegister = machine->findBestFreeRegister(virtReg->getKind(), true)) == NULL)
               {
               assignedRegister = machine->freeBestRegister(currentInstruction, virtReg->getKind(), NULL, true);
               }
            machine->coerceRegisterAssignment(currentInstruction, virtReg, assignedRegister->getRegisterNumber());
            }
         else if (realOne == NULL)
            {
            // No current assignment: take a free/freed register on first use
            // (backwards walk), otherwise reverse the spill state.
            if (virtReg->getTotalUseCount() == virtReg->getFutureUseCount())
               {
               if ((assignedRegister = machine->findBestFreeRegister(virtReg->getKind(), excludeGPR0, true)) == NULL)
                  {
                  assignedRegister = machine->freeBestRegister(currentInstruction, virtReg->getKind(), NULL, excludeGPR0);
                  }
               }
            else
               {
               assignedRegister = machine->reverseSpillState(currentInstruction, virtReg, NULL, excludeGPR0);
               }
            virtReg->setAssignedRegister(assignedRegister);
            assignedRegister->setAssignedRegister(virtReg);
            assignedRegister->setState(TR::RealRegister::Assigned);
            virtReg->block();
            }
         }
      }

   unblockRegisters(numberOfRegisters);

   // Final pass: record the real register chosen for each NoReg dependency
   // and decrement future-use counts, unlatching registers whose last use
   // this is.
   for (i = 0; i < numberOfRegisters; i++)
      {
      TR::Register *dependentRegister = getRegisterDependency(i)->getRegister();
      if (dependentRegister->getAssignedRegister())
         {
         TR::RealRegister *assignedRegister = dependentRegister->getAssignedRegister()->getRealRegister();
         if (getRegisterDependency(i)->getRealRegister() == TR::RealRegister::NoReg)
            getRegisterDependency(i)->setRealRegister(toRealRegister(assignedRegister)->getRegisterNumber());
         if (dependentRegister->decFutureUseCount() == 0)
            {
            dependentRegister->setAssignedRegister(NULL);
            assignedRegister->setAssignedRegister(NULL);
            assignedRegister->setState(TR::RealRegister::Unlatched); // Was setting to Free
            }
         }
      }
   }