uint8_t TR::ExternalOrderedPair32BitRelocation::collectModifier()
   {
   TR::Compilation *comp = TR::comp();
   uint8_t * relocatableMethodCodeStart = (uint8_t *)comp->getRelocatableMethodCodeStart();
   uint8_t * updateLocation;
   uint8_t * updateLocation2;
   TR_ExternalRelocationTargetKind kind = getTargetKind();

   // The kind list here must match the one in apply() below: both methods must
   // compute offsets from the same locations or the short/wide classification
   // will not describe the offsets actually recorded.
   if (TR::Compiler->target.cpu.isPower() &&
       (kind == TR_ArrayCopyHelper || kind == TR_ArrayCopyToc || kind == TR_RamMethodSequence || kind == TR_GlobalValue || kind == TR_BodyInfoAddressLoad || kind == TR_DataAddress || kind == TR_DebugCounter))
      {
      TR::Instruction *instr = (TR::Instruction *)getUpdateLocation();
      TR::Instruction *instr2 = (TR::Instruction *)getLocation2();
      updateLocation = instr->getBinaryEncoding();
      updateLocation2 = instr2->getBinaryEncoding();
      }
   else
      {
      updateLocation = getUpdateLocation();
      updateLocation2 = getLocation2();
      }

   int32_t iLoc = updateLocation - relocatableMethodCodeStart;
   int32_t iLoc2 = updateLocation2 - relocatableMethodCodeStart;
   AOTcgDiag0(comp, "TR::ExternalOrderedPair32BitRelocation::collectModifier\n");
   if ((iLoc < MIN_SHORT_OFFSET || iLoc > MAX_SHORT_OFFSET) || (iLoc2 < MIN_SHORT_OFFSET || iLoc2 > MAX_SHORT_OFFSET))
      return RELOCATION_TYPE_WIDE_OFFSET | RELOCATION_TYPE_ORDERED_PAIR;

   return RELOCATION_TYPE_ORDERED_PAIR;
   }
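// Illustrative sketch (not OMR code): how a consumer of the modifier byte
// returned above might interpret the two flags. The concrete flag values and
// offset widths are assumptions for illustration; the real constants are
// defined by the AOT relocation runtime headers.
#include <cstdint>
#include <cstdio>

static const uint8_t kWideOffsetFlag  = 0x01; // assumed stand-in for RELOCATION_TYPE_WIDE_OFFSET
static const uint8_t kOrderedPairFlag = 0x02; // assumed stand-in for RELOCATION_TYPE_ORDERED_PAIR

static void describeModifier(uint8_t modifier)
   {
   // An ordered-pair relocation records two code offsets per site.
   int offsetsPerSite = (modifier & kOrderedPairFlag) ? 2 : 1;
   // Offsets outside [MIN_SHORT_OFFSET, MAX_SHORT_OFFSET] force the wide encoding.
   int bytesPerOffset = (modifier & kWideOffsetFlag) ? 4 : 2;
   printf("%d offset(s) per site, %d byte(s) each\n", offsetsPerSite, bytesPerOffset);
   }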
void TR_OutlinedInstructions::assignRegistersOnOutlinedPath(TR_RegisterKinds kindsToBeAssigned, TR::X86VFPSaveInstruction *vfpSaveInstruction)
   {
   if (hasBeenRegisterAssigned())
      {
      TR_ASSERT(0, "these registers should not have been assigned already");
      return;
      }

   // Register assign the outlined instructions.
   //
   _cg->doBackwardsRegisterAssignment(kindsToBeAssigned, _appendInstruction);

   // Ensure correct VFP state at the start of the outlined instruction sequence.
   //
   generateVFPRestoreInstruction(comp()->getAppendInstruction(), vfpSaveInstruction, _cg);

   // Link the helper stream into the mainline code.
   //
   TR::Instruction *appendInstruction = comp()->getAppendInstruction();
   appendInstruction->setNext(_firstInstruction);
   _firstInstruction->setPrev(appendInstruction);
   comp()->setAppendInstruction(_appendInstruction);

   setHasBeenRegisterAssigned(true);
   }
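// Minimal sketch of the stream splice performed above, assuming a plain
// doubly-linked node (the real TR::Instruction API is richer). The outlined
// stream [first .. last] is appended after the mainline append point, and the
// caller records `last` as the new append instruction.
struct InstrNode
   {
   InstrNode *prev;
   InstrNode *next;
   };

static InstrNode *spliceAfter(InstrNode *appendPoint, InstrNode *first, InstrNode *last)
   {
   appendPoint->next = first;  // mainline tail now flows into the outlined stream
   first->prev = appendPoint;  // back link so backwards walks cross the boundary
   return last;                // becomes the compilation's new append instruction
   }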
void TR_PPCOutOfLineCodeSection::assignRegisters(TR_RegisterKinds kindsToBeAssigned)
   {
   TR::Compilation* comp = _cg->comp();
   if (hasBeenRegisterAssigned())
      return;

   // nested internal control flow assert:
   _cg->setInternalControlFlowSafeNestingDepth(_cg->internalControlFlowNestingDepth());

   // Create a dependency list on the first instruction in this stream that captures all
   // current real register associations. This is necessary to get the register assigner
   // back into its original state before the helper stream was processed.
   _cg->incOutOfLineColdPathNestedDepth();

   // This prevents the OOL entry label from resetting all registers' start-of-range marks during RA.
   _cg->toggleIsInOOLSection();
   TR::RegisterDependencyConditions *liveRealRegDeps = _cg->machine()->createCondForLiveAndSpilledGPRs(true, _cg->getSpilledRegisterList());
   if (liveRealRegDeps)
      _firstInstruction->setDependencyConditions(liveRealRegDeps);
   _cg->toggleIsInOOLSection(); // toggle it back because swapInstructionListsWithCompilation() also calls toggle...

   // Register assign the helper dispatch instructions.
   swapInstructionListsWithCompilation();
   _cg->doRegisterAssignment(kindsToBeAssigned);
   swapInstructionListsWithCompilation();

   _cg->decOutOfLineColdPathNestedDepth();

   // Returning to mainline, reset this counter.
   _cg->setInternalControlFlowSafeNestingDepth(0);

   // Link the helper stream into the mainline code: the OOL items end up
   // attached at the bottom of the instruction stream.
   TR::Instruction *appendInstruction = _cg->getAppendInstruction();
   appendInstruction->setNext(_firstInstruction);
   _firstInstruction->setPrev(appendInstruction);
   _cg->setAppendInstruction(_appendInstruction);

   setHasBeenRegisterAssigned(true);
   }
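// Hedged sketch of the swapInstructionListsWithCompilation() pattern used
// above: temporarily make the out-of-line stream look like the compilation's
// mainline stream so doRegisterAssignment() processes it, then swap back.
// The Stream type and its fields are simplified stand-ins, not the OMR classes.
#include <utility>

struct SketchInstr;

struct Stream
   {
   SketchInstr *first;
   SketchInstr *append;
   };

static void swapStreams(Stream &compilation, Stream &outOfLine)
   {
   std::swap(compilation.first,  outOfLine.first);
   std::swap(compilation.append, outOfLine.append);
   }

// Usage mirrors the call site: swap, run the register assigner over what is
// now the "mainline" stream, then swap again to restore the original lists.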
void TR::ExternalOrderedPair32BitRelocation::apply(TR::CodeGenerator *codeGen)
   {
   TR::Compilation *comp = codeGen->comp();
   AOTcgDiag0(comp, "TR::ExternalOrderedPair32BitRelocation::apply\n");

   TR::IteratedExternalRelocation *rec = getRelocationRecord();
   uint8_t *codeStart = (uint8_t *)comp->getRelocatableMethodCodeStart();
   TR_ExternalRelocationTargetKind kind = getRelocationRecord()->getTargetKind();
   if (TR::Compiler->target.cpu.isPower() &&
       (kind == TR_ArrayCopyHelper || kind == TR_ArrayCopyToc || kind == TR_RamMethodSequence || kind == TR_GlobalValue || kind == TR_BodyInfoAddressLoad || kind == TR_DataAddress || kind == TR_DebugCounter))
      {
      TR::Instruction *instr = (TR::Instruction *)getUpdateLocation();
      TR::Instruction *instr2 = (TR::Instruction *)getLocation2();
      rec->addRelocationEntry((uint32_t)(instr->getBinaryEncoding() - codeStart));
      rec->addRelocationEntry((uint32_t)(instr2->getBinaryEncoding() - codeStart));
      }
   else
      {
      rec->addRelocationEntry(getUpdateLocation() - codeStart);
      rec->addRelocationEntry(getLocation2() - codeStart);
      }
   }
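// Sketch of the ordered-pair record that apply() populates: two consecutive
// entries per relocation site, in a fixed order that consumers rely on. The
// type below is an assumed simplification of TR::IteratedExternalRelocation,
// for illustration only.
#include <cstdint>
#include <vector>

struct OrderedPairSketch
   {
   std::vector<uint32_t> entries; // two consecutive entries per relocation site

   void addSite(uint32_t firstOffset, uint32_t secondOffset)
      {
      // Order matters: readers consume the pair back in the same order.
      entries.push_back(firstOffset);
      entries.push_back(secondOffset);
      }
   };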
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

#if defined(PYTHON) && 0
   // For Python, store the instruction that contains the GC map at this site into
   // the frame object.
   //
   TR::SymbolReference *frameObjectSymRef =
      comp()->getSymRefTab()->findOrCreateAutoSymbol(comp()->getMethodSymbol(), 0, TR::Address, true, false, true);

   TR::Register *frameObjectRegister = cg()->allocateRegister();
   generateRegMemInstruction(
         L8RegMem,
         callNode,
         frameObjectRegister,
         generateX86MemoryReference(frameObjectSymRef, cg()),
         cg());

   TR::RealRegister *espReal = cg()->machine()->getX86RealRegister(TR::RealRegister::esp);
   TR::Register *gcMapPCRegister = cg()->allocateRegister();

   generateRegMemInstruction(
         LEA8RegMem,
         callNode,
         gcMapPCRegister,
         generateX86MemoryReference(espReal, -8, cg()),
         cg());

   // Use "volatile" registers across the call. Once proper register map support
   // is implemented, r14 and r15 will no longer be volatile and a different pair
   // should be chosen.
   //
   TR::RegisterDependencyConditions *gcMapDeps = generateRegisterDependencyConditions(0, 2, cg());
   gcMapDeps->addPostCondition(frameObjectRegister, TR::RealRegister::r14, cg());
   gcMapDeps->addPostCondition(gcMapPCRegister, TR::RealRegister::r15, cg());
   gcMapDeps->stopAddingPostConditions();

   generateMemRegInstruction(
         S8MemReg,
         callNode,
         generateX86MemoryReference(frameObjectRegister, fe()->getPythonGCMapPCOffsetInFrame(), cg()),
         gcMapPCRegister,
         gcMapDeps,
         cg());

   cg()->stopUsingRegister(frameObjectRegister);
   cg()->stopUsingRegister(gcMapPCRegister);
#endif

   TR::Instruction *instr;

   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
            MOV8RegImm64,
            callNode,
            scratchReg,
            (uintptr_t)methodSymbol->getMethodAddress(),
            methodSymRef,
            cg());

      if (TR::Options::getCmdLineOptions()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptrj_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
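// Sketch of the dispatch choice made above, under the assumption that a
// 64-bit absolute target cannot be reached by a direct CALL: the x86-64 CALL
// instruction takes only a signed 32-bit displacement, so a known absolute
// address is first materialized in a scratch register (MOV8RegImm64) and
// called indirectly (CALLReg); otherwise a CALLImm4 is emitted for later
// binding/relocation. The scratch-register choice below is illustrative.
#include <cstdint>
#include <cstdio>

static void emitCallSketch(uint64_t targetAddress)
   {
   if (targetAddress != 0)
      {
      // mov scratch, imm64 ; call scratch
      printf("mov r11, 0x%llx\ncall r11\n", (unsigned long long)targetAddress);
      }
   else
      {
      // call rel32 -- the 32-bit displacement is patched once the target is known
      printf("call rel32  ; to be patched\n");
      }
   }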
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

   TR::Instruction *instr;

   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
            MOV8RegImm64,
            callNode,
            scratchReg,
            (uintptr_t)methodSymbol->getMethodAddress(),
            methodSymRef,
            cg());

      if (comp()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptrj_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
void TR_OutlinedInstructions::assignRegisters(TR_RegisterKinds kindsToBeAssigned, TR::X86VFPSaveInstruction *vfpSaveInstruction)
   {
   if (hasBeenRegisterAssigned())
      return;

   // nested internal control flow assert:
   _cg->setInternalControlFlowSafeNestingDepth(_cg->internalControlFlowNestingDepth());

   // Create a dependency list on the first instruction in this stream that captures all
   // current real register associations. This is necessary to get the register assigner
   // back into its original state before the helper stream was processed.
   //
   TR::RegisterDependencyConditions *liveRealRegDeps = _cg->machine()->createDepCondForLiveGPRs();
   _firstInstruction->setDependencyConditions(liveRealRegDeps);

#if 0
   // If the outlined section jumps back to a section that's expecting a certain register
   // state then add register dependencies on the exit branch to set that state.
   //
   if (_postDependencyMergeList)
      {
      TR::RegisterDependencyConditions *mergeDeps = _postDependencyMergeList->clone(_cg);

      TR_ASSERT(_appendInstruction->getDependencyConditions() == NULL, "unexpected reg deps on OOL append instruction");

      _appendInstruction->setDependencyConditions(mergeDeps);

      TR_X86RegisterDependencyGroup *depGroup = mergeDeps->getPostConditions();
      for (int32_t i=0; i<mergeDeps->getNumPostConditions(); i++)
         {
         TR::RegisterDependency *dependency = depGroup->getRegisterDependency(i);
         TR::Register *virtReg = dependency->getRegister();
         virtReg->incTotalUseCount();
         virtReg->incFutureUseCount();

#ifdef DEBUG
         // Ensure all register dependencies have been assigned.
         //
         TR_ASSERT(dependency->getRealRegister() != TR::RealRegister::NoReg, "unassigned merge dep register");
         TR_ASSERT(virtReg->getAssignedRealRegister() == _cg->machine()->getX86RealRegister(dependency->getRealRegister()), "unexpected(?) register assignment");
#endif
         }
      }
#endif

   // TODO:AMD64: Fix excessive register assignment exchanges in outlined instruction dispatch.

   // Ensure correct VFP state at the start of the outlined instruction sequence.
   //
   generateVFPRestoreInstruction(comp()->getAppendInstruction(), vfpSaveInstruction, _cg);

   // Link the helper stream into the mainline code.
   //
   TR::Instruction *appendInstruction = comp()->getAppendInstruction();
   appendInstruction->setNext(_firstInstruction);
   _firstInstruction->setPrev(appendInstruction);
   comp()->setAppendInstruction(_appendInstruction);

   // Register assign the helper dispatch instructions.
   //
   _cg->doBackwardsRegisterAssignment(kindsToBeAssigned, _appendInstruction, appendInstruction);

   // Returning to mainline, reset this counter.
   _cg->setInternalControlFlowSafeNestingDepth(0);

   setHasBeenRegisterAssigned(true);
   }
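// Hedged sketch of the backwards register-assignment walk invoked above:
// start from the last outlined instruction (_appendInstruction) and stop once
// the walk reaches the mainline append point (exclusive). The node type is a
// simplified stand-in for TR::Instruction.
struct BackwardsInstr
   {
   BackwardsInstr *prev;
   void assignRegisters() { /* allocate real registers for this instruction */ }
   };

static void assignBackwards(BackwardsInstr *last, BackwardsInstr *stopBefore)
   {
   // Walk [last .. stopBefore) in reverse program order, since register
   // assignment on this code generator proceeds bottom-up.
   for (BackwardsInstr *cursor = last; cursor != stopBefore; cursor = cursor->prev)
      cursor->assignRegisters();
   }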