Example #1
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile registers + 1 for the return register (if any)
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined(PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

#if defined(PYTHON) && 0
   // For Python, store the instruction that contains the GC map at this site into
   // the frame object.
   //
   TR::SymbolReference *frameObjectSymRef =
      comp()->getSymRefTab()->findOrCreateAutoSymbol(comp()->getMethodSymbol(), 0, TR::Address, true, false, true);

   TR::Register *frameObjectRegister = cg()->allocateRegister();
   generateRegMemInstruction(
         L8RegMem,
         callNode,
         frameObjectRegister,
         generateX86MemoryReference(frameObjectSymRef, cg()),
         cg());

   TR::RealRegister *espReal = cg()->machine()->getX86RealRegister(TR::RealRegister::esp);
   TR::Register *gcMapPCRegister = cg()->allocateRegister();

   generateRegMemInstruction(
         LEA8RegMem,
         callNode,
         gcMapPCRegister,
         generateX86MemoryReference(espReal, -8, cg()),
         cg());

   // Use "volatile" registers across the call.  Once proper register map support
   // is implemented, r14 and r15 will no longer be volatile and a different pair
   // should be chosen.
   //
   TR::RegisterDependencyConditions *gcMapDeps = generateRegisterDependencyConditions(0, 2, cg());
   gcMapDeps->addPostCondition(frameObjectRegister, TR::RealRegister::r14, cg());
   gcMapDeps->addPostCondition(gcMapPCRegister, TR::RealRegister::r15, cg());
   gcMapDeps->stopAddingPostConditions();

   generateMemRegInstruction(
         S8MemReg,
         callNode,
         generateX86MemoryReference(frameObjectRegister, fe()->getPythonGCMapPCOffsetInFrame(), cg()),
         gcMapPCRegister,
         gcMapDeps,
         cg());

   cg()->stopUsingRegister(frameObjectRegister);
   cg()->stopUsingRegister(gcMapPCRegister);
#endif

   TR::Instruction *instr;
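   // If the target address is known at compile time, materialize the 64-bit address in
   // the scratch register and call through it (an absolute address may not fit in the
   // 32-bit displacement of a direct call); otherwise emit CALLImm4 against the method's
   // symbol reference so the target can be resolved later.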
   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (TR::Options::getCmdLineOptions()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptr_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
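Example #2 is the same buildDirectDispatch implementation with the disabled PYTHON-specific blocks removed and the relocatable-ELF option queried through comp()->getOption() rather than TR::Options::getCmdLineOptions()->getOption().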
Example #2
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile registers + 1 for the return register (if any)
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

   TR::Instruction *instr;
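   // If the target address is known at compile time, materialize the 64-bit address in
   // the scratch register and call through it (an absolute address may not fit in the
   // 32-bit displacement of a direct call); otherwise emit CALLImm4 against the method's
   // symbol reference so the target can be resolved later.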
   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (comp()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptr_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
Example #3
TR::RegisterDependencyConditions* TR_PPCScratchRegisterDependencyConditions::createDependencyConditions(TR::CodeGenerator *cg,
                                                                                                          TR_PPCScratchRegisterDependencyConditions *pre,
                                                                                                          TR_PPCScratchRegisterDependencyConditions *post)
   {
   int32_t preCount = pre ? pre->getNumberOfDependencies() : 0;
   int32_t postCount = post ? post->getNumberOfDependencies() : 0;
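   // Treat the VSX vector (or scalar) registers as live when live-register information
   // is unavailable or reports live registers, and reserve 64 (or 32) extra pre/post
   // dependency slots so the non-preserved VSRs can be added as placeholders below.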
   TR_LiveRegisters *lrVector = cg->getLiveRegisters(TR_VSX_VECTOR);
   bool liveVSXVectorReg = (!lrVector || (lrVector->getNumberOfLiveRegisters() > 0));
   TR_LiveRegisters *lrScalar = cg->getLiveRegisters(TR_VSX_SCALAR);
   bool liveVSXScalarReg = (!lrScalar || (lrScalar->getNumberOfLiveRegisters() > 0));
   if (liveVSXVectorReg)
      {
      preCount += 64;
      postCount += 64;
      }
   else if (liveVSXScalarReg)
      {
      preCount += 32;
      postCount += 32;
      }

   TR::RegisterDependencyConditions *dependencies = new (cg->trHeapMemory()) TR::RegisterDependencyConditions(preCount,
                                                                                                                  postCount,
                                                                                                                  cg->trMemory());
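   // Copy the GPR pre/post dependencies (preserving any exclude-GPR0 constraints),
   // followed by the CCR dependencies.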
   for (int i = 0; i < (pre ? pre->_numGPRDeps : 0); ++i)
      {
      dependencies->addPreCondition(pre->_gprDeps[i].getRegister(), pre->_gprDeps[i].getRealRegister(), pre->_gprDeps[i].getFlags());
      if (pre->_excludeGPR0 & (1 << i))
         dependencies->getPreConditions()->getRegisterDependency(i)->setExcludeGPR0();
      }
   for (int i = 0; i < (post ? post->_numGPRDeps : 0); ++i)
      {
      dependencies->addPostCondition(post->_gprDeps[i].getRegister(), post->_gprDeps[i].getRealRegister(), post->_gprDeps[i].getFlags());
      if (post->_excludeGPR0 & (1 << i))
         dependencies->getPostConditions()->getRegisterDependency(i)->setExcludeGPR0();
      }
   for (int i = 0; i < (pre ? pre->_numCCRDeps : 0); ++i)
      {
      dependencies->addPreCondition(pre->_ccrDeps[i].getRegister(), pre->_ccrDeps[i].getRealRegister(), pre->_ccrDeps[i].getFlags());
      }
   for (int i = 0; i < (post ? post->_numCCRDeps : 0); ++i)
      {
      dependencies->addPostCondition(post->_ccrDeps[i].getRegister(), post->_ccrDeps[i].getRealRegister(), post->_ccrDeps[i].getFlags());
      }

   const TR_PPCLinkageProperties& properties = cg->getLinkage()->getProperties();
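   // Add each non-preserved (volatile) VSX register as a placeholder pre/post
   // dependency so the register assigner treats it as clobbered at this point.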
   if (liveVSXVectorReg)
      {
      for (int32_t i=TR::RealRegister::FirstVSR; i<=TR::RealRegister::LastVSR; i++)
         {
         if (!properties.getPreserved((TR::RealRegister::RegNum)i))
            {
            TR::Register *vreg = cg->allocateRegister(TR_FPR);
            vreg->setPlaceholderReg();
            dependencies->addPreCondition(vreg, (TR::RealRegister::RegNum)i);
            dependencies->addPostCondition(vreg, (TR::RealRegister::RegNum)i);
            }
         }
      }
   else
      {
      if (liveVSXScalarReg)
         {
         for (int32_t i=TR::RealRegister::vsr32; i<=TR::RealRegister::LastVSR; i++)
            {
            if (!properties.getPreserved((TR::RealRegister::RegNum)i))
               {
               TR::Register *vreg = cg->allocateRegister(TR_FPR);
               vreg->setPlaceholderReg();
               dependencies->addPreCondition(vreg, (TR::RealRegister::RegNum)i);
               dependencies->addPostCondition(vreg, (TR::RealRegister::RegNum)i);
               }
            }
         }
      }

   return dependencies;
   }
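The following is not part of the excerpt above: a minimal usage sketch of createDependencyConditions. It assumes the scratch-dependency class exposes an addDependency(cg, virtualReg, realRegNum) helper for populating its GPR/CCR lists; that helper and the chosen registers are assumptions for illustration, not taken from this code.
   // Minimal usage sketch (assumed addDependency() helper; gr11/cr0 chosen arbitrarily).
   TR_PPCScratchRegisterDependencyConditions scratchDeps;
   scratchDeps.addDependency(cg, cg->allocateRegister(), TR::RealRegister::gr11);      // scratch GPR (assumed API)
   scratchDeps.addDependency(cg, cg->allocateRegister(TR_CCR), TR::RealRegister::cr0); // scratch CCR (assumed API)

   // Merge the scratch set into a single dependency object, used as both the pre- and
   // post-conditions across the call site.
   TR::RegisterDependencyConditions *conditions =
      TR_PPCScratchRegisterDependencyConditions::createDependencyConditions(cg, &scratchDeps, &scratchDeps);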
Example #4
void TR_OutlinedInstructions::assignRegisters(TR_RegisterKinds kindsToBeAssigned, TR::X86VFPSaveInstruction *vfpSaveInstruction)
   {
   if (hasBeenRegisterAssigned())
      return;

   // Record the current internal control flow nesting depth as the safe baseline for
   // the register assigner's nested internal control flow assertion.
   _cg->setInternalControlFlowSafeNestingDepth(_cg->internalControlFlowNestingDepth());

   // Create a dependency list on the first instruction in this stream that captures all
   // current real register associations.  This is necessary to get the register assigner
   // back into its original state before the helper stream was processed.
   //
   TR::RegisterDependencyConditions *liveRealRegDeps = _cg->machine()->createDepCondForLiveGPRs();
   _firstInstruction->setDependencyConditions(liveRealRegDeps);

#if 0
   // If the outlined section jumps back to a section that's expecting a certain register
   // state then add register dependencies on the exit branch to set that state.
   //
   if (_postDependencyMergeList)
      {
      TR::RegisterDependencyConditions *mergeDeps = _postDependencyMergeList->clone(_cg);

      TR_ASSERT(_appendInstruction->getDependencyConditions() == NULL, "unexpected reg deps on OOL append instruction");

      _appendInstruction->setDependencyConditions(mergeDeps);

      TR_X86RegisterDependencyGroup *depGroup = mergeDeps->getPostConditions();
      for (int32_t i=0; i<mergeDeps->getNumPostConditions(); i++)
         {
         TR::RegisterDependency *dependency = depGroup->getRegisterDependency(i);
         TR::Register *virtReg = dependency->getRegister();
         virtReg->incTotalUseCount();
         virtReg->incFutureUseCount();

#ifdef DEBUG
         // Ensure all register dependencies have been assigned.
         //
         TR_ASSERT(dependency->getRealRegister() != TR::RealRegister::NoReg, "unassigned merge dep register");
         TR_ASSERT(virtReg->getAssignedRealRegister() == _cg->machine()->getX86RealRegister(dependency->getRealRegister()), "unexpected(?) register assignment");
#endif
         }
      }
#endif


   // TODO:AMD64: Fix excessive register assignment exchanges in outlined instruction dispatch.

   // Ensure correct VFP state at the start of the outlined instruction sequence.
   //
   generateVFPRestoreInstruction(comp()->getAppendInstruction(), vfpSaveInstruction, _cg);

   // Link the helper stream into the mainline code.
   //
   TR::Instruction *appendInstruction = comp()->getAppendInstruction();
   appendInstruction->setNext(_firstInstruction);
   _firstInstruction->setPrev(appendInstruction);
   comp()->setAppendInstruction(_appendInstruction);

   // Register assign the helper dispatch instructions.
   //
   _cg->doBackwardsRegisterAssignment(kindsToBeAssigned, _appendInstruction, appendInstruction);

   // Returning to the mainline; reset the safe nesting depth.
   _cg->setInternalControlFlowSafeNestingDepth(0);

   setHasBeenRegisterAssigned(true);
   }
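As a side note, here is a simplified, self-contained model of the list splice performed above; the Instr type is hypothetical and for illustration only, not the OMR API. The out-of-line stream [first .. last] is linked after the current append instruction, and last becomes the new append point.
#include <cassert>

// Hypothetical, simplified model of the doubly-linked splice above; not the OMR API.
struct Instr
   {
   Instr *prev = nullptr;
   Instr *next = nullptr;
   };

// Splice the out-of-line stream [first .. last] after appendPoint and return the new
// append point, mirroring the setNext()/setPrev()/setAppendInstruction() calls above.
Instr *spliceOutlinedStream(Instr *appendPoint, Instr *first, Instr *last)
   {
   assert(appendPoint && first && last);
   appendPoint->next = first;   // mainline append instruction -> first outlined instruction
   first->prev = appendPoint;   // back link walked by backwards register assignment
   return last;                 // later instructions append after the outlined stream
   }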