Example #1
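// Clone the post-conditions of this dependency set, appending the post-conditions
// of 'added' (if non-NULL). Pre-conditions are not copied.
//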
TR::RegisterDependencyConditions *OMR::Power::RegisterDependencyConditions::cloneAndFix(
   TR::CodeGenerator * cg, TR::RegisterDependencyConditions *added)
   {
   TR::RegisterDependencyConditions *result;
   TR::RegisterDependency           *singlePair;
   int32_t      idx, postNum, addPost=0;

   if (added != NULL)
      {
      addPost = added->getAddCursorForPost();
      }
   postNum = this->getAddCursorForPost();
   result = new (cg->trHeapMemory()) TR::RegisterDependencyConditions(0, postNum+addPost, cg->trMemory());

   for (idx=0; idx<postNum; idx++)
      {
      singlePair = this->getPostConditions()->getRegisterDependency(idx);
      result->addPostCondition(singlePair->getRegister(), singlePair->getRealRegister(),
                               singlePair->getFlags());
      }

   for (idx=0; idx<addPost; idx++)
      {
      singlePair = added->getPostConditions()->getRegisterDependency(idx);
      result->addPostCondition(singlePair->getRegister(), singlePair->getRealRegister(),
                               singlePair->getFlags());
      }

   return result;
   }
Example #2
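// Clone the pre- and post-conditions of this dependency set, appending those of
// 'added' (if non-NULL). A pre-condition on gr0 is redirected to the virtual
// register that the gr0 post-condition uses.
//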
TR::RegisterDependencyConditions *OMR::ARM::RegisterDependencyConditions::cloneAndFix(
   TR::CodeGenerator * cg, TR::RegisterDependencyConditions *added)
   {
   TR::RegisterDependencyConditions *result;
   TR_ARMRegisterDependency           *singlePair;
   int32_t      idx, preNum, postNum, addPre=0, addPost=0;
   TR::Register *postReg = NULL, *tempReg; // postReg: virtual register paired with gr0 in the post-conditions, if any
   TR::RealRegister::RegNum rnum;

   if (added != NULL)
      {
      addPre = added->getAddCursorForPre();
      addPost = added->getAddCursorForPost();
      }
   preNum = this->getAddCursorForPre();
   postNum = this->getAddCursorForPost();
   result = new (cg->trHeapMemory()) TR::RegisterDependencyConditions(preNum + addPre, postNum + addPost, cg->trMemory());

   for (idx=0; idx<postNum; idx++)
      {
      singlePair = this->getPostConditions()->getRegisterDependency(idx);
      rnum       = singlePair->getRealRegister();
      tempReg    = singlePair->getRegister();
      result->addPostCondition(tempReg, rnum, singlePair->getFlags());
      if (rnum == TR::RealRegister::gr0)
         {
         postReg = tempReg;
         }
      }

   for (idx=0; idx<preNum; idx++)
      {
      singlePair = this->getPreConditions()->getRegisterDependency(idx);
      rnum       = singlePair->getRealRegister();
      tempReg    = singlePair->getRegister();
      if (rnum == TR::RealRegister::gr0)
         tempReg = postReg;
      result->addPreCondition(tempReg, rnum, singlePair->getFlags());
      }

   for (idx=0; idx<addPost; idx++)
      {
      singlePair = added->getPostConditions()->getRegisterDependency(idx);
      result->addPostCondition(singlePair->getRegister(), singlePair->getRealRegister(),
                               singlePair->getFlags());
      }

   for (idx=0; idx<addPre; idx++)
      {
      singlePair = added->getPreConditions()->getRegisterDependency(idx);
      result->addPreCondition(singlePair->getRegister(), singlePair->getRealRegister(),
                              singlePair->getFlags());
      }
   return result;
   }
Example #3
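// Build the instruction sequence for an indirect (computed) call under the AMD64
// system linkage: evaluate the call target (VFT), set up the argument and volatile
// register dependencies, emit the call, and return the register holding the result.
//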
TR::Register *
TR::AMD64SystemLinkage::buildIndirectDispatch(TR::Node *callNode)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR_ASSERT(methodSymRef->getSymbol()->castToMethodSymbol()->isComputed(), "system linkage only supports computed indirect call for now %p\n", callNode);

   // Evaluate VFT
   //
   TR::Register *vftRegister;
   TR::Node *vftNode = callNode->getFirstChild();
   if (vftNode->getRegister())
      {
      vftRegister = vftNode->getRegister();
      }
   else
      {
      vftRegister = cg()->evaluate(vftNode);
      }

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers + 1 for VFT register
   // post = number of volatile + VMThread + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters() + 1;
   uint32_t post = getProperties().getNumVolatileRegisters() + 1 + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *callDeps = generateRegisterDependencyConditions(pre, 1, cg());

   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   callDeps->addPostCondition(vftRegister, scratchRegIndex, cg());
   callDeps->stopAddingPostConditions();

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, callDeps);

   // Dispatch
   //
   generateRegInstruction(CALLReg, callNode, vftRegister, callDeps, cg());
   cg()->resetIsLeafMethod();

   // Build label post-conditions
   //
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());
   TR::Register *returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
Example #4
// Create a NoReg dependency for each child of a call that has been evaluated into a register.
// Ignore children that do not have a register since their live range should not persist outside of
// the helper call stream.
//
TR::RegisterDependencyConditions *TR_OutlinedInstructions::formEvaluatedArgumentDepList()
   {
   int32_t i, c=0;

   for (i=_callNode->getFirstArgumentIndex(); i<_callNode->getNumChildren(); i++)
      {
      TR::Register *reg = _callNode->getChild(i)->getRegister();
      if (reg)
         {
         TR::RegisterPair *regPair = reg->getRegisterPair();
         c += regPair? 2 : 1;
         }
      }

   TR::RegisterDependencyConditions *depConds = NULL;

   if (c)
      {
      TR::Machine *machine = _cg->machine();
      depConds = generateRegisterDependencyConditions(0, c, _cg);

      for (i=_callNode->getFirstArgumentIndex(); i<_callNode->getNumChildren(); i++)
         {
         TR::Register *reg = _callNode->getChild(i)->getRegister();
         if (reg)
            {
            TR::RegisterPair *regPair = reg->getRegisterPair();
            if (regPair)
               {
               depConds->addPostCondition(regPair->getLowOrder(),  TR::RealRegister::NoReg, _cg);
               depConds->addPostCondition(regPair->getHighOrder(), TR::RealRegister::NoReg, _cg);
               }
            else
               {
               depConds->addPostCondition(reg, TR::RealRegister::NoReg, _cg);
               }
            }
         }

      depConds->stopAddingConditions();
      }

   return depConds;
   }
Example #5
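// Emit the instruction sequence for a 64-bit integer subtraction: 64-bit
// register/memory subtract forms when 64-bit registers are available, otherwise a
// high/low register-pair sequence with explicit borrow handling on 32-bit targets.
//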
void
TR_S390BinaryAnalyser::longSubtractAnalyser(TR::Node * root)
   {
   TR::Node * firstChild;
   TR::Node * secondChild;
   TR::Instruction * cursor = NULL;
   TR::RegisterDependencyConditions * dependencies = NULL;
   bool setsOrReadsCC = NEED_CC(root) || (root->getOpCodeValue() == TR::lusubb);
   TR::InstOpCode::Mnemonic regToRegOpCode;
   TR::InstOpCode::Mnemonic memToRegOpCode;
   TR::Compilation *comp = TR::comp();

   if (TR::Compiler->target.is64Bit() || cg()->use64BitRegsOn32Bit())
      {
      if (!setsOrReadsCC)
         {
         regToRegOpCode = TR::InstOpCode::SGR;
         memToRegOpCode = TR::InstOpCode::SG;
         }
      else
         {
         regToRegOpCode = TR::InstOpCode::SLGR;
         memToRegOpCode = TR::InstOpCode::SLG;
         }
      }
   else
      {
      regToRegOpCode = TR::InstOpCode::SLR;
      memToRegOpCode = TR::InstOpCode::SL;
      }

   firstChild = root->getFirstChild();
   secondChild = root->getSecondChild();
   TR::Register * firstRegister = firstChild->getRegister();
   TR::Register * secondRegister = secondChild->getRegister();

   setInputs(firstChild, firstRegister, secondChild, secondRegister,
             false, false, comp);

   /** Attempt to use SGH to subtract a halfword (64 <- 16).
    *  The second child is a halfword loaded from memory. */
   bool is16BitMemory2Operand = false;
   if (TR::Compiler->target.cpu.getS390SupportsZ14() &&
       secondChild->getOpCodeValue() == TR::s2l &&
       secondChild->getFirstChild()->getOpCodeValue() == TR::sloadi &&
       secondChild->isSingleRefUnevaluated() &&
       secondChild->getFirstChild()->isSingleRefUnevaluated())
      {
      setMem2();
      memToRegOpCode = TR::InstOpCode::SGH;
      is16BitMemory2Operand = true;
      }

   if (getEvalChild1())
      {
      firstRegister = cg()->evaluate(firstChild);
      }

   if (getEvalChild2())
      {
      secondRegister = cg()->evaluate(secondChild);
      }

   remapInputs(firstChild, firstRegister, secondChild, secondRegister);

   if ((root->getOpCodeValue() == TR::lusubb) &&
       TR_S390ComputeCC::setCarryBorrow(root->getChild(2), false, cg()))
      {
      // use SLBGR rather than SLGR/SGR
      //     SLBG rather than SLG/SG
      // or
      // use SLBR rather than SLR
      //     SLB rather than SL
      bool uses64bit = TR::Compiler->target.is64Bit() || cg()->use64BitRegsOn32Bit();
      regToRegOpCode = uses64bit ? TR::InstOpCode::SLBGR : TR::InstOpCode::SLBR;
      memToRegOpCode = uses64bit ? TR::InstOpCode::SLBG  : TR::InstOpCode::SLB;
      }

   if (TR::Compiler->target.is64Bit() || cg()->use64BitRegsOn32Bit())
      {
      if (getCopyReg1())
         {
         TR::Register * thirdReg = cg()->allocate64bitRegister();

         root->setRegister(thirdReg);
         generateRRInstruction(cg(), TR::InstOpCode::LGR, root, thirdReg, firstRegister);
         if (getBinaryReg3Reg2())
            {
            generateRRInstruction(cg(), regToRegOpCode, root, thirdReg, secondRegister);
            }
         else // assert getBinaryReg3Mem2() == true
            {
            TR::MemoryReference * longMR = generateS390MemoryReference(secondChild, cg());

            generateRXInstruction(cg(), memToRegOpCode, root, thirdReg, longMR);
            longMR->stopUsingMemRefRegister(cg());
            }
         }
      else if (getBinaryReg1Reg2())
         {
         generateRRInstruction(cg(), regToRegOpCode, root, firstRegister, secondRegister);

         root->setRegister(firstRegister);
         }
      else // assert getBinaryReg1Mem2() == true
         {
         TR_ASSERT(  !getInvalid(), "TR_S390BinaryAnalyser::invalid case\n");

         TR::Node* baseAddrNode = is16BitMemory2Operand ? secondChild->getFirstChild() : secondChild;
         TR::MemoryReference * longMR = generateS390MemoryReference(baseAddrNode, cg());

         generateRXInstruction(cg(), memToRegOpCode, root, firstRegister, longMR);

         longMR->stopUsingMemRefRegister(cg());
         root->setRegister(firstRegister);

         if(is16BitMemory2Operand)
            {
            cg()->decReferenceCount(secondChild->getFirstChild());
            }
         }

      }
   else    // if 32bit codegen...
      {
      bool zArchTrexsupported = performTransformation(comp, "O^O Use SL/SLB for long sub.");

      TR::Register * highDiff = NULL;
      TR::LabelSymbol * doneLSub = TR::LabelSymbol::create(cg()->trHeapMemory(),cg());
      if (getCopyReg1())
         {
         TR::Register * lowThird = cg()->allocateRegister();
         TR::Register * highThird = cg()->allocateRegister();

         TR::RegisterPair * thirdReg = cg()->allocateConsecutiveRegisterPair(lowThird, highThird);

         highDiff = highThird;

         dependencies = new (cg()->trHeapMemory()) TR::RegisterDependencyConditions(0, 9, cg());
         dependencies->addPostCondition(firstRegister, TR::RealRegister::EvenOddPair);
         dependencies->addPostCondition(firstRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
         dependencies->addPostCondition(firstRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);

         // If 2nd operand has ref count of 1 and can be accessed by a memory reference,
         // then second register will not be used.
         if(secondRegister == firstRegister && !setsOrReadsCC)
            {
            TR_ASSERT( false, "lsub with identical children - fix Simplifier");
            }
         if (secondRegister != NULL && firstRegister != secondRegister)
            {
            dependencies->addPostCondition(secondRegister, TR::RealRegister::EvenOddPair);
            dependencies->addPostCondition(secondRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
            dependencies->addPostCondition(secondRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);
            }
         dependencies->addPostCondition(highThird, TR::RealRegister::AssignAny);

         root->setRegister(thirdReg);
         generateRRInstruction(cg(), TR::InstOpCode::LR, root, highThird, firstRegister->getHighOrder());
         generateRRInstruction(cg(), TR::InstOpCode::LR, root, lowThird, firstRegister->getLowOrder());
         if (getBinaryReg3Reg2())
            {
            if ((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC)
               {
               generateRRInstruction(cg(), regToRegOpCode, root, lowThird, secondRegister->getLowOrder());
               generateRRInstruction(cg(), TR::InstOpCode::SLBR, root, highThird, secondRegister->getHighOrder());
               }
            else
               {
               generateRRInstruction(cg(), TR::InstOpCode::SR, root, highThird, secondRegister->getHighOrder());
               generateRRInstruction(cg(), TR::InstOpCode::SLR, root, lowThird, secondRegister->getLowOrder());
               }
            }
         else // assert getBinaryReg3Mem2() == true
            {
            TR::MemoryReference * highMR = generateS390MemoryReference(secondChild, cg());
            TR::MemoryReference * lowMR = generateS390MemoryReference(*highMR, 4, cg());
            dependencies->addAssignAnyPostCondOnMemRef(highMR);

            if ((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC)
               {
               generateRXInstruction(cg(), memToRegOpCode, root, lowThird, lowMR);
               generateRXInstruction(cg(), TR::InstOpCode::SLB, root, highThird, highMR);
               }
            else
               {
               generateRXInstruction(cg(), TR::InstOpCode::S, root, highThird, highMR);
               generateRXInstruction(cg(), TR::InstOpCode::SL, root, lowThird, lowMR);
               }
            highMR->stopUsingMemRefRegister(cg());
            lowMR->stopUsingMemRefRegister(cg());
            }
         }
      else if (getBinaryReg1Reg2())
         {
         dependencies = new (cg()->trHeapMemory()) TR::RegisterDependencyConditions(0, 6, cg());
         dependencies->addPostCondition(firstRegister, TR::RealRegister::EvenOddPair);
         dependencies->addPostCondition(firstRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
         dependencies->addPostCondition(firstRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);

         if(secondRegister == firstRegister)
            {
            TR_ASSERT( false, "lsub with identical children - fix Simplifier");
            }

         if (secondRegister != firstRegister)
            {
            dependencies->addPostCondition(secondRegister, TR::RealRegister::EvenOddPair);
            dependencies->addPostCondition(secondRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
            dependencies->addPostCondition(secondRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);
            }

         if ((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC)
            {
            generateRRInstruction(cg(), regToRegOpCode, root, firstRegister->getLowOrder(), secondRegister->getLowOrder());
            generateRRInstruction(cg(), TR::InstOpCode::SLBR, root, firstRegister->getHighOrder(), secondRegister->getHighOrder());
            }
         else
            {
            generateRRInstruction(cg(), TR::InstOpCode::SR, root, firstRegister->getHighOrder(), secondRegister->getHighOrder());
            generateRRInstruction(cg(), TR::InstOpCode::SLR, root, firstRegister->getLowOrder(), secondRegister->getLowOrder());
            }

         highDiff = firstRegister->getHighOrder();
         root->setRegister(firstRegister);
         }
      else // assert getBinaryReg1Mem2() == true
         {
         TR_ASSERT(  !getInvalid(),"TR_S390BinaryAnalyser::invalid case\n");

         dependencies = new (cg()->trHeapMemory()) TR::RegisterDependencyConditions(0, 5, cg());
         dependencies->addPostCondition(firstRegister, TR::RealRegister::EvenOddPair);
         dependencies->addPostCondition(firstRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
         dependencies->addPostCondition(firstRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);

         TR::MemoryReference * highMR = generateS390MemoryReference(secondChild, cg());
         TR::MemoryReference * lowMR = generateS390MemoryReference(*highMR, 4, cg());
         dependencies->addAssignAnyPostCondOnMemRef(highMR);

         if ((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC)
            {
            generateRXInstruction(cg(), memToRegOpCode, root, firstRegister->getLowOrder(), lowMR);
            generateRXInstruction(cg(), TR::InstOpCode::SLB, root, firstRegister->getHighOrder(), highMR);
            }
         else
            {
            generateRXInstruction(cg(), TR::InstOpCode::S, root, firstRegister->getHighOrder(), highMR);
            generateRXInstruction(cg(), TR::InstOpCode::SL, root, firstRegister->getLowOrder(), lowMR);
            }
         highDiff = firstRegister->getHighOrder();
         root->setRegister(firstRegister);
         highMR->stopUsingMemRefRegister(cg());
         lowMR->stopUsingMemRefRegister(cg());
         }

      if (!((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC))
         {
         // Check for a borrow out of the LS int. If there was no borrow, we are done.
         generateS390BranchInstruction(cg(), TR::InstOpCode::BRC,TR::InstOpCode::COND_MASK3, root, doneLSub);

         // A borrow occurred in the LS int, so decrement the MS int.
         generateRIInstruction(cg(), TR::InstOpCode::AHI, root, highDiff, -1);

         generateS390LabelInstruction(cg(), TR::InstOpCode::LABEL, root, doneLSub, dependencies);
         }
      }

   cg()->decReferenceCount(firstChild);
   cg()->decReferenceCount(secondChild);

   return;
   }
Example #6
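// Reinterpret the bits of a double as a 64-bit integer (dbits2l). When NaN
// normalization is requested, collapse every NaN encoding to the canonical
// DOUBLE_NAN pattern, either with an inline compare sequence or via an
// out-of-line slow path.
//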
TR::Register *OMR::X86::AMD64::TreeEvaluator::dbits2lEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   // TODO:AMD64: Peepholing
   TR::Node      *child  = node->getFirstChild();
   TR::Register  *sreg   = cg->evaluate(child);
   TR::Register  *treg   = cg->allocateRegister(TR_GPR);
   generateRegRegInstruction(MOVQReg8Reg, node, treg, sreg, cg);
   if (node->normalizeNanValues())
      {
      static char *disableFastNormalizeNaNs = feGetEnv("TR_disableFastNormalizeNaNs");
      if (disableFastNormalizeNaNs)
         {
         // This one is not clever, but it is simple, and it is based directly
         // on the IA32 version, which is known to work, so it is safer.
         //
         TR::RegisterDependencyConditions  *deps = generateRegisterDependencyConditions((uint8_t)0, (uint8_t)1, cg);
         deps->addPostCondition(treg, TR::RealRegister::NoReg, cg);

         TR::IA32ConstantDataSnippet *nan1Snippet = cg->findOrCreate8ByteConstant(node, DOUBLE_NAN_1_LOW);
         TR::IA32ConstantDataSnippet *nan2Snippet = cg->findOrCreate8ByteConstant(node, DOUBLE_NAN_2_LOW);
         TR::MemoryReference      *nan1MR      = generateX86MemoryReference(nan1Snippet, cg);
         TR::MemoryReference      *nan2MR      = generateX86MemoryReference(nan2Snippet, cg);

         TR::LabelSymbol *startLabel     = TR::LabelSymbol::create(cg->trHeapMemory(),cg);
         TR::LabelSymbol *normalizeLabel = TR::LabelSymbol::create(cg->trHeapMemory(),cg);
         TR::LabelSymbol *endLabel       = TR::LabelSymbol::create(cg->trHeapMemory(),cg);
         startLabel->setStartInternalControlFlow();
         endLabel  ->setEndInternalControlFlow();

         generateLabelInstruction(   LABEL,       node, startLabel,               cg);
         generateRegMemInstruction(  CMP8RegMem,  node, treg, nan1MR,             cg);
         generateLabelInstruction(   JGE4,        node, normalizeLabel,           cg);
         generateRegMemInstruction(  CMP8RegMem,  node, treg, nan2MR,             cg);
         generateLabelInstruction(   JB4,         node, endLabel,                 cg);
         generateLabelInstruction(   LABEL,       node, normalizeLabel,           cg);
         generateRegImm64Instruction( MOV8RegImm64, node, treg, DOUBLE_NAN,         cg);
         generateLabelInstruction(   LABEL,       node, endLabel,           deps, cg);
         }
      else
         {
         // A bunch of bookkeeping
         //
         uint64_t nanDetector = DOUBLE_NAN_2_LOW;

         TR::RegisterDependencyConditions  *internalControlFlowDeps = generateRegisterDependencyConditions((uint8_t)0, (uint8_t)1, cg);
         internalControlFlowDeps->addPostCondition(treg, TR::RealRegister::NoReg, cg);

         TR::RegisterDependencyConditions  *helperDeps = generateRegisterDependencyConditions((uint8_t)1, (uint8_t)1, cg);
         helperDeps->addPreCondition( treg, TR::RealRegister::eax, cg);
         helperDeps->addPostCondition(treg, TR::RealRegister::eax, cg);

         TR::IA32ConstantDataSnippet *nanDetectorSnippet  = cg->findOrCreate8ByteConstant(node, nanDetector);
         TR::MemoryReference      *nanDetectorMR       = generateX86MemoryReference(nanDetectorSnippet,  cg);

         TR::LabelSymbol *startLabel     = TR::LabelSymbol::create(cg->trHeapMemory(),cg);
         TR::LabelSymbol *slowPathLabel  = TR::LabelSymbol::create(cg->trHeapMemory(),cg);
         TR::LabelSymbol *normalizeLabel = TR::LabelSymbol::create(cg->trHeapMemory(),cg);
         TR::LabelSymbol *endLabel       = TR::LabelSymbol::create(cg->trHeapMemory(),cg);
         startLabel->setStartInternalControlFlow();
         endLabel  ->setEndInternalControlFlow();

         // Fast path: if subtracting nanDetector leaves CF=0 or OF=1, then it
         // must be a NaN.
         //
         generateLabelInstruction(  LABEL,       node, startLabel,           cg);
         generateRegMemInstruction( CMP8RegMem,  node, treg, nanDetectorMR,  cg);
         generateLabelInstruction(  JAE4,        node, slowPathLabel,        cg);
         generateLabelInstruction(  JO4,         node, slowPathLabel,        cg);

         // Slow path
         //
         TR_OutlinedInstructions *slowPath = new (cg->trHeapMemory()) TR_OutlinedInstructions(slowPathLabel, cg);
         cg->getOutlinedInstructionsList().push_front(slowPath);
         slowPath->swapInstructionListsWithCompilation();
         generateLabelInstruction(NULL, LABEL,       slowPathLabel,          cg)->setNode(node);
         generateRegImm64Instruction(MOV8RegImm64, node, treg, DOUBLE_NAN, cg);
         generateLabelInstruction(      JMP4,        node, endLabel,         cg);
         slowPath->swapInstructionListsWithCompilation();

         // Merge point
         //
         generateLabelInstruction(LABEL, node, endLabel, internalControlFlowDeps, cg);
         }
      }
   node->setRegister(treg);
   cg->decReferenceCount(child);
   return treg;
   }
Example #7
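// Build the instruction sequence for a direct call under the AMD64 system linkage:
// evaluate the outgoing arguments, emit the call (through a scratch register when
// the method address is known, otherwise as an immediate call), and attach the
// volatile/return register post-dependencies at a trailing label.
//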
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

#if defined(PYTHON) && 0
   // For Python, store the instruction that contains the GC map at this site into
   // the frame object.
   //
   TR::SymbolReference *frameObjectSymRef =
      comp()->getSymRefTab()->findOrCreateAutoSymbol(comp()->getMethodSymbol(), 0, TR::Address, true, false, true);

   TR::Register *frameObjectRegister = cg()->allocateRegister();
   generateRegMemInstruction(
         L8RegMem,
         callNode,
         frameObjectRegister,
         generateX86MemoryReference(frameObjectSymRef, cg()),
         cg());

   TR::RealRegister *espReal = cg()->machine()->getX86RealRegister(TR::RealRegister::esp);
   TR::Register *gcMapPCRegister = cg()->allocateRegister();

   generateRegMemInstruction(
         LEA8RegMem,
         callNode,
         gcMapPCRegister,
         generateX86MemoryReference(espReal, -8, cg()),
         cg());

   // Use "volatile" registers across the call.  Once proper register map support
   // is implemented, r14 and r15 will no longer be volatile and a different pair
   // should be chosen.
   //
   TR::RegisterDependencyConditions *gcMapDeps = generateRegisterDependencyConditions(0, 2, cg());
   gcMapDeps->addPostCondition(frameObjectRegister, TR::RealRegister::r14, cg());
   gcMapDeps->addPostCondition(gcMapPCRegister, TR::RealRegister::r15, cg());
   gcMapDeps->stopAddingPostConditions();

   generateMemRegInstruction(
         S8MemReg,
         callNode,
         generateX86MemoryReference(frameObjectRegister, fe()->getPythonGCMapPCOffsetInFrame(), cg()),
         gcMapPCRegister,
         gcMapDeps,
         cg());

   cg()->stopUsingRegister(frameObjectRegister);
   cg()->stopUsingRegister(gcMapPCRegister);
#endif

   TR::Instruction *instr;
   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (TR::Options::getCmdLineOptions()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptrj_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
Example #8
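// Clone the pre- and post-conditions of this dependency set and merge in those of
// 'added', skipping any virtual->NoReg entries that 'added' already covers.
//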
TR::RegisterDependencyConditions *
OMR::Power::RegisterDependencyConditions::clone(
   TR::CodeGenerator *cg,
   TR::RegisterDependencyConditions *added)
   {
   TR::RegisterDependencyConditions *result;
   TR::RegisterDependency           *singlePair;
   int32_t      idx, preNum, postNum, addPre=0, addPost=0;
#if defined(DEBUG)
   int32_t      preGPR=0, postGPR=0;
#endif

   if (added != NULL)
      {
      addPre = added->getAddCursorForPre();
      addPost = added->getAddCursorForPost();
      }
   preNum = this->getAddCursorForPre();
   postNum = this->getAddCursorForPost();
   result = new (cg->trHeapMemory()) TR::RegisterDependencyConditions(getNumPreConditions()+addPre,
                   getNumPostConditions()+addPost, cg->trMemory());

   for (idx=0; idx<postNum; idx++)
      {
      singlePair = this->getPostConditions()->getRegisterDependency(idx);
      //eliminate duplicate virtual->NoReg dependencies in 'this' conditions
      if (!((TR::RealRegister::NoReg == singlePair->getRealRegister()) && (added->postConditionContainsVirtual(singlePair->getRegister()))))
         result->addPostCondition(singlePair->getRegister(), singlePair->getRealRegister(),
                               singlePair->getFlags());
#if defined(DEBUG)
      if (singlePair->getRegister()->getKind() == TR_GPR)
         postGPR++;
#endif
      }

   for (idx=0; idx<preNum; idx++)
      {
      singlePair = this->getPreConditions()->getRegisterDependency(idx);
      //eliminate duplicate virtual->NoReg dependencies in 'this' conditions
      if (!((TR::RealRegister::NoReg == singlePair->getRealRegister()) && (added->preConditionContainsVirtual(singlePair->getRegister()))))
         result->addPreCondition(singlePair->getRegister(), singlePair->getRealRegister(),
                              singlePair->getFlags());
#if defined(DEBUG)
      if (singlePair->getRegister()->getKind() == TR_GPR)
         preGPR++;
#endif
      }

   for (idx=0; idx<addPost; idx++)
      {
      singlePair = added->getPostConditions()->getRegisterDependency(idx);
      result->addPostCondition(singlePair->getRegister(), singlePair->getRealRegister(),
                               singlePair->getFlags());
#if defined(DEBUG)
      if (singlePair->getRegister()->getKind() == TR_GPR)
         postGPR++;
#endif
      }

   for (idx=0; idx<addPre; idx++)
      {
      singlePair = added->getPreConditions()->getRegisterDependency(idx);
      result->addPreCondition(singlePair->getRegister(), singlePair->getRealRegister(),
                              singlePair->getFlags());
#if defined(DEBUG)
      if (singlePair->getRegister()->getKind() == TR_GPR)
         preGPR++;
#endif
      }

#if defined(DEBUG)
   int32_t max_gpr = cg->getProperties().getNumAllocatableIntegerRegisters();
   TR_ASSERT(preGPR<=max_gpr && postGPR<=max_gpr, "Over the limit of available global registers.");
#endif
   return result;
   }
Example #9
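// Combine the scratch-register pre- and post-dependency lists (GPRs and CCRs) into
// a single TR::RegisterDependencyConditions. When VSX vector registers may be live,
// placeholder dependencies are added on every non-preserved VSR; when only VSX
// scalar registers may be live, on vsr32 through the last VSR.
//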
TR::RegisterDependencyConditions* TR_PPCScratchRegisterDependencyConditions::createDependencyConditions(TR::CodeGenerator *cg,
                                                                                                          TR_PPCScratchRegisterDependencyConditions *pre,
                                                                                                          TR_PPCScratchRegisterDependencyConditions *post)
   {
   int32_t preCount = pre ? pre->getNumberOfDependencies() : 0;
   int32_t postCount = post ? post->getNumberOfDependencies() : 0;
   TR_LiveRegisters *lrVector = cg->getLiveRegisters(TR_VSX_VECTOR);
   bool liveVSXVectorReg = (!lrVector || (lrVector->getNumberOfLiveRegisters() > 0));
   TR_LiveRegisters *lrScalar = cg->getLiveRegisters(TR_VSX_SCALAR);
   bool liveVSXScalarReg = (!lrScalar || (lrScalar->getNumberOfLiveRegisters() > 0));
   if (liveVSXVectorReg)
      {
      preCount += 64;
      postCount += 64;
      }
   else if (liveVSXScalarReg)
      {
      preCount += 32;
      postCount += 32;
      }

   TR::RegisterDependencyConditions *dependencies = new (cg->trHeapMemory()) TR::RegisterDependencyConditions(preCount,
                                                                                                                  postCount,
                                                                                                                  cg->trMemory());
   for (int i = 0; i < (pre ? pre->_numGPRDeps : 0); ++i)
      {
      dependencies->addPreCondition(pre->_gprDeps[i].getRegister(), pre->_gprDeps[i].getRealRegister(), pre->_gprDeps[i].getFlags());
      if (pre->_excludeGPR0 & (1 << i))
         dependencies->getPreConditions()->getRegisterDependency(i)->setExcludeGPR0();
      }
   for (int i = 0; i < (post ? post->_numGPRDeps : 0); ++i)
      {
      dependencies->addPostCondition(post->_gprDeps[i].getRegister(), post->_gprDeps[i].getRealRegister(), post->_gprDeps[i].getFlags());
      if (post->_excludeGPR0 & (1 << i))
         dependencies->getPostConditions()->getRegisterDependency(i)->setExcludeGPR0();
      }
   for (int i = 0; i < (pre ? pre->_numCCRDeps : 0); ++i)
      {
      dependencies->addPreCondition(pre->_ccrDeps[i].getRegister(), pre->_ccrDeps[i].getRealRegister(), pre->_ccrDeps[i].getFlags());
      }
   for (int i = 0; i < (post ? post->_numCCRDeps : 0); ++i)
      {
      dependencies->addPostCondition(post->_ccrDeps[i].getRegister(), post->_ccrDeps[i].getRealRegister(), post->_ccrDeps[i].getFlags());
      }

   const TR_PPCLinkageProperties& properties = cg->getLinkage()->getProperties();
   if (liveVSXVectorReg)
      {
      for (int32_t i=TR::RealRegister::FirstVSR; i<=TR::RealRegister::LastVSR; i++)
         {
         if (!properties.getPreserved((TR::RealRegister::RegNum)i))
            {
            TR::Register *vreg = cg->allocateRegister(TR_FPR);
            vreg->setPlaceholderReg();
            dependencies->addPreCondition(vreg, (TR::RealRegister::RegNum)i);
            dependencies->addPostCondition(vreg, (TR::RealRegister::RegNum)i);
            }
         }
      }
   else
      {
      if (liveVSXScalarReg)
         {
         for (int32_t i=TR::RealRegister::vsr32; i<=TR::RealRegister::LastVSR; i++)
            {
            if (!properties.getPreserved((TR::RealRegister::RegNum)i))
               {
               TR::Register *vreg = cg->allocateRegister(TR_FPR);
               vreg->setPlaceholderReg();
               dependencies->addPreCondition(vreg, (TR::RealRegister::RegNum)i);
               dependencies->addPostCondition(vreg, (TR::RealRegister::RegNum)i);
               }
            }
         }
      }

   return dependencies;
   }
Example #10
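// Record the register uses implied by this dependency set on 'instr' and maintain
// the virtual-to-real register associations for the pre- and post-conditions.
// When an existing association is displaced (outside of out-of-line code), the old
// associations are captured in an assocreg pseudo-instruction.
//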
void OMR::Power::RegisterDependencyConditions::bookKeepingRegisterUses(TR::Instruction *instr, TR::CodeGenerator *cg)
   {
   TR::Register *virtReg;
   TR::RealRegister::RegNum regNum;
   TR::RegisterDependencyConditions *assoc;
   int numAssoc = 0;

   if (instr->getOpCodeValue() == TR::InstOpCode::assocreg)
      return;

   // Don't track associations or emit assocregs in outlined code
   // Register assigner can save/restore associations across outlined sections properly, however no such mechanism exists for instruction selection
   // so we don't want these associations to clobber the associations that were set in main line code, which are more important
   // TODO: Fix this by saving/restoring the associations in swapInstructionListsWithCompilation()
   bool isOOL = cg->getIsInOOLSection();

   TR::Machine *machine = cg->machine();
   assoc = !isOOL ? new (cg->trHeapMemory()) TR::RegisterDependencyConditions(0,_addCursorForPre, cg->trMemory()) : 0;

   for (int i = 0; i < _addCursorForPre; i++)
      {
      virtReg = _preConditions->getRegisterDependency(i)->getRegister();
      regNum  = _preConditions->getRegisterDependency(i)->getRealRegister();

      if (!isOOL)
         {
         // Add to the association condition when the association is changed
         // from one virtual register to another
         if (machine->getVirtualAssociatedWithReal(regNum) != virtReg && machine->getVirtualAssociatedWithReal(regNum) != 0)
            {
            assoc->addPostCondition(machine->getVirtualAssociatedWithReal(regNum), regNum);
            numAssoc++;
            }
         // Keep track of the virtual register map to real registers!
         machine->setVirtualAssociatedWithReal(regNum, virtReg);
         }

      instr->useRegister(virtReg);

      if (!isOOL)
         {
         cg->setRealRegisterAssociation(virtReg, regNum);
         if (_preConditions->getRegisterDependency(i)->getExcludeGPR0())
            cg->addRealRegisterInterference(virtReg, TR::RealRegister::gr0);
         }
      }

   if (numAssoc > 0)
      {
      // Emit an AssocRegs instruction to track the previous association
      assoc->setNumPostConditions(numAssoc, cg->trMemory());
      generateDepInstruction(cg, TR::InstOpCode::assocreg, instr->getNode(), assoc, instr->getPrev());
      }

   for (int j = 0; j < _addCursorForPost; j++)
      {
      virtReg = _postConditions->getRegisterDependency(j)->getRegister();
      regNum  = _postConditions->getRegisterDependency(j)->getRealRegister();

      instr->useRegister(virtReg);

      if (!isOOL)
         {
         cg->setRealRegisterAssociation(virtReg, regNum);
         if (_postConditions->getRegisterDependency(j)->getExcludeGPR0())
            cg->addRealRegisterInterference(virtReg, TR::RealRegister::gr0);
         }
      }
   }
Example #11
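// Analyse an x87 floating-point compare: order and evaluate the operands, use FTST
// when one operand is an unevaluated zero, emit the compare, and, when FCOMI-style
// instructions are not used, copy the FPU status word into EAX (STSWAcc) and return
// that register (otherwise return NULL).
//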
TR::Register *TR_X86FPCompareAnalyser::fpCompareAnalyser(TR::Node       *root,
                                                         TR_X86OpCodes cmpRegRegOpCode,
                                                         TR_X86OpCodes cmpRegMemOpCode,
                                                         TR_X86OpCodes cmpiRegRegOpCode,
                                                         bool           useFCOMIInstructions)
   {
   TR::Node      *firstChild,
                *secondChild;
   TR::ILOpCodes  cmpOp = root->getOpCodeValue();
   bool          reverseMemOp = false;
   bool          reverseCmpOp = false;
   TR::Compilation* comp = _cg->comp();
   TR_X86OpCodes cmpInstr = useFCOMIInstructions ? cmpiRegRegOpCode : cmpRegRegOpCode;

   // Some operators must have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool mustSwapOperands = (cmpOp == TR::iffcmple ||
                            cmpOp == TR::ifdcmple ||
                            cmpOp == TR::iffcmpgtu ||
                            cmpOp == TR::ifdcmpgtu ||
                            cmpOp == TR::fcmple ||
                            cmpOp == TR::dcmple ||
                            cmpOp == TR::fcmpgtu ||
                            cmpOp == TR::dcmpgtu ||
                            (useFCOMIInstructions &&
                             (cmpOp == TR::iffcmplt ||
                              cmpOp == TR::ifdcmplt ||
                              cmpOp == TR::iffcmpgeu ||
                              cmpOp == TR::ifdcmpgeu ||
                              cmpOp == TR::fcmplt ||
                              cmpOp == TR::dcmplt ||
                              cmpOp == TR::fcmpgeu ||
                              cmpOp == TR::dcmpgeu))) ? true : false;

   // Some operators should not have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool preventOperandSwapping = (cmpOp == TR::iffcmpltu ||
                                  cmpOp == TR::ifdcmpltu ||
                                  cmpOp == TR::iffcmpge ||
                                  cmpOp == TR::ifdcmpge ||
                                  cmpOp == TR::fcmpltu ||
                                  cmpOp == TR::dcmpltu ||
                                  cmpOp == TR::fcmpge ||
                                  cmpOp == TR::dcmpge ||
                                  (useFCOMIInstructions &&
                                   (cmpOp == TR::iffcmpgt ||
                                    cmpOp == TR::ifdcmpgt ||
                                    cmpOp == TR::iffcmpleu ||
                                    cmpOp == TR::ifdcmpleu ||
                                    cmpOp == TR::fcmpgt ||
                                    cmpOp == TR::dcmpgt ||
                                    cmpOp == TR::fcmpleu ||
                                    cmpOp == TR::dcmpleu))) ? true : false;

   // For correctness, don't swap operands of these operators.
   //
   if (cmpOp == TR::fcmpg || cmpOp == TR::fcmpl ||
       cmpOp == TR::dcmpg || cmpOp == TR::dcmpl)
      {
      preventOperandSwapping = true;
      }

   // Initial operand evaluation ordering.
   //
   if (preventOperandSwapping || (!mustSwapOperands && _cg->whichChildToEvaluate(root) == 0))
      {
      firstChild  = root->getFirstChild();
      secondChild = root->getSecondChild();
      setReversedOperands(false);
      }
   else
      {
      firstChild  = root->getSecondChild();
      secondChild = root->getFirstChild();
      setReversedOperands(true);
      }

   TR::Register *firstRegister  = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();

   setInputs(firstChild,
             firstRegister,
             secondChild,
             secondRegister,
             useFCOMIInstructions,

             // If either 'preventOperandSwapping' or 'mustSwapOperands' is set then the
             // initial operand ordering set above must be maintained.
             //
             preventOperandSwapping || mustSwapOperands);

   // Make sure any required operand ordering is respected.
   //
   if ((getCmpReg2Reg1() || getCmpReg2Mem1()) &&
       (mustSwapOperands || preventOperandSwapping))
      {
      reverseCmpOp = getCmpReg2Reg1() ? true : false;
      reverseMemOp = getCmpReg2Mem1() ? true : false;
      }

   // If we are not comparing with a memory operand, one of them evaluates
   // to a zero, and the zero is not already on the stack, then we can use
   // FTST to save a register.
   //
   // (With a memory operand, either the constant zero needs to be loaded
   // to use FCOM, or the memory operand needs to be loaded to use FTST,
   // so there is no gain in using FTST.)
   //
   // If the constant zero is in the target register, using FTST means the
   // comparison will be reversed. We cannot do this if the initial ordering
   // of the operands must be maintained.
   //
   // Finally, if FTST is used and this is the last use of the target, the
   // target register may need to be explicitly popped.
   //
   TR::Register *targetRegisterForFTST = NULL;
   TR::Node     *targetChildForFTST = NULL;

   if (getEvalChild1() && isUnevaluatedZero(firstChild))  // do we need getEvalChild1() here?
      {
      if ( ((getCmpReg1Reg2() || reverseCmpOp) && !(preventOperandSwapping || mustSwapOperands)) ||
            (getCmpReg2Reg1() && !reverseCmpOp))
         {
         if (getEvalChild2())
            {
            secondRegister = _cg->evaluate(secondChild);
            }
         targetRegisterForFTST = secondRegister;
         targetChildForFTST = secondChild;
         notReversedOperands();
         }
      }
   else if (getEvalChild2() && isUnevaluatedZero(secondChild))  // do we need getEvalChild2() here?
      {
      if ( (getCmpReg1Reg2() || reverseCmpOp) ||
           (getCmpReg2Reg1() && !reverseCmpOp && !(preventOperandSwapping || mustSwapOperands)) )
         {
         if (getEvalChild1())
            {
            firstRegister = _cg->evaluate(firstChild);
            }
         targetRegisterForFTST = firstRegister;
         targetChildForFTST = firstChild;
         }
      }

   if (!targetRegisterForFTST)
      {
      // If we have a choice, evaluate the target operand last.  By doing so, we
      // help out the register assigner because the target must be TOS.  This
      // avoids an unnecessary FXCH for the target.
      //
      if (getEvalChild1() && getEvalChild2())
         {
         if (getCmpReg1Reg2() || getCmpReg1Mem2())
            {
            secondRegister = _cg->evaluate(secondChild);
            firstRegister = _cg->evaluate(firstChild);
            }
         else
            {
            firstRegister = _cg->evaluate(firstChild);
            secondRegister = _cg->evaluate(secondChild);
            }
         }
      else
         {
         if (getEvalChild1())
            {
            firstRegister = _cg->evaluate(firstChild);
            }

         if (getEvalChild2())
            {
            secondRegister = _cg->evaluate(secondChild);
            }
         }
      }

   // Adjust the FP precision of feeding operands.
   //
   if (firstRegister &&
       (firstRegister->needsPrecisionAdjustment() ||
        comp->getOption(TR_StrictFPCompares) ||
        (firstRegister->mayNeedPrecisionAdjustment() && secondChild->getOpCode().isLoadConst()) ||
        (firstRegister->mayNeedPrecisionAdjustment() && !secondRegister)))
      {
      TR::TreeEvaluator::insertPrecisionAdjustment(firstRegister, root, _cg);
      }

   if (secondRegister &&
       (secondRegister->needsPrecisionAdjustment() ||
        comp->getOption(TR_StrictFPCompares) ||
        (secondRegister->mayNeedPrecisionAdjustment() && firstChild->getOpCode().isLoadConst()) ||
        (secondRegister->mayNeedPrecisionAdjustment() && !firstRegister)))
      {
      TR::TreeEvaluator::insertPrecisionAdjustment(secondRegister, root, _cg);
      }

   // Generate the compare instruction.
   //
   if (targetRegisterForFTST)
      {
      generateFPRegInstruction(FTSTReg, root, targetRegisterForFTST, _cg);
      }
   else if (!useFCOMIInstructions && (getCmpReg1Mem2() || reverseMemOp))
      {
      TR::MemoryReference  *tempMR = generateX86MemoryReference(secondChild, _cg);
      generateFPRegMemInstruction(cmpRegMemOpCode, root, firstRegister, tempMR, _cg);
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (!useFCOMIInstructions && getCmpReg2Mem1())
      {
      TR::MemoryReference  *tempMR = generateX86MemoryReference(firstChild, _cg);
      generateFPRegMemInstruction(cmpRegMemOpCode, root, secondRegister, tempMR, _cg);
      notReversedOperands();
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (getCmpReg1Reg2() || reverseCmpOp)
      {
      generateFPCompareRegRegInstruction(cmpInstr, root, firstRegister, secondRegister, _cg);
      }
   else if (getCmpReg2Reg1())
      {
      generateFPCompareRegRegInstruction(cmpInstr, root, secondRegister, firstRegister, _cg);
      notReversedOperands();
      }

   _cg->decReferenceCount(firstChild);
   _cg->decReferenceCount(secondChild);

   // Evaluate the comparison.
   //
   if (getReversedOperands())
      {
      cmpOp = TR::ILOpCode(cmpOp).getOpCodeForSwapChildren();
      TR::Node::recreate(root, cmpOp);
      }

   if (useFCOMIInstructions && !targetRegisterForFTST)
      {
      return NULL;
      }

   // We must manually move the FP condition flags to the EFLAGS register if we don't
   // use the FCOMI instructions.
   //
   TR::Register *accRegister = _cg->allocateRegister();
   TR::RegisterDependencyConditions  *dependencies = generateRegisterDependencyConditions((uint8_t)1, 1, _cg);
   dependencies->addPreCondition(accRegister, TR::RealRegister::eax, _cg);
   dependencies->addPostCondition(accRegister, TR::RealRegister::eax, _cg);
   generateRegInstruction(STSWAcc, root, accRegister, dependencies, _cg);

   // Pop the FTST target register if it is not used any more.
   //
   if (targetRegisterForFTST &&
       targetChildForFTST && targetChildForFTST->getReferenceCount() == 0)
      {
      generateFPSTiST0RegRegInstruction(FSTRegReg, root, targetRegisterForFTST, targetRegisterForFTST, _cg);
      }

   return accRegister;
   }