Example #1
void
OMR::IlValue::storeToAuto()
   {
   if (_symRefThatCanBeUsedInOtherBlocks == NULL)
      {
      TR::Compilation *comp = TR::comp();

      // first use from another block; need to create a symref and insert a store tree where the node was computed
      TR::SymbolReference *symRef = comp->getSymRefTab()->createTemporary(_methodBuilder->methodSymbol(), _nodeThatComputesValue->getDataType());
      symRef->getSymbol()->setNotCollected();
      char *name = (char *) comp->trMemory()->allocateHeapMemory((2+10+1) * sizeof(char)); // 2 ("_T") + max 10 digits + trailing zero
      sprintf(name, "_T%u", symRef->getCPIndex());
      symRef->getSymbol()->getAutoSymbol()->setName(name);
      _methodBuilder->defineSymbol(name, symRef);

      // create store and its treetop
      TR::Node *storeNode = TR::Node::createStore(symRef, _nodeThatComputesValue);
      TR::TreeTop *prevTreeTop = _treeTopThatAnchorsValue->getPrevTreeTop();
      TR::TreeTop *newTree = TR::TreeTop::create(comp, storeNode);
      newTree->insertNewTreeTop(prevTreeTop, _treeTopThatAnchorsValue);

      _treeTopThatAnchorsValue->unlink(true);

      _treeTopThatAnchorsValue = newTree;
      _symRefThatCanBeUsedInOtherBlocks = symRef;
      }
   }
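A side note on the buffer math above: 2 bytes for "_T", at most 10 digits for a 32-bit unsigned index, and one byte for the terminator gives 13. A standalone sketch of the same naming scheme using the bounds-checked snprintf (a hypothetical helper, not part of the OMR API):

#include <cstdio>
#include <cstdint>

// Formats a temporary name like "_T42" for a constant-pool index.
// Buffer: 2 ("_T") + at most 10 digits for a uint32_t + trailing '\0' = 13.
static void makeTempName(char (&buf)[13], uint32_t cpIndex)
   {
   std::snprintf(buf, sizeof(buf), "_T%u", cpIndex);
   }

int main()
   {
   char name[13];
   makeTempName(name, 4294967295u);  // worst case: all 10 digits
   std::printf("%s\n", name);        // prints "_T4294967295"
   return 0;
   }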
Example #2
TR::Register *
TR::AMD64SystemLinkage::buildIndirectDispatch(TR::Node *callNode)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR_ASSERT(methodSymRef->getSymbol()->castToMethodSymbol()->isComputed(), "system linkage only supports computed indirect call for now %p\n", callNode);

   // Evaluate VFT
   //
   TR::Register *vftRegister;
   TR::Node *vftNode = callNode->getFirstChild();
   if (vftNode->getRegister())
      {
      vftRegister = vftNode->getRegister();
      }
   else
      {
      vftRegister = cg()->evaluate(vftNode);
      }

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers + 1 for VFT register
   // post = number of volatile + VMThread + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters() + 1;
   uint32_t post = getProperties().getNumVolatileRegisters() + 1 + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *callDeps = generateRegisterDependencyConditions(pre, 1, cg());

   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   callDeps->addPostCondition(vftRegister, scratchRegIndex, cg());
   callDeps->stopAddingPostConditions();

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, callDeps);

   // Dispatch
   //
   generateRegInstruction(CALLReg, callNode, vftRegister, callDeps, cg());
   cg()->resetIsLeafMethod();

   // Build label post-conditions
   //
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());
   TR::Register *returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
Example #3
void
OMR::SymbolReference::setLiteralPoolAliases(TR_BitVector * aliases, TR::SymbolReferenceTable * symRefTab)
   {
   if (!symRefTab->findGenericIntShadowSymbol())
      return;

   TR_SymRefIterator i(symRefTab->aliasBuilder.genericIntShadowSymRefs(), symRefTab);
   TR::SymbolReference * symRef;
   while ((symRef = i.getNext()))
      if (symRef->isLiteralPoolAddress() || symRef->isFromLiteralPool())
         aliases->set(symRef->getReferenceNumber());

   aliases->set(self()->getReferenceNumber());

   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
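For readers new to TR_BitVector, the idiom here — set individual symbol-reference numbers, then OR in an entire category at once — is plain set union over reference numbers. A toy illustration with std::bitset standing in for the growable, arena-allocated OMR container:

#include <bitset>
#include <cstdio>

int main()
   {
   std::bitset<64> aliases;           // one bit per symbol-reference number
   std::bitset<64> unsafeSymRefNums;
   unsafeSymRefNums.set(7);
   unsafeSymRefNums.set(12);

   aliases.set(3);                    // this symRef's own reference number
   aliases |= unsafeSymRefNums;       // fold in the whole "unsafe" category

   std::printf("%zu alias bits set\n", aliases.count());  // prints 3
   return 0;
   }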
Example #4
// resolved casts that are not to abstract, interface, or array need a super test
bool OMR::TreeEvaluator::instanceOfOrCheckCastNeedSuperTest(TR::Node * node, TR::CodeGenerator *cg)
   {
   TR::Node            *castClassNode    = node->getSecondChild();
   TR::MethodSymbol    *helperSym        = node->getSymbol()->castToMethodSymbol();
   TR::SymbolReference *castClassSymRef  = castClassNode->getSymbolReference();

   if (!TR::TreeEvaluator::isStaticClassSymRef(castClassSymRef))
      {
      // We could theoretically do a super test on something with no sym, but it would require significant
      // changes to platform code. The benefit is small at this point (it shows up in reference arraycopy reductions).

      if (cg->supportsInliningOfIsInstance() &&
          node->getOpCodeValue() == TR::instanceof &&
          node->getSecondChild()->getOpCodeValue() != TR::loadaddr)
         return true;
      else
         return false;
      }

   TR::StaticSymbol    *castClassSym = castClassSymRef->getSymbol()->getStaticSymbol();

   if (castClassSymRef->isUnresolved())
      {
      return false;
      }
   else
      {
      TR_OpaqueClassBlock * clazz;
      // If the class is a regular class (i.e., not an interface nor an array) and
      // not known to be a final class, an inline superclass test can be generated.
      // If the helper does not preserve all the registers there will not be
      // enough registers to do the superclass test inline.
      // Also, don't generate the superclass test if optimizing for space.
      //
      if (castClassSym &&
          (clazz = (TR_OpaqueClassBlock *) castClassSym->getStaticAddress()) &&
          !TR::Compiler->cls.isClassArray(cg->comp(), clazz) &&
          !TR::Compiler->cls.isInterfaceClass(cg->comp(), clazz) &&
          !TR::Compiler->cls.isClassFinal(cg->comp(), clazz) &&
           helperSym->preservesAllRegisters() &&
          !cg->comp()->getOption(TR_OptimizeForSpace))
         return true;
      }
   return false;
   }
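Stripped of the lookups, the resolved-path decision above reduces to a small predicate; a hypothetical restatement (parameter names invented for illustration):

// Mirrors the logic above: a resolved, non-array, non-interface, non-final
// cast class gets an inline superclass test, provided the helper preserves
// all registers and we are not optimizing for space.
static bool needSuperTest(bool isArray, bool isInterface, bool isFinal,
                          bool helperPreservesAllRegs, bool optimizeForSpace)
   {
   return !isArray && !isInterface && !isFinal
          && helperPreservesAllRegs && !optimizeForSpace;
   }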
Example #5
void
OMR::SymbolReference::setSharedStaticAliases(TR_BitVector * aliases, TR::SymbolReferenceTable * symRefTab)
   {
   if (self()->reallySharesSymbol())
      {
      TR::DataType type = self()->getSymbol()->getType();
      TR_SymRefIterator i(type.isAddress() ? symRefTab->aliasBuilder.addressStaticSymRefs()
                                           : (type.isInt32() ? symRefTab->aliasBuilder.intStaticSymRefs()
                                                             : symRefTab->aliasBuilder.nonIntPrimitiveStaticSymRefs()), symRefTab);
      TR::SymbolReference * symRef;
      while ((symRef = i.getNext()))
         if (symRef->getSymbol() == self()->getSymbol())
            aliases->set(symRef->getReferenceNumber());
      }
   else
      aliases->set(self()->getReferenceNumber());

   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
Example #6
// unresolved casts or casts to things other than abstract or interface benefit from
// an equality test
bool OMR::TreeEvaluator::instanceOfOrCheckCastNeedEqualityTest(TR::Node * node, TR::CodeGenerator *cg)
   {
   TR::Node            *castClassNode    = node->getSecondChild();
   TR::SymbolReference *castClassSymRef  = castClassNode->getSymbolReference();

   if (!TR::TreeEvaluator::isStaticClassSymRef(castClassSymRef))
      {
      return true;
      }

   TR::StaticSymbol    *castClassSym     = castClassSymRef->getSymbol()->getStaticSymbol();

   if (castClassSymRef->isUnresolved())
      {
      return false;
      }
   else
      {
      TR_OpaqueClassBlock * clazz;

      if (castClassSym
          && (clazz = (TR_OpaqueClassBlock *) castClassSym->getStaticAddress())
          && !TR::Compiler->cls.isInterfaceClass(cg->comp(), clazz)
          && (
              !TR::Compiler->cls.isAbstractClass(cg->comp(), clazz)

              // here be dragons
              // int.class, char.class, etc. are final & abstract.
              // Usually instanceOf calls on these classes are removed by the
              // optimizer, but in some cases they persist to codegen; without
              // the following case they cause assertions, because we opt out of
              // calling the helper and of doing all inline tests. Really we
              // could just jmp to the failed side, but to reduce service risk
              // we do an equality test that we know will fail.
              // NOTE: final abstract is not enough - all array classes are
              //       final abstract, to prevent them being used with new and
              //       being extended...
              || (TR::Compiler->cls.isAbstractClass(cg->comp(), clazz) && TR::Compiler->cls.isClassFinal(cg->comp(), clazz)
                  && TR::Compiler->cls.isPrimitiveClass(cg->comp(), clazz)))
         )
         return true;
      }

   return false;
   }
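The resolved-path logic, including the primitive-class corner case the comment warns about, condenses to the following (again a hypothetical restatement, not OMR code):

// Interfaces never get the equality test. Concrete (non-abstract) classes
// do. Final abstract primitive classes (int.class, char.class, ...) also
// get one -- a test known to fail -- so codegen has an inline path instead
// of asserting after opting out of the helper call.
static bool needEqualityTest(bool isInterface, bool isAbstract,
                             bool isFinal, bool isPrimitive)
   {
   if (isInterface)
      return false;
   return !isAbstract || (isFinal && isPrimitive);
   }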
Example #7
void
OMR::SymbolReference::setSharedShadowAliases(TR_BitVector * aliases, TR::SymbolReferenceTable * symRefTab)
   {
   if (self()->reallySharesSymbol() && !_symbol->isUnsafeShadowSymbol())
      {
      TR::DataType type = self()->getSymbol()->getType();
      TR_SymRefIterator i(type.isAddress() ? symRefTab->aliasBuilder.addressShadowSymRefs()
                                           : (type.isInt32() ? symRefTab->aliasBuilder.intShadowSymRefs()
                                                             : symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs()), symRefTab);
      TR::SymbolReference * symRef;
      while ((symRef = i.getNext()))
         if (symRef->getSymbol() == self()->getSymbol())
            aliases->set(symRef->getReferenceNumber());

      // include symbol reference's own shared alias bitvector
      if (symRefTab->getSharedAliases(self()) != NULL)
         *aliases |= *(symRefTab->getSharedAliases(self()));
      }
   else
      aliases->set(self()->getReferenceNumber());

   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
Example #8
bool TR_LocalAnalysis::isSupportedNodeForPREPerformance(TR::Node *node, TR::Compilation *comp, TR::Node *parent)
   {
   TR::SymbolReference *symRef = node->getOpCode().hasSymbolReference()?node->getSymbolReference():NULL;
   if (node->getOpCode().isStore() && symRef &&
       symRef->getSymbol()->isAutoOrParm())
      {
      //dumpOptDetails("Returning false for store %p\n", node);
      return false;
      }

   if (node->getOpCode().isLoadConst() && !comp->cg()->isMaterialized(node))
      {
      return false;
      }

   if (node->getOpCode().hasSymbolReference() &&
       (node->getSymbolReference() == comp->getSymRefTab()->findJavaLangClassFromClassSymbolRef()))
      {
      return false;
      }

   return true;
   }
Example #9
int32_t TR::DeadTreesElimination::process(TR::TreeTop *startTree, TR::TreeTop *endTree)
   {
   TR::StackMemoryRegion stackRegion(*comp()->trMemory());
   LongestPathMap longestPaths(std::less<TR::Node*>(), stackRegion);

   typedef TR::typed_allocator<CRAnchor, TR::Region&> CRAnchorAlloc;
   typedef TR::forward_list<CRAnchor, CRAnchorAlloc> CRAnchorList;
   CRAnchorList anchors(stackRegion);

   vcount_t visitCount = comp()->incOrResetVisitCount();
   TR::TreeTop *treeTop;
   for (treeTop = startTree; (treeTop != endTree); treeTop = treeTop->getNextTreeTop())
      treeTop->getNode()->initializeFutureUseCounts(visitCount);

   TR::Block *block = NULL;
   bool delayedRegStoresBeforeThisPass = _delayedRegStores;

   // Update visitCount as it is used in this optimization and needs to be
   // correct at all intermediate stages
   visitCount = comp()->incOrResetVisitCount();
   for (TR::TreeTopIterator iter(startTree, comp()); iter != endTree; ++iter)
      {
      TR::Node *node = iter.currentTree()->getNode();

      if (node->getOpCodeValue() == TR::BBStart)
         {
         block = node->getBlock();
         if (!block->isExtensionOfPreviousBlock())
            longestPaths.clear();
         }

      int vcountLimit = MAX_VCOUNT - 3;
      if (comp()->getVisitCount() > vcountLimit)
         {
         dumpOptDetails(comp(),
            "%sVisit count %d exceeds limit %d; stopping\n",
            optDetailString(), comp()->getVisitCount(), vcountLimit);
         return 0;
         }

      if ((node->getOpCodeValue() != TR::treetop) &&
          (!node->getOpCode().isAnchor() || (node->getFirstChild()->getReferenceCount() != 1)) &&
          (!node->getOpCode().isStoreReg() || (node->getFirstChild()->getReferenceCount() != 1)) &&
          (delayedRegStoresBeforeThisPass ||
           (iter.currentTree() == block->getLastRealTreeTop()) ||
           !node->getOpCode().isStoreReg() ||
           (node->getVisitCount() == visitCount)))
         {
         if (node->getOpCode().isAnchor() && node->getFirstChild()->getOpCode().isLoadIndirect())
            anchors.push_front(CRAnchor(iter.currentTree(), block));

         TR::TransformUtil::recursivelySetNodeVisitCount(node, visitCount);
         continue;
         }

      if (node->getOpCode().isStoreReg())
         _delayedRegStores = true;

      TR::Node *child = node->getFirstChild();
      if (child->getOpCodeValue() == TR::PassThrough)
         {
         TR::Node *newChild = child->getFirstChild();
         node->setAndIncChild(0, newChild);
         newChild->incFutureUseCount();
         if (child->getReferenceCount() <= 1)
            optimizer()->prepareForNodeRemoval(child);
         child->recursivelyDecReferenceCount();
         recursivelyDecFutureUseCount(child);
         child = newChild;
         }

      bool treeTopCanBeEliminated = false;

      // If the treetop child has been seen before then it must be anchored
      // somewhere above already; so we don't need the treetop to be anchoring
      // this node (as the computation is already done at the first reference to
      // the node).
      //

      if (visitCount == child->getVisitCount())
         {
         treeTopCanBeEliminated = true;
         }
      else
         {
         TR::ILOpCode &childOpCode = child->getOpCode();
         TR::ILOpCodes opCodeValue = childOpCode.getOpCodeValue();
         bool seenConditionalBranch = false;

         bool callWithNoSideEffects = child->getOpCode().isCall() &&
              child->getSymbolReference()->getSymbol()->isResolvedMethod() &&
              child->getSymbolReference()->getSymbol()->castToResolvedMethodSymbol()->isSideEffectFree();

         if (callWithNoSideEffects)
            {
            treeTopCanBeEliminated = true;
            }
         else if (!((childOpCode.isCall() && !callWithNoSideEffects) ||
               childOpCode.isStore() ||
               ((opCodeValue == TR::New ||
                 opCodeValue == TR::anewarray ||
                 opCodeValue == TR::newarray) &&
                 child->getReferenceCount() > 1) ||
                 opCodeValue == TR::multianewarray ||
                 opCodeValue == TR::MergeNew ||
               opCodeValue == TR::checkcast ||
               opCodeValue == TR::Prefetch ||
               opCodeValue == TR::iu2l ||
               ((childOpCode.isDiv() ||
                 childOpCode.isRem()) &&
                 child->getNumChildren() == 3)))
            {
            // Perform the rather complex check to see whether it's safe
            // to disconnect the child node from the treetop
            //
            bool safeToReplaceNode = false;
            if (child->getReferenceCount() == 1)
               {
               safeToReplaceNode = true;
#ifdef J9_PROJECT_SPECIFIC
               if (child->getOpCode().isPackedExponentiation())
                  {
                  // pdexp has a possible message side effect in truncating or no significant digits left cases
                  safeToReplaceNode = false;
                  }
#endif
               if (opCodeValue == TR::loadaddr)
                  treeTopCanBeEliminated = true;
               }
            else if (!_cannotBeEliminated)
               {
               safeToReplaceNode = isSafeToReplaceNode(
                  child,
                  iter.currentTree(),
                  &seenConditionalBranch,
                  visitCount,
                  comp(),
                  &_targetTrees,
                  _cannotBeEliminated,
                  longestPaths);
               }

            if (safeToReplaceNode)
               {
               if (childOpCode.hasSymbolReference())
                  {
                  TR::SymbolReference *symRef = child->getSymbolReference();

                  if (symRef->getSymbol()->isAuto() || symRef->getSymbol()->isParm())
                     treeTopCanBeEliminated = true;
                  else
                     {
                     if (childOpCode.isLoad() ||
                         (opCodeValue == TR::loadaddr) ||
                         (opCodeValue == TR::instanceof) ||
                         (((opCodeValue == TR::New)  ||
                            (opCodeValue == TR::anewarray ||
                              opCodeValue == TR::newarray)) &&
                          ///child->getFirstChild()->isNonNegative()))
                           child->markedAllocationCanBeRemoved()))
                       //        opCodeValue == TR::multianewarray ||
                       //        opCodeValue == TR::MergeNew)
                        treeTopCanBeEliminated = true;
                     }
                  }
               else
                  treeTopCanBeEliminated = true;
               }
            }

         // Fix for the case when a float to non-float conversion node swings
         // down past a branch on IA32; this would cause an FP value to be
         // commoned across a branch where there was none originally; this
         // causes problems as a value is left on the stack.
         //
         if (treeTopCanBeEliminated &&
             seenConditionalBranch)
            {
            if (!cg()->getSupportsJavaFloatSemantics())
               {
               if (child->getOpCode().isConversion() ||
                   child->getOpCode().isBooleanCompare())
                 {
                 if (child->getFirstChild()->getOpCode().isFloatingPoint() &&
                     !child->getOpCode().isFloatingPoint())
                     treeTopCanBeEliminated = false;
                 }
               }
            }

         if (treeTopCanBeEliminated)
            {
            TR::NodeChecklist visited(comp());
            bool containsFloatingPoint = false;
            for (int32_t i = 0; i < child->getNumChildren(); ++i)
               {
               // Anchor nodes with reference count > 1
               //
               bool highGlobalIndex = false;
               if (fixUpTree(child->getChild(i), iter.currentTree(), visited, highGlobalIndex, self(), visitCount))
                  containsFloatingPoint = true;
               if (highGlobalIndex)
                  {
                  dumpOptDetails(comp(),
                     "%sGlobal index limit exceeded; stopping\n",
                     optDetailString());
                  return 0;
                  }
               }

            if (seenConditionalBranch &&
                containsFloatingPoint)
               {
               if (!cg()->getSupportsJavaFloatSemantics())
                  treeTopCanBeEliminated = false;
               }
            }
         }

      // Update visitCount as it is used in this optimization and needs to be
      // correct at all intermediate stages
      //
      if (!treeTopCanBeEliminated)
         TR::TransformUtil::recursivelySetNodeVisitCount(node, visitCount);

      if (treeTopCanBeEliminated)
         {
         TR::TreeTop *prevTree = iter.currentTree()->getPrevTreeTop();
         TR::TreeTop *nextTree = iter.currentTree()->getNextTreeTop();

         if (!node->getOpCode().isStoreReg() || (node->getFirstChild()->getReferenceCount() == 1))
            {
            // Actually going to remove the treetop now
            //
            if (performTransformation(comp(), "%sRemove tree : [" POINTER_PRINTF_FORMAT "] ([" POINTER_PRINTF_FORMAT "] = %s)\n", optDetailString(), node, node->getFirstChild(), node->getFirstChild()->getOpCode().getName()))
               {
               prevTree->join(nextTree);
               optimizer()->prepareForNodeRemoval(node);
               ///child->recursivelyDecReferenceCount();
               node->recursivelyDecReferenceCount();
               recursivelyDecFutureUseCount(child);
               iter.jumpTo(prevTree);
               if (child->getReferenceCount() == 1)
                  requestOpt(OMR::treeSimplification, true, block);

               if (nextTree->getNode()->getOpCodeValue() == TR::Goto
                   && prevTree->getNode()->getOpCodeValue() == TR::BBStart
                   && !prevTree->getNode()->getBlock()->isExtensionOfPreviousBlock())
                  {
                  requestOpt(
                     OMR::redundantGotoElimination,
                     prevTree->getNode()->getBlock());
                  }
               }
            }
         else
            {
            if (performTransformation(comp(), "%sMove tree : [" POINTER_PRINTF_FORMAT "]([" POINTER_PRINTF_FORMAT "] = %s) to end of block\n", optDetailString(), node, node->getFirstChild(), node->getFirstChild()->getOpCode().getName()))
               {
               prevTree->join(nextTree);
               node->setVisitCount(visitCount);

               TR::TreeTop *lastTree = findLastTreetop(block, prevTree);
               TR::TreeTop *prevLastTree = lastTree->getPrevTreeTop();

               TR::TreeTop *cursorTreeTop = nextTree;
               while (cursorTreeTop != lastTree)
                  {
                  if (cursorTreeTop->getNode()->getOpCode().isStoreReg() &&
                      (cursorTreeTop->getNode()->getGlobalRegisterNumber() == iter.currentTree()->getNode()->getGlobalRegisterNumber()))
                     {
                     lastTree = cursorTreeTop;
                     prevLastTree = lastTree->getPrevTreeTop();
                     break;
                     }

                  cursorTreeTop = cursorTreeTop->getNextTreeTop();
                  }

               if (lastTree->getNode()->getOpCodeValue() == TR::BBStart)
                  {
                  prevLastTree = lastTree;
                  lastTree = block->getExit();
                  }

               TR::Node *lastNode = lastTree->getNode();
               TR::Node *prevLastNode = prevLastTree->getNode();

               if (lastNode->getOpCode().isIf() && !lastNode->getOpCode().isCompBranchOnly() &&
                   prevLastNode->getOpCode().isStoreReg() &&
                   ((prevLastNode->getFirstChild() == lastNode->getFirstChild()) ||
                    (prevLastNode->getFirstChild() == lastNode->getSecondChild())))
                  {
                  lastTree = prevLastTree;
                  prevLastTree = lastTree->getPrevTreeTop();
                  }

               prevLastTree->join(iter.currentTree());
               iter.currentTree()->join(lastTree);

               iter.jumpTo(prevTree);
               requestOpt(OMR::treeSimplification, true, block);
               }
            }
         }
      }

   for (auto it = anchors.begin(); it != anchors.end(); ++it)
      {
      TR::Node *anchor = it->tree->getNode();
      TR::Node *load = anchor->getChild(0);
      if (load->getReferenceCount() > 1)
         continue;

      // We can eliminate the indirect load immediately, but for the moment the
      // subtree providing the base object has to be anchored.

      TR::Node *heapBase = anchor->getChild(1);

      TR::Node::recreate(anchor, TR::treetop);
      anchor->setAndIncChild(0, load->getChild(0));
      anchor->setChild(1, NULL);
      anchor->setNumChildren(1);

      if (!heapBase->getOpCode().isLoadConst())
         {
         it->tree->insertAfter(
            TR::TreeTop::create(
               comp(),
               TR::Node::create(heapBase, TR::treetop, 1, heapBase)));
         }

      load->recursivelyDecReferenceCount();
      heapBase->recursivelyDecReferenceCount();

      // A later pass of dead trees can likely move (or even remove) the base
      // object expression.

      requestOpt(OMR::deadTreesElimination, true, it->block);
      }

   return 1; // actual cost
   }
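The removal step itself (prevTree->join(nextTree)) is an ordinary doubly-linked-list splice. A toy standalone version of the pattern — TreeTop here is a minimal stand-in, not the OMR class:

#include <cstdio>

struct TreeTop
   {
   TreeTop *prev = nullptr;
   TreeTop *next = nullptr;
   int id = 0;

   // In the spirit of TR::TreeTop::join: link this tree directly to t,
   // dropping whatever lay between them out of the trace.
   void join(TreeTop *t)
      {
      next = t;
      if (t)
         t->prev = this;
      }
   };

int main()
   {
   TreeTop a, b, c;
   a.id = 1; b.id = 2; c.id = 3;
   a.join(&b);
   b.join(&c);

   a.join(&c);  // unlink b, as prevTree->join(nextTree) does above
   for (TreeTop *t = &a; t != nullptr; t = t->next)
      std::printf("%d ", t->id);   // prints "1 3"
   std::printf("\n");
   return 0;
   }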
Example #10
// Returns true if there is any constraint on the move
bool TR_LocalLiveRangeReduction::isAnySymInDefinedOrUsedBy(TR_TreeRefInfo *currentTreeRefInfo, TR::Node *currentNode, TR_TreeRefInfo *movingTreeRefInfo )
   {
   TR::Node *movingNode = movingTreeRefInfo->getTreeTop()->getNode();
   // ignore anchors
   //
   if (movingNode->getOpCode().isAnchor())
      movingNode = movingNode->getFirstChild();

   TR::ILOpCode &opCode = currentNode->getOpCode();

   ////if ((opCode.getOpCodeValue() == TR::monent) || (opCode.getOpCodeValue() == TR::monexit))
   if (nodeMaybeMonitor(currentNode))
      {
      if (trace())
    	 traceMsg(comp(),"cannot move %p beyond monitor %p\n",movingNode,currentNode);
      return true;
      }

   // Don't move gc points or things across gc points
   //
   if (movingNode->canGCandReturn() ||
         currentNode->canGCandReturn())
      {
      if (trace())
         traceMsg(comp(), "cannot move gc points %p past %p\n", movingNode, currentNode);
      return true;
      }

   // Don't move checks or calls at all
   //
   if (containsCallOrCheck(movingTreeRefInfo,movingNode))
      {
      if (trace())
    	   traceMsg(comp(),"cannot move check or call %s\n", getDebug()->getName(movingNode));
      return true;
      }

   // Don't move object header store past a GC point
   //
   if ((currentNode->getOpCode().isWrtBar() || currentNode->canCauseGC()) && mayBeObjectHeaderStore(movingNode, fe()))
      {
      if (trace())
    	   traceMsg(comp(),"cannot move possible object header store %s past GC point %s\n", getDebug()->getName(movingNode), getDebug()->getName(currentNode));
      return true;
      }

   if (TR::Compiler->target.cpu.isPower() && opCode.getOpCodeValue() == TR::allocationFence)
      {
      // Can't move allocations past flushes
      if (movingNode->getOpCodeValue() == TR::treetop &&
          movingNode->getFirstChild()->getOpCode().isNew() &&
          (currentNode->getAllocation() == NULL ||
           currentNode->getAllocation() == movingNode->getFirstChild()))
         {
         if (trace())
            {
            traceMsg(comp(),"cannot move %p beyond flush %p - ", movingNode, currentNode);
            if (currentNode->getAllocation() == NULL)
               traceMsg(comp(),"(flush with null allocation)\n");
            else
               traceMsg(comp(),"(flush for allocation %p)\n", currentNode->getAllocation());
            }
         return true;
         }

      // Can't move certain stores past flushes
      // Exclude all indirect stores; they may be for stack allocs, in which case the flush is needed at least as a scheduling barrier
      // Direct stores to autos and parms are the only safe candidates
      if (movingNode->getOpCode().isStoreIndirect() ||
          (movingNode->getOpCode().isStoreDirect() && !movingNode->getSymbol()->isParm() && !movingNode->getSymbol()->isAuto()))
         {
         if (trace())
            traceMsg(comp(),"cannot move %p beyond flush %p - (flush for possible stack alloc)", movingNode, currentNode);
         return true;
         }
      }

   for (int32_t i = 0; i < currentNode->getNumChildren(); i++)
      {
      TR::Node *child = currentNode->getChild(i);

      // Any node that has side effects (like a call or newarray) cannot be evaluated in the middle of the tree.
      if (movingTreeRefInfo->getFirstRefNodesList()->find(child))
         {
         // for calls and unresolved symbols that are not under a check

         if (child->exceptionsRaised() ||
             (child->getOpCode().hasSymbolReference() && child->getSymbolReference()->isUnresolved()))
            {
            if (trace())
               traceMsg(comp(),"cannot move %p beyond %p - cannot change evaluation point of %p\n ",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),child);
            return true;
            }

         else if(movingNode->getOpCode().isStore())
            {
            TR::SymbolReference *stSymRef = movingNode->getSymbolReference();
            int32_t stSymRefNum = stSymRef->getReferenceNumber();
            if ((comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::vftSymbol))||
                (comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::contiguousArraySizeSymbol))||
                (comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::discontiguousArraySizeSymbol))||
                (stSymRef == comp()->getSymRefTab()->findHeaderFlagsSymbolRef())||
                (stSymRef->getSymbol() == comp()->getSymRefTab()->findGenericIntShadowSymbol()))

               return true;
            }

         else if (movingNode->getOpCode().isResolveOrNullCheck())
            {
            if (trace())
               traceMsg(comp(),"cannot move %p beyond %p - node %p under ResolveOrNullCheck",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),currentNode);
            return true;
            }

         else if (TR::Compiler->target.is64Bit() &&
                  movingNode->getOpCode().isBndCheck() &&
                  ((opCode.getOpCodeValue() == TR::i2l) || (opCode.getOpCodeValue() == TR::iu2l)) &&
                  !child->isNonNegative())
            {
            if (trace())
               traceMsg(comp(),"cannot move %p beyond %p - changing the eval point of %p will cause an extra cg instruction ",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),currentNode);
            return true;
            }
         }

      // don't recurse over nodes that are not the first reference
      if (child->getReferenceCount()==1 || currentTreeRefInfo->getFirstRefNodesList()->find(child))
         {
         if (isAnySymInDefinedOrUsedBy(currentTreeRefInfo, child, movingTreeRefInfo ))
            return true;
         }
      }

   return false;
   }
Example #11
void TR_LocalLiveRangeReduction::populatePotentialDeps(TR_TreeRefInfo *treeRefInfo,TR::Node *node)
   {
   TR::ILOpCode &opCode = node->getOpCode();
   if (node->getOpCode().hasSymbolReference())
      {
      TR::SymbolReference *symRef = node->getSymbolReference();
      int32_t symRefNum = symRef->getReferenceNumber();

      //set defSym - all symbols that might be written

      if (opCode.isCall() || opCode.isResolveCheck()|| opCode.isStore() || node->mightHaveVolatileSymbolReference())
         {

         bool isCallDirect = false;
         if (node->getOpCode().isCallDirect())
            isCallDirect = true;

         if (!symRef->getUseDefAliases(isCallDirect).isZero(comp()))
            {
            TR::SparseBitVector useDefAliases(comp()->allocator());
            symRef->getUseDefAliases(isCallDirect).getAliases(useDefAliases);
            TR::SparseBitVector::Cursor aliasCursor(useDefAliases);
            for (aliasCursor.SetToFirstOne(); aliasCursor.Valid(); aliasCursor.SetToNextOne())
               {
               int32_t nextAlias = aliasCursor;
               treeRefInfo->getDefSym()->set(nextAlias);
               }
            }

         if (opCode.isStore())
            treeRefInfo->getDefSym()->set(symRefNum);
         }
      //set useSym - all symbols that are used
      if (opCode.canRaiseException())
         {
         TR::SparseBitVector useAliases(comp()->allocator());
         symRef->getUseonlyAliases().getAliases(useAliases);
            {
            TR::SparseBitVector::Cursor aliasesCursor(useAliases);
            for (aliasesCursor.SetToFirstOne(); aliasesCursor.Valid(); aliasesCursor.SetToNextOne())
               {
               int32_t nextAlias = aliasesCursor;
               treeRefInfo->getUseSym()->set(nextAlias);
               }
            }
         }
      if (opCode.isLoadVar() || (opCode.getOpCodeValue() == TR::loadaddr))
         {
         treeRefInfo->getUseSym()->set(symRefNum);
         }

      }
   for (int32_t i = 0; i < node->getNumChildren(); i++)
      {
      TR::Node *child = node->getChild(i);

      //don't recurse over references (nodes which are not the first reference)
      //
      if (child->getReferenceCount()==1 || treeRefInfo->getFirstRefNodesList()->find(child))
         populatePotentialDeps(treeRefInfo,child );
      }
   return;
   }
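The def and use sets populated here are bit sets indexed by symbol-reference number. A compact sketch of the classification, with std::bitset standing in for the OMR containers (names illustrative):

#include <bitset>

struct TreeRefInfo
   {
   std::bitset<128> defSym;  // symbols this tree might write
   std::bitset<128> useSym;  // symbols this tree reads
   };

// Stores define their own symbol plus everything aliased with it;
// loads (and loadaddr) simply use their own symbol.
static void classify(TreeRefInfo &info, bool isStore, bool isLoadVar,
                     int symRefNum, const std::bitset<128> &useDefAliases)
   {
   if (isStore)
      {
      info.defSym |= useDefAliases;
      info.defSym.set(symRefNum);
      }
   if (isLoadVar)
      info.useSym.set(symRefNum);
   }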
Example #12
void TR_ReachingDefinitions::initializeGenAndKillSetInfoForNode(TR::Node *node, TR_UseDefInfo::BitVector &defsKilled, bool seenException, int32_t blockNum, TR::Node *parent)
   {
   // Update gen and kill info for nodes in this subtree
   //
   int32_t i;

   if (node->getVisitCount() == comp()->getVisitCount())
      return;
   node->setVisitCount(comp()->getVisitCount());

   // Process the children first
   //
   for (i = node->getNumChildren()-1; i >= 0; --i)
      {
      initializeGenAndKillSetInfoForNode(node->getChild(i), defsKilled, seenException, blockNum, node);
      }

   bool irrelevantStore = false;
   scount_t nodeIndex = node->getLocalIndex();
   if (nodeIndex <= 0)
      {
      if (node->getOpCode().isStore() &&
          node->getSymbol()->isAutoOrParm() &&
          node->storedValueIsIrrelevant())
         {
         irrelevantStore = true;
         }
      else
         return;
      }

   bool foundDefsToKill = false;
   int32_t numDefNodes = 0;
   defsKilled.Clear();

   TR::ILOpCode &opCode = node->getOpCode();
   TR::SymbolReference *symRef;
   TR::Symbol *sym;
   uint16_t symIndex;
   uint32_t num_aliases;

   if (_useDefInfo->_useDefForRegs &&
        (opCode.isLoadReg() ||
       opCode.isStoreReg()))
      {
      sym = NULL;
      symRef = NULL;
      symIndex = _useDefInfo->getNumSymbols() + node->getGlobalRegisterNumber();
      num_aliases = 1;
      }
   else
      {
      symRef = node->getSymbolReference();
      sym = symRef->getSymbol();
      symIndex = symRef->getSymbol()->getLocalIndex();
      num_aliases = _useDefInfo->getNumAliases(symRef, _aux);
      }


   if (symIndex == NULL_USEDEF_SYMBOL_INDEX || node->getOpCode().isCall() || node->getOpCode().isFence() ||
       (parent && parent->getOpCode().isResolveCheck() && num_aliases > 1))
      {
      // A call or unresolved reference is a definition of all
      // symbols it is aliased with
      //
      numDefNodes = num_aliases;

      //for all symbols that are a mustdef of a call, kill defs of those symbols
      if (node->getOpCode().isCall())
         foundDefsToKill = false;
      }
   else if (irrelevantStore || _useDefInfo->isExpandedDefIndex(nodeIndex))
      {
      // DefOnly node defines all symbols it is aliased with
      // UseDef node(load) defines only the symbol itself
      //

      if (!irrelevantStore)
         {
         numDefNodes = num_aliases;
         numDefNodes = _useDefInfo->isExpandedUseDefIndex(nodeIndex) ? 1 : numDefNodes;

         if (!_useDefInfo->getDefsForSymbolIsZero(symIndex, _aux) &&
             (!sym ||
             (!sym->isShadow() &&
             !sym->isMethod())))
            {
            foundDefsToKill = true;
            // defsKilled ORed with defsForSymbol(symIndex);
            _useDefInfo->getDefsForSymbol(defsKilled, symIndex, _aux);
            }
         if (node->getOpCode().isStoreIndirect())
            {
            int32_t memSymIndex = _useDefInfo->getMemorySymbolIndex(node);
            if (memSymIndex != -1 &&
                !_useDefInfo->getDefsForSymbolIsZero(memSymIndex, _aux))
               {
               foundDefsToKill = true;
               // defsKilled ORed with defsForSymbol(memSymIndex);
               _useDefInfo->getDefsForSymbol(defsKilled, memSymIndex, _aux);
               }
            }
         }
      else if (!_useDefInfo->getDefsForSymbolIsZero(symIndex, _aux))
         {
         numDefNodes = 1;
         foundDefsToKill = true;
         // defsKilled ORed with defsForSymbol(symIndex);
         _useDefInfo->getDefsForSymbol(defsKilled, symIndex, _aux);
         }
      }
   else
      {
      numDefNodes = 0;
      }

   if (foundDefsToKill)
      {
      if (_regularKillSetInfo[blockNum] == NULL)
         allocateContainer(&_regularKillSetInfo[blockNum]);
      *_regularKillSetInfo[blockNum] |= defsKilled;
      if (!seenException)
         {
         if (_exceptionKillSetInfo[blockNum] == NULL)
            allocateContainer(&_exceptionKillSetInfo[blockNum]);
         *_exceptionKillSetInfo[blockNum] |= defsKilled;
         }
      }
   if (_regularGenSetInfo[blockNum] == NULL)
     allocateContainer(&_regularGenSetInfo[blockNum]);
   else if (foundDefsToKill)
      *_regularGenSetInfo[blockNum] -= defsKilled;

   if (_exceptionGenSetInfo[blockNum] == NULL)
      allocateContainer(&_exceptionGenSetInfo[blockNum]);
   else if (foundDefsToKill && !seenException)
      *_exceptionGenSetInfo[blockNum] -= defsKilled;

   if (!irrelevantStore)
      {
      for (i = 0; i < numDefNodes; ++i)
         {
         _regularGenSetInfo[blockNum]->set(nodeIndex+i);
         _exceptionGenSetInfo[blockNum]->set(nodeIndex+i);
         }
      }
   else // fake up the method entry def as the def index to "gen" to avoid a use without a def completely
      {
      _regularGenSetInfo[blockNum]->set(sym->getLocalIndex());
      _exceptionGenSetInfo[blockNum]->set(sym->getLocalIndex());
      }
   }
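These gen and kill sets feed the classic reaching-definitions transfer function, out(B) = gen(B) ∪ (in(B) − kill(B)). A minimal sketch of that update, with std::bitset in place of the OMR containers:

#include <bitset>

// One application of the reaching-definitions transfer function for a
// block: definitions leaving the block are those generated in it, plus
// incoming definitions it does not kill.
static std::bitset<256> transfer(const std::bitset<256> &in,
                                 const std::bitset<256> &gen,
                                 const std::bitset<256> &kill)
   {
   return gen | (in & ~kill);
   }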
Example #13
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

#if defined(PYTHON) && 0
   // For Python, store the instruction that contains the GC map at this site into
   // the frame object.
   //
   TR::SymbolReference *frameObjectSymRef =
      comp()->getSymRefTab()->findOrCreateAutoSymbol(comp()->getMethodSymbol(), 0, TR::Address, true, false, true);

   TR::Register *frameObjectRegister = cg()->allocateRegister();
   generateRegMemInstruction(
         L8RegMem,
         callNode,
         frameObjectRegister,
         generateX86MemoryReference(frameObjectSymRef, cg()),
         cg());

   TR::RealRegister *espReal = cg()->machine()->getX86RealRegister(TR::RealRegister::esp);
   TR::Register *gcMapPCRegister = cg()->allocateRegister();

   generateRegMemInstruction(
         LEA8RegMem,
         callNode,
         gcMapPCRegister,
         generateX86MemoryReference(espReal, -8, cg()),
         cg());

   // Use "volatile" registers across the call.  Once proper register map support
   // is implemented, r14 and r15 will no longer be volatile and a different pair
   // should be chosen.
   //
   TR::RegisterDependencyConditions *gcMapDeps = generateRegisterDependencyConditions(0, 2, cg());
   gcMapDeps->addPostCondition(frameObjectRegister, TR::RealRegister::r14, cg());
   gcMapDeps->addPostCondition(gcMapPCRegister, TR::RealRegister::r15, cg());
   gcMapDeps->stopAddingPostConditions();

   generateMemRegInstruction(
         S8MemReg,
         callNode,
         generateX86MemoryReference(frameObjectRegister, fe()->getPythonGCMapPCOffsetInFrame(), cg()),
         gcMapPCRegister,
         gcMapDeps,
         cg());

   cg()->stopUsingRegister(frameObjectRegister);
   cg()->stopUsingRegister(gcMapPCRegister);
#endif

   TR::Instruction *instr;
   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (TR::Options::getCmdLineOptions()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptrj_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
Example #14
TR_ExpressionsSimplification::LoopInfo*
TR_ExpressionsSimplification::findLoopInfo(TR_RegionStructure* region)
   {
   ListIterator<TR::CFGEdge> exitEdges(&region->getExitEdges());

   if (region->getExitEdges().getSize() != 1)
      {
      if (trace())
         traceMsg(comp(), "Region with more than 1 exit edges can't be handled\n");
      return 0;
      }

   TR_StructureSubGraphNode* exitNode = toStructureSubGraphNode(exitEdges.getFirst()->getFrom());

   if (!exitNode->getStructure()->asBlock())
      {
      if (trace())
         traceMsg(comp(), "The exit block can't be found\n");
      return 0;
      }

   TR::Block *exitBlock = exitNode->getStructure()->asBlock()->getBlock();
   TR::Node *lastTreeInExitBlock = exitBlock->getLastRealTreeTop()->getNode();

   if (trace())
      {
      traceMsg(comp(), "The exit block is %d\n", exitBlock->getNumber());
      traceMsg(comp(), "The branch node is %p\n", lastTreeInExitBlock);
      }


   if (!lastTreeInExitBlock->getOpCode().isBranch())
      {
      if (trace())
         traceMsg(comp(), "The branch node couldn't be found\n");
      return 0;
      }

   if (lastTreeInExitBlock->getNumChildren() < 2)
      {
      if (trace())
         traceMsg(comp(), "The branch node has less than 2 children\n");
      return 0;
      }

   TR::Node *firstChildOfLastTree = lastTreeInExitBlock->getFirstChild();
   TR::Node *secondChildOfLastTree = lastTreeInExitBlock->getSecondChild();

   if (!firstChildOfLastTree->getOpCode().hasSymbolReference())
      {
      if (trace())
         traceMsg(comp(), "The branch node's first child node %p - its opcode does not have a symbol reference\n", firstChildOfLastTree);
      return 0;
      }

   TR::SymbolReference *firstChildSymRef = firstChildOfLastTree->getSymbolReference();

   if (trace())
      traceMsg(comp(), "Symbol Reference: %p Symbol: %p\n", firstChildSymRef, firstChildSymRef->getSymbol());

   // Locate the induction variable that matches with the exit node symbol
   //
   TR_InductionVariable *indVar = region->findMatchingIV(firstChildSymRef);
   if (!indVar) return 0;

   if (!indVar->getIncr()->asIntConst())
      {
      if (trace())
         traceMsg(comp(), "Increment is not a constant\n");
      return 0;
      }

   int32_t increment = indVar->getIncr()->getLowInt();

   _visitCount = comp()->incVisitCount();
   bool indVarWrittenAndUsedUnexpectedly = false;
   if (firstChildOfLastTree->getReferenceCount() > 1)
      {
      TR::TreeTop *cursorTreeTopInExitBlock = exitBlock->getEntry();
      TR::TreeTop *exitTreeTopInExitBlock = exitBlock->getExit();

      bool loadSeen = false;
      while (cursorTreeTopInExitBlock != exitTreeTopInExitBlock)
         {
         TR::Node *cursorNode = cursorTreeTopInExitBlock->getNode();
         if (checkForLoad(cursorNode, firstChildOfLastTree))
            loadSeen = true;

         if (!cursorNode->getOpCode().isStore() &&
             (cursorNode->getNumChildren() > 0))
           cursorNode = cursorNode->getFirstChild();

         if (cursorNode->getOpCode().isStore() &&
             (cursorNode->getSymbolReference() == firstChildSymRef))
            {
            indVarWrittenAndUsedUnexpectedly = true;
            if ((cursorNode->getFirstChild() == firstChildOfLastTree) ||
                !loadSeen)
               indVarWrittenAndUsedUnexpectedly = false;
            else
               break;
            }

         cursorTreeTopInExitBlock = cursorTreeTopInExitBlock->getNextTreeTop();
         }
      }

   if (indVarWrittenAndUsedUnexpectedly)
      {
      return 0;
      }

   int32_t lowerBound;
   int32_t upperBound = 0;
   TR::Node *bound = 0;
   bool equals = false;

   switch(lastTreeInExitBlock->getOpCodeValue())
      {
      case TR::ificmplt:
      case TR::ificmpgt:
         equals = true;
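         // deliberate fall-through: the strict compares reuse the handling
         // below, with the equals flag recording the strictness difference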
      case TR::ificmple:
      case TR::ificmpge:
         if (!(indVar->getEntry() && indVar->getEntry()->asIntConst()))
            {
            if (trace())
               traceMsg(comp(), "Entry value is not a constant\n");
            return 0;
            }
         lowerBound = indVar->getEntry()->getLowInt();

         if (secondChildOfLastTree->getOpCode().isLoadConst())
            {
            upperBound = secondChildOfLastTree->getInt();
            }
         else if (secondChildOfLastTree->getOpCode().isLoadVar())
            {
            bound = secondChildOfLastTree;
            }
         else
            {
            if (trace())
               traceMsg(comp(), "Second child is not a const or a load\n");
            return 0;
            }
         return new (trStackMemory()) LoopInfo(bound, lowerBound, upperBound, increment, equals);


      default:
         if (trace())
            traceMsg(comp(), "The condition has not been implemeted\n");
         return 0;
      }

   return 0;
   }
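What findLoopInfo collects — a constant entry value, a constant increment, a bound, and whether the exit compare is strict — is exactly the data needed to reason about a counted loop. As a hedged illustration of why those pieces suffice (standalone arithmetic, not code from this pass):

#include <cstdint>

// Iteration count of: for (i = lo; i < hi; i += step)   -- strict form.
// Assumes step > 0 and no overflow; returns 0 for loops that never run.
static int64_t tripCountStrict(int64_t lo, int64_t hi, int64_t step)
   {
   if (lo >= hi)
      return 0;
   return (hi - lo + step - 1) / step;   // ceil((hi - lo) / step)
   }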
Example #15
// Build arguments for system linkage dispatch.
//
int32_t TR::AMD64SystemLinkage::buildArgs(
      TR::Node *callNode,
      TR::RegisterDependencyConditions *deps)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
   TR::RealRegister::RegNum noReg = TR::RealRegister::NoReg;
   TR::RealRegister *espReal = machine()->getX86RealRegister(TR::RealRegister::esp);
   int32_t firstNodeArgument = callNode->getFirstArgumentIndex();
   int32_t lastNodeArgument = callNode->getNumChildren() - 1;
   int32_t offset = 0;
   int32_t sizeOfOutGoingArgs = 0;
   uint16_t numIntArgs = 0,
            numFloatArgs = 0;
   int32_t first, last, direction;
   int32_t numCopiedRegs = 0;
   TR::Register *copiedRegs[TR::X86LinkageProperties::MaxArgumentRegisters];

   if (getProperties().passArgsRightToLeft())
      {
      first = lastNodeArgument;
      last  = firstNodeArgument - 1;
      direction = -1;
      }
   else
      {
      first = firstNodeArgument;
      last  = lastNodeArgument + 1;
      direction = 1;
      }

   // If the dispatch is indirect we must add the VFT register to the preconditions
   // so that it gets register assigned with the other preconditions to the call.
   //
   if (callNode->getOpCode().isIndirect())
      {
      TR::Node *vftChild = callNode->getFirstChild();
      TR_ASSERT(vftChild->getRegister(), "expecting VFT child to be evaluated");
      TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
      deps->addPreCondition(vftChild->getRegister(), scratchRegIndex, cg());
      }

   int32_t i;
   for (i = first; i != last; i += direction)
      {
      TR::parmLayoutResult layoutResult;
      TR::RealRegister::RegNum rregIndex = noReg;
      TR::Node *child = callNode->getChild(i);

      layoutParm(child, sizeOfOutGoingArgs, numIntArgs, numFloatArgs, layoutResult);

      if (layoutResult.abstract & TR::parmLayoutResult::IN_LINKAGE_REG_PAIR)
         {
         // TODO: the AMD64 SysV ABI might put a struct into a pair of linkage registers
         TR_ASSERT(false, "linkage_reg_pair support is not implemented yet.\n");
         }
      else if (layoutResult.abstract & TR::parmLayoutResult::IN_LINKAGE_REG)
         {
         TR_RegisterKinds regKind = layoutResult.regs[0].regKind;
         uint32_t regIndex = layoutResult.regs[0].regIndex;
         TR_ASSERT(regKind == TR_GPR || regKind == TR_FPR, "linkage registers include only TR_GPR and TR_FPR\n");
         rregIndex = (regKind == TR_FPR) ? getProperties().getFloatArgumentRegister(regIndex): getProperties().getIntegerArgumentRegister(regIndex);
         }
      else
         {
         offset = layoutResult.offset;
         }

      TR::Register *vreg;
      vreg = cg()->evaluate(child);

      bool needsStackOffsetUpdate = false;
      if (rregIndex != noReg)
         {
         // For NULL JNI reference parameters, it is possible that the NULL value will be evaluated into
         // a different register than the child.  In that case it is not necessary to copy the temporary scratch
         // register across the call.
         //
         if ((child->getReferenceCount() > 1) &&
             (vreg == child->getRegister()))
            {
            TR::Register *argReg = cg()->allocateRegister();
            if (vreg->containsCollectedReference())
               argReg->setContainsCollectedReference();
            generateRegRegInstruction(TR::Linkage::movOpcodes(RegReg, movType(child->getDataType())), child, argReg, vreg, cg());
            vreg = argReg;
            copiedRegs[numCopiedRegs++] = vreg;
            }

         deps->addPreCondition(vreg, rregIndex, cg());
         }
      else
         {
         // Ideally, we would like to push rather than move
         generateMemRegInstruction(TR::Linkage::movOpcodes(MemReg, fullRegisterMovType(vreg)),
                                   child,
                                   generateX86MemoryReference(espReal, offset, cg()),
                                   vreg,
                                   cg());
         }

      cg()->decReferenceCount(child);
      }

   // Now that we're finished making the preconditions, all the interferences
   // are established and we can kill these regs.
   //
   for (i = 0; i < numCopiedRegs; i++)
      cg()->stopUsingRegister(copiedRegs[i]);

   deps->stopAddingPreConditions();

   return sizeOfOutGoingArgs;
   }
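layoutParm hides the ABI classification. On x86-64 System V, scalar arguments go to the first six integer registers or first eight XMM registers, and the rest to the stack. A simplified standalone classifier, deliberately ignoring aggregates — which is where the real ABI, and the linkage-register-pair TODO above, gets hard:

#include <cstdint>

struct ParmSlot
   {
   bool     inRegister;
   uint32_t regIndex;     // meaningful when inRegister
   int32_t  stackOffset;  // meaningful when !inRegister
   };

// Simplified System V AMD64 classification for scalar parameters.
static ParmSlot layoutScalarParm(bool isFloat, uint16_t &numIntArgs,
                                 uint16_t &numFloatArgs,
                                 int32_t &sizeOfOutGoingArgs)
   {
   const uint16_t maxIntRegs   = 6;  // rdi, rsi, rdx, rcx, r8, r9
   const uint16_t maxFloatRegs = 8;  // xmm0..xmm7

   if (isFloat && numFloatArgs < maxFloatRegs)
      return ParmSlot{true, numFloatArgs++, 0};
   if (!isFloat && numIntArgs < maxIntRegs)
      return ParmSlot{true, numIntArgs++, 0};

   ParmSlot slot{false, 0, sizeOfOutGoingArgs};
   sizeOfOutGoingArgs += 8;  // one 8-byte stack slot (ignoring alignment)
   return slot;
   }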
Example #16
TR::Register *TR::ARM64SystemLinkage::buildDirectDispatch(TR::Node *callNode)
   {
   TR::SymbolReference *callSymRef = callNode->getSymbolReference();

   const TR::ARM64LinkageProperties &pp = getProperties();
   TR::RealRegister *sp = cg()->machine()->getRealRegister(pp.getStackPointerRegister());

   TR::RegisterDependencyConditions *dependencies =
      new (trHeapMemory()) TR::RegisterDependencyConditions(
         pp.getNumberOfDependencyGPRegisters(),
         pp.getNumberOfDependencyGPRegisters(), trMemory());

   int32_t totalSize = buildArgs(callNode, dependencies);
   if (totalSize > 0)
      {
      if (constantIsUnsignedImm12(totalSize))
         {
         generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::subimmx, callNode, sp, sp, totalSize);
         }
      else
         {
         TR_ASSERT_FATAL(false, "Too many arguments.");
         }
      }

   TR::MethodSymbol *callSymbol = callSymRef->getSymbol()->castToMethodSymbol();
   generateImmSymInstruction(cg(), TR::InstOpCode::bl, callNode,
      (uintptr_t)callSymbol->getMethodAddress(),
      dependencies, callSymRef ? callSymRef : callNode->getSymbolReference(), NULL);

   cg()->machine()->setLinkRegisterKilled(true);

   if (totalSize > 0)
      {
      if (constantIsUnsignedImm12(totalSize))
         {
         generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::addimmx, callNode, sp, sp, totalSize);
         }
      else
         {
         TR_ASSERT_FATAL(false, "Too many arguments.");
         }
      }

   TR::Register *retReg;
   switch(callNode->getOpCodeValue())
      {
      case TR::icall:
      case TR::iucall:
         retReg = dependencies->searchPostConditionRegister(
                     pp.getIntegerReturnRegister());
         break;
      case TR::lcall:
      case TR::lucall:
      case TR::acall:
         retReg = dependencies->searchPostConditionRegister(
                     pp.getLongReturnRegister());
         break;
      case TR::fcall:
      case TR::dcall:
         retReg = dependencies->searchPostConditionRegister(
                     pp.getFloatReturnRegister());
         break;
      case TR::call:
         retReg = NULL;
         break;
      default:
         retReg = NULL;
         TR_ASSERT(false, "Unsupported direct call Opcode.");
      }

   callNode->setRegister(retReg);
   return retReg;
   }
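The constantIsUnsignedImm12 guard reflects the AArch64 encoding: ADD/SUB (immediate) carries a 12-bit unsigned immediate, optionally shifted left by 12 bits. Frame adjustments beyond that would need a scratch register, which this linkage declines via the fatal assert. A sketch of the unshifted check (assumed to match the OMR helper's intent):

#include <cstdint>

// AArch64 ADD/SUB (immediate) takes a 12-bit unsigned immediate, 0..4095
// (the shifted-by-12 form is ignored here for simplicity).
static bool isUnsignedImm12(int64_t value)
   {
   return value >= 0 && value < (1 << 12);
   }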
Example #17
TR_BitVector *
addVeryRefinedCallAliasSets(TR::ResolvedMethodSymbol * methodSymbol, TR_BitVector * aliases, List<void> * methodsPeeked)
   {
   TR::Compilation *comp = TR::comp();

   void * methodId = methodSymbol->getResolvedMethod()->getPersistentIdentifier();
   if (methodsPeeked->find(methodId))
      {
      // This can't be allocated into the alias region as it must be accessed across optimizations
      TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
      *heapAliases |= *aliases;
      return heapAliases;
      }

   // stop if the peek is getting very deep
   //
   if (methodsPeeked->getSize() >= PEEK_THRESHOLD)
      return 0;

   methodsPeeked->add(methodId);

   dumpOptDetails(comp, "O^O REFINING ALIASES: Peeking into the IL to refine aliases \n");

   if (!methodSymbol->getResolvedMethod()->genMethodILForPeeking(methodSymbol, comp, true))
      return 0;

   TR::SymbolReferenceTable * symRefTab = comp->getSymRefTab();
   for (TR::TreeTop * tt = methodSymbol->getFirstTreeTop(); tt; tt = tt->getNextTreeTop())
      {
      TR::Node *node = tt->getNode();
      if (node->getOpCode().isResolveCheck())
         return 0;

      if ((node->getOpCodeValue() == TR::treetop) ||
          (node->getOpCodeValue() == TR::compressedRefs) ||
          node->getOpCode().isCheck())
         node = node->getFirstChild();

      if (node->getOpCode().isStore())
         {
         TR::SymbolReference * symRefInCallee = node->getSymbolReference(), * symRefInCaller;
         TR::Symbol * symInCallee = symRefInCallee->getSymbol();
         TR::DataType type = symInCallee->getDataType();
         if (symInCallee->isShadow())
            {
            if (symInCallee->isArrayShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayShadowIndex(type));

            else if (symInCallee->isArrayletShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayletShadowIndex(type));

            else
               symRefInCaller = symRefTab->findShadowSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);

            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedShadowAliases(aliases, symRefTab);

               aliases->set(symRefInCaller->getReferenceNumber());
               }

            }
         else if (symInCallee->isStatic())
            {
            symRefInCaller = symRefTab->findStaticSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);
            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedStaticAliases(aliases, symRefTab);
               else
                  aliases->set(symRefInCaller->getReferenceNumber());
               }
            }
         }
      else if (node->getOpCode().isCall())
         {
         if (node->getOpCode().isCallIndirect())
            return 0;
         TR::ResolvedMethodSymbol * calleeSymbol = node->getSymbol()->getResolvedMethodSymbol();
         if (!calleeSymbol)
            return 0;
         TR_ResolvedMethod * calleeMethod = calleeSymbol->getResolvedMethod();
         if (!calleeMethod->isCompilable(comp->trMemory()) || calleeMethod->isJNINative())
            return 0;

         if (!addVeryRefinedCallAliasSets(calleeSymbol, aliases, methodsPeeked))
            return 0;
         }
      else if (node->getOpCodeValue() == TR::monent)
         return 0;
      }

   // This can't be allocated into the alias region as it must be accessed across optimizations
   TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
   *heapAliases |= *aliases;
   return heapAliases;
   }
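// A minimal usage sketch (illustrative, not from the source): seed the walk
// with a bit vector sized to the symbol reference table and an empty peek
// list. A NULL result means the peek bailed out and the caller must keep the
// conservative alias set. The wrapper name below is assumed.
TR_BitVector *
refineCallAliases(TR::ResolvedMethodSymbol *calleeSymbol)
   {
   TR::Compilation *comp = TR::comp();
   TR_BitVector aliases(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
   List<void> methodsPeeked(comp->trMemory());
   return addVeryRefinedCallAliasSets(calleeSymbol, &aliases, &methodsPeeked);
   }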
Exemplo n.º 18
// Collects nodes that are involved in PRE and are not stores or checks.
// These nodes require temps.
//
bool TR_LocalAnalysisInfo::collectSupportedNodes(TR::Node *node, TR::Node *parent)
   {
   if (node->getVisitCount() == _visitCount)
      return false;

   node->setVisitCount(_visitCount);

   bool flag = false;
   bool childRelevant = false;
   TR::ILOpCode &opCode = node->getOpCode();

   int32_t i;
   for (i = 0; i < node->getNumChildren(); i++)
      {
      TR::Node *child = node->getChild(i);
      if (collectSupportedNodes(child, node))
         flag = true;

      if (_checkExpressions->get(child->getLocalIndex()))
         childRelevant = true;
      }

   if (TR_LocalAnalysis::isSupportedNode(node, _compilation, parent))
      {
      _supportedNodesAsArray[node->getLocalIndex()] = node;

      bool indirectionSafe = true;
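      // An indirect load/store is treated as safe only when its base is the
      // non-null 'this' pointer and both the base's declared class and the
      // accessed field's declaring class resolve to the same class.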
      if (opCode.isIndirect() && (opCode.isLoadVar() || opCode.isStore()))
         {
         indirectionSafe = false;
         if (node->getFirstChild()->isThisPointer() &&
             node->getFirstChild()->isNonNull())
            {
            indirectionSafe = true;
            TR::Node *firstChild = node->getFirstChild();
            TR::SymbolReference *symRef = firstChild->getSymbolReference();
            int32_t len;
            const char *sig = symRef->getTypeSignature(len);

            TR::SymbolReference *otherSymRef = node->getSymbolReference();

            TR_OpaqueClassBlock *cl = NULL;
            if (sig && (len > 0))
               cl = _compilation->fe()->getClassFromSignature(sig, len, symRef->getOwningMethod(_compilation));

            TR_OpaqueClassBlock *otherClassObject = NULL;
            int32_t otherLen;
            const char *otherSig = otherSymRef->getOwningMethod(_compilation)->classNameOfFieldOrStatic(otherSymRef->getCPIndex(), otherLen);
            if (otherSig)
               {
               otherSig = classNameToSignature(otherSig, otherLen, _compilation);
               otherClassObject = _compilation->fe()->getClassFromSignature(otherSig, otherLen, otherSymRef->getOwningMethod(_compilation));
               }

            if (!cl ||
                !otherClassObject ||
                (cl != otherClassObject))
               indirectionSafe = false;
            }
         }

      if (childRelevant ||
         (!indirectionSafe || (opCode.isArrayLength())) ||
         (node->getOpCode().isArrayRef()) ||
         (opCode.hasSymbolReference() && (node->getSymbolReference()->isUnresolved() || node->getSymbol()->isArrayShadowSymbol())) ||
         (opCode.isDiv() || opCode.isRem()))
         _checkExpressions->set(node->getLocalIndex());
      }

   return flag;
   }
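// A hedged sketch (wrapper name assumed, not from the source) of how the
// recursive walk above might be seeded: each treetop root is visited with no
// parent, and the visit-count guard keeps commoned subtrees from being
// processed twice.
static void
collectAllSupportedNodes(TR_LocalAnalysisInfo &info, TR::ResolvedMethodSymbol *methodSymbol)
   {
   for (TR::TreeTop *tt = methodSymbol->getFirstTreeTop(); tt; tt = tt->getNextTreeTop())
      info.collectSupportedNodes(tt->getNode(), NULL);
   }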
Exemplo n.º 19
TR::Register *IA32LinkageUtils::pushIntegerWordArg(
      TR::Node *child,
      TR::CodeGenerator *cg)
   {
   TR::Register *pushRegister;
   if (child->getRegister() == NULL)
      {
      if (child->getOpCode().isLoadConst())
         {
         int32_t value = child->getInt();
         TR_X86OpCodes pushOp;
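         // A PUSH with an 8-bit immediate sign-extends to 32 bits, so the
         // shorter PUSHImms encoding is legal for any value in [-128, 127].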
         if (value >= -128 && value <= 127)
            {
            pushOp = PUSHImms;
            }
         else
            {
            pushOp = PUSHImm4;
            }

         generateImmInstruction(pushOp, child, value, cg);
         cg->decReferenceCount(child);
         return NULL;
         }
      else if (child->getOpCodeValue() == TR::loadaddr)
         {
         TR::SymbolReference * symRef = child->getSymbolReference();
         TR::StaticSymbol *sym = symRef->getSymbol()->getStaticSymbol();
         if (sym)
            {
            TR_ASSERT(!symRef->isUnresolved(), "pushIntegerWordArg loadaddr expecting resolved symbol");
            generateImmSymInstruction(PUSHImm4, child, (uintptrj_t)sym->getStaticAddress(), symRef, cg);
            cg->decReferenceCount(child);
            return NULL;
            }
         }
      else if (child->getOpCodeValue() == TR::fbits2i &&
               !child->normalizeNanValues() &&
               child->getReferenceCount() == 1)
         {
         pushRegister = pushFloatArg(child->getFirstChild(), cg);
         cg->decReferenceCount(child);
         return pushRegister;
         }
      else if (child->getOpCode().isMemoryReference() &&
               (child->getReferenceCount() == 1) &&
               (child->getSymbolReference() != cg->comp()->getSymRefTab()->findVftSymbolRef()))
         {
         TR::MemoryReference  *tempMR = generateX86MemoryReference(child, cg);
         generateMemInstruction(PUSHMem, child, tempMR, cg);
         tempMR->decNodeReferenceCounts(cg);
         cg->decReferenceCount(child);
         return NULL;
         }
      }

   pushRegister = cg->evaluate(child);
   generateRegInstruction(PUSHReg, child, pushRegister, cg);
   cg->decReferenceCount(child);
   return pushRegister;
   }
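// A standalone sketch (illustrative only) of the immediate-size selection
// used above: values that survive an 8-bit sign-extension round trip can use
// the shorter PUSHImms form, everything else needs the 4-byte PUSHImm4.
static TR_X86OpCodes
selectPushImmediateOpcode(int32_t value)
   {
   return (value >= -128 && value <= 127) ? PUSHImms : PUSHImm4;
   }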
Exemplo n.º 20
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

   TR::Instruction *instr;
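   // When the method address is known it is materialized into the scratch
   // register and dispatched through CALLReg: a MOV8RegImm64 reaches the full
   // 64-bit address space, while the rel32 displacement of CALLImm4 is
   // limited to +/-2GB from the call site.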
   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (comp()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptrj_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
Exemplo n.º 21
void
TR_Debug::print(TR::FILE *pOutFile, TR::S390CallSnippet * snippet)
   {
   uint8_t * bufferPos = snippet->getSnippetLabel()->getCodeLocation();
   TR::Node * callNode = snippet->getNode();
   TR::SymbolReference * methodSymRef = snippet->getRealMethodSymbolReference();
   if (!methodSymRef)
      methodSymRef = callNode->getSymbolReference();

   TR::MethodSymbol * methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
   TR::SymbolReference * glueRef;
   int8_t padbytes = snippet->getPadBytes();

   printSnippetLabel(pOutFile, snippet->getSnippetLabel(), bufferPos,
      methodSymRef->isUnresolved() ? "Unresolved Call Snippet" : "Call Snippet");

   bufferPos = printS390ArgumentsFlush(pOutFile, callNode, bufferPos, snippet->getSizeOfArguments());

   if (methodSymRef->isUnresolved() || _comp->compileRelocatableCode())
      {
      if (methodSymbol->isSpecial())
         {
         glueRef = _cg->getSymRef(TR_S390interpreterUnresolvedSpecialGlue);
         }
      else if (methodSymbol->isStatic())
         {
         glueRef = _cg->getSymRef(TR_S390interpreterUnresolvedStaticGlue);
         }
      else
         {
         glueRef = _cg->getSymRef(TR_S390interpreterUnresolvedDirectVirtualGlue);
         }
      }
   else
      {
      bool synchronised = methodSymbol->isSynchronised();

      if ((methodSymbol->isVMInternalNative() || methodSymbol->isJITInternalNative()))
         {
         glueRef = _cg->getSymRef(TR_S390nativeStaticHelper);
         }
      else
         {
         switch (callNode->getDataType())
            {
            case TR::NoType:
               if (synchronised)
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterSyncVoidStaticGlue);
                  }
               else
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterVoidStaticGlue);
                  }
               break;
            case TR::Int8:
            case TR::Int16:
            case TR::Int32:
               if (synchronised)
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterSyncIntStaticGlue);
                  }
               else
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterIntStaticGlue);
                  }
               break;
            case TR::Address:
               if (TR::Compiler->target.is64Bit())
                  {
                  if (synchronised)
                     {
                     glueRef = _cg->getSymRef(TR_S390interpreterSyncLongStaticGlue);
                     }
                  else
                     {
                     glueRef = _cg->getSymRef(TR_S390interpreterLongStaticGlue);
                     }
                  }
               else
                  {
                  if (synchronised)
                     {
                     glueRef = _cg->getSymRef(TR_S390interpreterSyncIntStaticGlue);
                     }
                  else
                     {
                     glueRef = _cg->getSymRef(TR_S390interpreterIntStaticGlue);
                     }
                  }
               break;

            case TR::Int64:
               if (synchronised)
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterSyncLongStaticGlue);
                  }
               else
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterLongStaticGlue);
                  }
               break;

            case TR::Float:
               if (synchronised)
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterSyncFloatStaticGlue);
                  }
               else
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterFloatStaticGlue);
                  }
               break;

            case TR::Double:
               if (synchronised)
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterSyncDoubleStaticGlue);
                  }
               else
                  {
                  glueRef = _cg->getSymRef(TR_S390interpreterDoubleStaticGlue);
                  }
               break;

            default:
               TR_ASSERT(0, "Bad return data type for a call node.  DataType was %s\n",
                  getName(callNode->getDataType()));
            }
         }
      }
   bufferPos = printRuntimeInstrumentationOnOffInstruction(pOutFile, bufferPos, false); // RIOFF

   if (snippet->getKind() == TR::Snippet::IsUnresolvedCall)
      {
      int lengthOfLoad = TR::Compiler->target.is64Bit() ? 6 : 4;

      printPrefix(pOutFile, NULL, bufferPos, 6);
      trfprintf(pOutFile, "LARL \tGPR14, *+%d <%p>\t# Start of Data Const.",
                        8 + lengthOfLoad + padbytes,
                        bufferPos + 8 + lengthOfLoad + padbytes);
      bufferPos += 6;
      if (TR::Compiler->target.is64Bit())
         {
         printPrefix(pOutFile, NULL, bufferPos, 6);
         trfprintf(pOutFile, "LG  \tGPR_EP, 0(,GPR14)");
         bufferPos += 6;
         }
      else
         {
         printPrefix(pOutFile, NULL, bufferPos, 4);
         trfprintf(pOutFile, "L   \tGPR_EP, 0(,GPR14)");
         bufferPos += 4;
         }
      printPrefix(pOutFile, NULL, bufferPos, 2);
      trfprintf(pOutFile, "BCR    \tGPR_EP");
      bufferPos += 2;
      }
   else
      {
      printPrefix(pOutFile, NULL, bufferPos, 6);
      trfprintf(pOutFile, "BRASL \tGPR14, <%p>\t# Branch to Helper Method %s",
                    snippet->getSnippetDestAddr(),
                    snippet->usedTrampoline()?"- Trampoline Used.":"");
      bufferPos += 6;
      }

   if (padbytes == 2)
      {
      printPrefix(pOutFile, NULL, bufferPos, 2);
      trfprintf(pOutFile, "DC   \t0x0000 \t\t\t# 2-bytes padding for alignment");
      bufferPos += 2;
      }
   else if (padbytes == 4)
      {
      printPrefix(pOutFile, NULL, bufferPos, 4) ;
      trfprintf(pOutFile, "DC   \t0x00000000 \t\t# 4-bytes padding for alignment");
      bufferPos += 4;
      }
   else if (padbytes == 6)
      {
      printPrefix(pOutFile, NULL, bufferPos, 6) ;
      trfprintf(pOutFile, "DC   \t0x000000000000 \t\t# 6-bytes padding for alignment");
      bufferPos += 6;
      }

   printPrefix(pOutFile, NULL, bufferPos, sizeof(intptrj_t));
   trfprintf(pOutFile, "DC   \t%p \t\t# Method Address", glueRef->getMethodAddress());
   bufferPos += sizeof(intptrj_t);

   printPrefix(pOutFile, NULL, bufferPos, sizeof(intptrj_t));
   trfprintf(pOutFile, "DC   \t%p \t\t# Call Site RA", snippet->getCallRA());
   bufferPos += sizeof(intptrj_t);

   if (methodSymRef->isUnresolved())
      {
      printPrefix(pOutFile, NULL, bufferPos, 0);
      }
   else
      {
      printPrefix(pOutFile, NULL, bufferPos, sizeof(intptrj_t));
      }

   trfprintf(pOutFile, "DC   \t%p \t\t# Method Pointer", methodSymRef->isUnresolved() ? 0 : methodSymbol->getMethodAddress());
   }
Exemplo n.º 22
uint8_t *
TR::S390HelperCallSnippet::emitSnippetBody()
   {
   uint8_t * cursor = cg()->getBinaryBufferCursor();
   getSnippetLabel()->setCodeLocation(cursor);

   TR::Node * callNode = getNode();
   TR::SymbolReference * helperSymRef = getHelperSymRef();
   bool jitInduceOSR = helperSymRef == cg()->symRefTab()->element(TR_induceOSRAtCurrentPC);
   if (jitInduceOSR)
      {
      // Flush in-register arguments back to the stack for interpreter
      cursor = TR::S390CallSnippet::S390flushArgumentsToStack(cursor, callNode, getSizeOfArguments(), cg());
      }

   uint32_t rEP = (uint32_t) cg()->getEntryPointRegister() - 1;
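   // rEP holds the 0-based index of the entry point register; it is used
   // below to mask that register out of the GC stack map when a trampoline
   // call clobbers it.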

   // Load the VM thread into GPR13.
   cursor = generateLoadVMThreadInstruction(cg(), cursor);

   // Generate RIOFF if RI is supported.
   cursor = generateRuntimeInstrumentationOnOffInstruction(cg(), cursor, TR::InstOpCode::RIOFF);

   if (alwaysExcept())                                                                // Methods that require R14 to point to snippet:
      {
      // For trace method entry/exit, we need to set up R14 to point to the
      // beginning of the data segment.  We will use BRASL to automatically
      // set R14 correctly.

      // For methods that lead to exceptions, and never return to the
      // main code, we set up R14, so that if GC occurs, the stackwalker
      // will see R14 is pointing to this snippet, and pick up the correct
      // stack map.

      *(int16_t *) cursor = 0xC0E5;                                                   // BRASL  R14, <Helper Addr>
      cursor += sizeof(int16_t);
      }
   else                                                                               // Otherwise:
      {
      // We're not sure if the helper will return.  So, we need to provide
      // the return addr of the main line code, so that when helper call
      // completes, it can jump back properly.

      // Load Return Address into R14.
      intptrj_t returnAddr = (intptrj_t)getReStartLabel()->getCodeLocation();         // LARL   R14, <Return Addr>
      *(int16_t *) cursor = 0xC0E0;
      cursor += sizeof(int16_t);
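      // The LARL immediate counts halfwords relative to the start of the
      // instruction; (cursor - 2) backs up over the opcode bytes just emitted.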
      *(int32_t *) cursor = (int32_t)((returnAddr - (intptrj_t)(cursor - 2)) / 2);
      cursor += sizeof(int32_t);

      *(int16_t *) cursor = 0xC0F4;                                                   // BRCL   <Helper Addr>
      cursor += sizeof(int16_t);
      }

   // Calculate the relative offset to get to helper method.
   // If MCC is not supported, everything should be reachable.
   // If MCC is supported, we will look up the appropriate trampoline, if
   //     necessary.
   intptrj_t destAddr = (intptrj_t)(helperSymRef->getSymbol()->castToMethodSymbol()->getMethodAddress());

#if defined(TR_TARGET_64BIT)
#if defined(J9ZOS390)
   if (cg()->comp()->getOption(TR_EnableRMODE64))
#endif
      {
      if (NEEDS_TRAMPOLINE(destAddr, cursor, cg()))
         {
         destAddr = cg()->fe()->indexedTrampolineLookup(helperSymRef->getReferenceNumber(), (void *)cursor);
         this->setUsedTrampoline(true);

         // We clobber rEP if we take a trampoline. Update our register map if necessary.
         if (gcMap().getStackMap() != NULL)
            {
            gcMap().getStackMap()->maskRegisters(~(0x1 << (rEP)));
            }
         }
      }
#endif

   TR_ASSERT(CHECK_32BIT_TRAMPOLINE_RANGE(destAddr, cursor), "Helper Call is not reachable.");
   this->setSnippetDestAddr(destAddr);
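   // As with the LARL above, the branch-relative immediate below counts
   // halfwords from the start of the BRASL/BRCL instruction.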

   *(int32_t *) cursor = (int32_t)((destAddr - (intptrj_t)(cursor - 2)) / 2);
   AOTcgDiag1(cg()->comp(), "add TR_HelperAddress cursor=%x\n", cursor);
   cg()->addProjectSpecializedRelocation(cursor, (uint8_t*) helperSymRef, NULL, TR_HelperAddress,
                             __FILE__, __LINE__, getNode());
   cursor += sizeof(int32_t);

   gcMap().registerStackMap(cursor, cg());

   return cursor;
   }