Example #1
bool TR_LocalLiveRangeReduction::isNeedToBeInvestigated(TR_TreeRefInfo *treeRefInfo)
   {

   //   TR::TreeTop *treeTop = treeRefInfo->getTreeTop();
   TR::Node *node = treeRefInfo->getTreeTop()->getNode();
   TR::ILOpCode &opCode = node->getOpCode();

   if (opCode.isBranch() || opCode.isReturn() || opCode.isGoto() || opCode.isJumpWithMultipleTargets() ||
       opCode.getOpCodeValue() == TR::BBStart || opCode.getOpCodeValue() == TR::BBEnd)
      return false;

   if (opCode.getOpCodeValue() == TR::treetop || opCode.isResolveOrNullCheck())
      node  = node->getFirstChild();

   //node might have changed
   /*if ((node->getOpCodeValue() == TR::monent) ||
       (node->getOpCodeValue() == TR::monexit)||
       (node->getOpCodeValue() == TR::athrow)) */
   if (nodeMaybeMonitor(node) ||(node->getOpCodeValue() == TR::athrow))
      return false;

   /*******************************************************************/
   /* TODO: add support for this: stop before a loadReg to the same register */
   if (node->getOpCode().isStoreReg())
      return false;
   /*******************************************************************/
   if (_movedTreesList.find(treeRefInfo))
      return false;

   if (treeRefInfo->getFirstRefNodesList()->getSize()!=0)
      return true;

   return false;

   }
Example #2
bool TR_LocalLiveRangeReduction::isWorthMoving(TR_TreeRefInfo *tree)
   {
   bool usesRegisterPairsForLongs = cg()->usesRegisterPairsForLongs();
   int32_t numFirstRefNodesFloat=0;
   int32_t numFirstRefNodesInt=0;
   int32_t numLastRefNodesFloat=0;
   int32_t numLastRefNodesInt=0;
   TR::Node *node;


   //check first references
   ListIterator<TR::Node> listIt(tree->getFirstRefNodesList());
   for ( node = listIt.getFirst(); node != NULL; node = listIt.getNext())
      {
      TR::ILOpCode &opCode = node->getOpCode();
      if (opCode.isFloatingPoint())
         numFirstRefNodesFloat++;
      else
         {
         // all integers, signed and unsigned
         if (opCode.isLong() && usesRegisterPairsForLongs)
            numFirstRefNodesInt+=2;
         else
            numFirstRefNodesInt++;
         }
      }
   //check last references
   listIt.set(tree->getLastRefNodesList());
   for ( node = listIt.getFirst(); node != NULL; node = listIt.getNext())
      {
      TR::ILOpCode &opCode = node->getOpCode();
      if (opCode.isFloatingPoint())
         numLastRefNodesFloat++;
      else
         {
         // all integers, signed and unsigned
         if (opCode.isLong() && usesRegisterPairsForLongs)
            numLastRefNodesInt+=2;
         else
            numLastRefNodesInt++;
         }
      }


   if (((numLastRefNodesInt < numFirstRefNodesInt) &&
        (numLastRefNodesFloat <= numFirstRefNodesFloat)) ||
       ((numLastRefNodesFloat < numFirstRefNodesFloat) &&
        (numLastRefNodesInt <= numFirstRefNodesInt)))
      return true;

   return false;
   }
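
The final comparison in isWorthMoving can be read on its own: the move is reported worthwhile only when the tree ends fewer live ranges than it begins in one register class and no more in the other. A minimal standalone sketch of that test (plain C++ with illustrative names and counts, not the OMR API):

#include <cstdio>

// Sketch of the comparison isWorthMoving performs: strictly fewer last
// references than first references in one class, and no more in the other.
static bool worthMoving(int firstInt, int firstFloat, int lastInt, int lastFloat)
   {
   return (lastInt < firstInt && lastFloat <= firstFloat) ||
          (lastFloat < firstFloat && lastInt <= firstInt);
   }

int main()
   {
   printf("%d\n", worthMoving(3, 0, 2, 0)); // 1: fewer last refs than first refs in GPRs, floats equal
   printf("%d\n", worthMoving(2, 1, 2, 1)); // 0: neither class improves
   return 0;
   }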
Example #3
int32_t
TR::RegDepCopyRemoval::perform()
   {
   if (!cg()->supportsPassThroughCopyToNewVirtualRegister())
      return 0;

   discardAllNodeChoices();
   TR::TreeTop *tt;
   for (tt = comp()->getStartTree(); tt != NULL; tt = tt->getNextTreeTop())
      {
      TR::Node *node = tt->getNode();
      switch (node->getOpCodeValue())
         {
         case TR::BBStart:
            if (!node->getBlock()->isExtensionOfPreviousBlock())
               {
               if (trace())
                  traceMsg(comp(), "clearing remembered node choices at start of extended block at block_%d\n", node->getBlock()->getNumber());
               discardAllNodeChoices();
               }
            if (node->getNumChildren() > 0)
               processRegDeps(node->getFirstChild(), tt);
            break;
         case TR::BBEnd:
            if (node->getNumChildren() > 0)
               processRegDeps(node->getFirstChild(), tt);
            break;
         default:
            if (node->getOpCode().isSwitch())
               {
               TR::Node *defaultDest = node->getSecondChild();
               if (defaultDest->getNumChildren() > 0)
                  processRegDeps(defaultDest->getFirstChild(), tt);
               }
            else if (node->getOpCode().isBranch())
               {
               int nChildren = node->getNumChildren();
               // only the last child may be GlRegDeps
               for (int i = 0; i < nChildren - 1; i++)
                  TR_ASSERT(node->getChild(i)->getOpCodeValue() != TR::GlRegDeps, "GlRegDeps for branch is not the last child\n");
               if (nChildren > 0)
                  {
                  TR::Node *lastChild = node->getChild(nChildren - 1);
                  if (lastChild->getOpCodeValue() == TR::GlRegDeps)
                     processRegDeps(lastChild, tt);
                  }
               }
            break;
         }
      }
   return 1; // a bit arbitrary...
   }
Example #4
void TR::ILValidator::updateNodeState(Location &newLocation)
   {
   TR::Node  *node = newLocation.currentNode();
   NodeState &state = _nodeStates[node];
   if (node->getReferenceCount() == state._futureReferenceCount)
      {
      // First occurrence -- do some bookkeeping
      //
      if (node->getReferenceCount() == 0)
         {
         validityRule(newLocation, node->getOpCode().isTreeTop(), "Only nodes with isTreeTop opcodes can have refcount == 0");
         }
      else
         {
         _liveNodes.add(node);
         }
      }

   if (_liveNodes.contains(node))
      {
      validityRule(newLocation, state._futureReferenceCount >= 1, "Node already has reference count 0");
      if (--state._futureReferenceCount == 0)
         {
         _liveNodes.remove(node);
         }
      }
   else
      {
      validityRule(newLocation, node->getOpCode().isTreeTop(), "Node has already gone dead");
      }

   if (isLoggingEnabled())
      {
      static const char *traceLiveNodesDuringValidation = feGetEnv("TR_traceLiveNodesDuringValidation");
      if (traceLiveNodesDuringValidation && !_liveNodes.isEmpty())
         {
         traceMsg(comp(), "    -- Live nodes: {");
         char *separator = "";
         for (LiveNodeWindow::Iterator lnwi(_liveNodes); lnwi.currentNode(); ++lnwi)
            {
            traceMsg(comp(), "%sn%dn", separator, lnwi.currentNode()->getGlobalIndex());
            separator = ", ";
            }
         traceMsg(comp(), "}\n");
         }
      }

   }
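
The bookkeeping above is easier to see without the validator plumbing: a node becomes live at its first occurrence in the walk and goes dead once as many references as its reference count have been seen. A minimal standalone sketch of that idea, assuming a toy Node type and plain STL containers in place of NodeState and the LiveNodeWindow:

#include <cstdio>
#include <map>
#include <set>

// Toy stand-in for TR::Node: only the fields the bookkeeping needs.
struct Node { int id; int referenceCount; };

static std::map<Node*, int> futureRefCount; // references still to be seen
static std::set<Node*> liveNodes;

static void updateNodeState(Node *node)
   {
   // First occurrence: remember how many references are still to come.
   if (futureRefCount.find(node) == futureRefCount.end())
      {
      futureRefCount[node] = node->referenceCount;
      if (node->referenceCount > 0)
         liveNodes.insert(node);
      }
   // Each occurrence consumes one future reference; the node dies at zero.
   if (liveNodes.count(node) && --futureRefCount[node] == 0)
      liveNodes.erase(node);
   }

int main()
   {
   Node n = {1, 2};
   updateNodeState(&n);
   printf("live after first reference: %zu\n", liveNodes.size());  // 1
   updateNodeState(&n);
   printf("live after second reference: %zu\n", liveNodes.size()); // 0
   return 0;
   }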
Example #5
TR::Register *
OMR::ARM64::TreeEvaluator::imulEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *firstChild = node->getFirstChild();
   TR::Register *src1Reg = cg->evaluate(firstChild);
   TR::Node *secondChild = node->getSecondChild();
   TR::Register *trgReg;

   if (secondChild->getOpCode().isLoadConst() && secondChild->getRegister() == NULL)
      {
      int32_t value = secondChild->getInt();
      if (value > 0 && cg->convertMultiplyToShift(node))
         {
         // The multiply has been converted to a shift.
         trgReg = cg->evaluate(node);
         return trgReg;
         }
      else
         {
         trgReg = cg->allocateRegister();
         mulConstant32(node, trgReg, src1Reg, value, cg);
         }
      }
   else
      {
      TR::Register *src2Reg = cg->evaluate(secondChild);
      trgReg = cg->allocateRegister();
      generateMulInstruction(cg, node, trgReg, src1Reg, src2Reg);
      }
   firstChild->decReferenceCount();
   secondChild->decReferenceCount();
   node->setRegister(trgReg);
   return trgReg;
   }
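
For context on convertMultiplyToShift: when the constant multiplier is a positive power of two, the multiply reduces to a single left shift (the code generator routine may cover more patterns than this). A minimal standalone sketch of that strength reduction, with hypothetical names:

#include <cstdint>
#include <cstdio>

// Returns true and sets 'shift' when multiplying by 'value' can be replaced
// by a single left shift, i.e. when 'value' is a positive power of two.
static bool multiplyToShift(int32_t value, int32_t &shift)
   {
   if (value <= 0 || (value & (value - 1)) != 0)
      return false;
   shift = 0;
   while ((int32_t)(1 << shift) != value)
      shift++;
   return true;
   }

int main()
   {
   int32_t shift;
   if (multiplyToShift(8, shift))
      printf("x * 8 == x << %d\n", shift); // x * 8 == x << 3
   return 0;
   }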
Example #6
TR::Register *
OMR::ARM64::TreeEvaluator::lmulhEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *firstChild = node->getFirstChild();
   TR::Register *src1Reg = cg->evaluate(firstChild);
   TR::Node *secondChild = node->getSecondChild();
   TR::Register *src2Reg;
   TR::Register *trgReg = cg->allocateRegister();
   TR::Register *tmpReg = NULL;

   // lmulh is generated for constant ldiv and the second child is the magic number
   // assume magic number is usually a large odd number with little optimization opportunity
   if (secondChild->getOpCode().isLoadConst() && secondChild->getRegister() == NULL)
      {
      int64_t value = secondChild->getLongInt();
      src2Reg = tmpReg = cg->allocateRegister();
      loadConstant64(cg, node, value, src2Reg);
      }
   else
      {
      src2Reg = cg->evaluate(secondChild);
      }

   generateTrg1Src2Instruction(cg, TR::InstOpCode::smulh, node, trgReg, src1Reg, src2Reg);

   if (tmpReg)
      {
      cg->stopUsingRegister(tmpReg);
      }

   firstChild->decReferenceCount();
   secondChild->decReferenceCount();
   node->setRegister(trgReg);
   return trgReg;
   }
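
As the comment notes, lmulh shows up for constant ldiv, where the second child is a magic number from the standard divide-by-constant transformation. A minimal standalone sketch of the value smulh provides, using the __int128 compiler extension; the magic constant shown is the usual one for signed 64-bit division by 10:

#include <cstdint>
#include <cstdio>

// lmulh: the high 64 bits of the signed 128-bit product, which is exactly
// what the smulh instruction computes.
static int64_t lmulh(int64_t a, int64_t b)
   {
   return (int64_t)(((__int128)a * (__int128)b) >> 64);
   }

int main()
   {
   int64_t n = 12345;
   int64_t hi = lmulh(n, 0x6666666666666667LL); // magic number for dividing by 10
   int64_t q = hi >> 2;                         // for non-negative n; negative n needs a +1 fixup
   printf("%lld / 10 = %lld\n", (long long)n, (long long)q); // 12345 / 10 = 1234
   return 0;
   }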
Example #7
static TR::Register *addOrSubInteger(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *firstChild = node->getFirstChild();
   TR::Register *src1Reg = cg->evaluate(firstChild);
   TR::Node *secondChild = node->getSecondChild();
   TR::Register *trgReg = cg->allocateRegister();
   bool isAdd = node->getOpCode().isAdd();

   if (secondChild->getOpCode().isLoadConst() && secondChild->getRegister() == NULL)
      {
      int32_t value = secondChild->getInt();
      if (constantIsUnsignedImm12(value))
         {
         generateTrg1Src1ImmInstruction(cg, isAdd ? TR::InstOpCode::addimmw : TR::InstOpCode::subimmw, node, trgReg, src1Reg, value);
         }
      else
         {
         TR::Register *tmpReg = cg->allocateRegister();
         loadConstant32(cg, node, value, tmpReg);
         generateTrg1Src2Instruction(cg, isAdd ? TR::InstOpCode::addw : TR::InstOpCode::subw, node, trgReg, src1Reg, tmpReg);
         cg->stopUsingRegister(tmpReg);
         }
      }
   else
      {
      TR::Register *src2Reg = cg->evaluate(secondChild);
      generateTrg1Src2Instruction(cg, isAdd ? TR::InstOpCode::addw : TR::InstOpCode::subw, node, trgReg, src1Reg, src2Reg);
      }

   node->setRegister(trgReg);
   firstChild->decReferenceCount();
   secondChild->decReferenceCount();
   return trgReg;
   }
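
For reference, AArch64 ADD/SUB (immediate) encodes a 12-bit unsigned immediate, optionally shifted left by 12 bits. A minimal sketch of the unshifted check that constantIsUnsignedImm12 is assumed to perform here; the real helper may also accept the shifted form:

#include <cstdint>
#include <cstdio>

// The constant can be used directly in addimmw/subimmw only if it fits in
// the 12-bit unsigned immediate field (0..4095), ignoring the LSL #12 form.
static bool fitsUnsignedImm12(int64_t value)
   {
   return value >= 0 && value < (1 << 12);
   }

int main()
   {
   printf("%d %d\n", fitsUnsignedImm12(4095), fitsUnsignedImm12(4096)); // 1 0
   return 0;
   }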
Example #8
static TR::Register *l2fd(TR::Node *node, TR::RealRegister *target, TR_X86OpCodes opRegMem8, TR_X86OpCodes opRegReg8, TR::CodeGenerator *cg)
   {
   TR::Node                *child = node->getFirstChild();
   TR::MemoryReference  *tempMR;

   TR_ASSERT(cg->useSSEForSinglePrecision(), "assertion failure");

   if (child->getRegister() == NULL &&
       child->getReferenceCount() == 1 &&
       child->getOpCode().isLoadVar())
      {
      tempMR = generateX86MemoryReference(child, cg);
      generateRegMemInstruction(opRegMem8, node, target, tempMR, cg);
      tempMR->decNodeReferenceCounts(cg);
      }
   else
      {
      TR::Register *intReg = cg->evaluate(child);
      generateRegRegInstruction(opRegReg8, node, target, intReg, cg);
      cg->decReferenceCount(child);
      }

   node->setRegister(target);
   return target;
   }
Example #9
TR::Node *TR_OutlinedInstructions::createOutlinedCallNode(TR::Node *callNode, TR::ILOpCodes callOp)
   {
   int32_t   i;
   TR::Node  *child;

   // Create the new call node with the same symbol reference as the original call.
   TR::Node *newCallNode = TR::Node::createWithSymRef(callNode, callOp, callNode->getNumChildren(), callNode->getSymbolReference());

   newCallNode->setReferenceCount(1);

   for (i=0; i<callNode->getNumChildren(); i++)
      {
      child = callNode->getChild(i);

      if (child->getRegister() != NULL)
         {
         // Child has already been evaluated outside this tree.
         //
         newCallNode->setAndIncChild(i, child);
         }
      else if (child->getOpCode().isLoadConst())
         {
         // Copy unevaluated constant nodes.
         //
         child = TR::Node::copy(child);
         child->setReferenceCount(1);
         newCallNode->setChild(i, child);
         }
      else
         {
         if ((child->getOpCodeValue() == TR::loadaddr) &&
             /*(callNode->getOpCodeValue() == TR::instanceof || callNode->getOpCodeValue() == TR::checkcast || callNode->getOpCodeValue() == TR::checkcastAndNULLCHK || callNode->getOpCodeValue() == TR::New || callNode->getOpCodeValue() == TR::anewarray)    &&*/
             (child->getSymbolReference()->getSymbol()) &&
             (child->getSymbolReference()->getSymbol()->getStaticSymbol()))
            {
            child = TR::Node::copy(child);
            child->setReferenceCount(1);
            newCallNode->setChild(i, child);
            }
         else
            {
            // Be very conservative at this point, even though it is possible to make it less so.  For example, this will catch
            // the case of an unevaluated argument not persisting outside of the outlined region even though one of its subtrees will.
            //
            (void)_cg->evaluate(child);

            // Do not decrement the reference count here.  It will be decremented when the call node is evaluated
            // again in the helper instruction stream.
            //
            newCallNode->setAndIncChild(i, child);
            }
         }
      }
   if (callNode->isPreparedForDirectJNI())
      {
      newCallNode->setPreparedForDirectJNI();
      }

   return newCallNode;
   }
Example #10
bool collectSymbolReferencesInNode(TR::Node *node,
                                   TR::SparseBitVector &symbolReferencesInNode,
                                   int32_t *numDeadSubNodes, vcount_t visitCount, TR::Compilation *comp,
                                   bool *seenInternalPointer, bool *seenArraylet,
                                   bool *cantMoveUnderBranch)
   {
   // The visit count in the node must be maintained by this method.
   //
   vcount_t oldVisitCount = node->getVisitCount();
   if (oldVisitCount == visitCount || oldVisitCount == comp->getVisitCount())
      return true;
   node->setVisitCount(comp->getVisitCount());

   //diagnostic("Walking node %p, height=%d, oldVisitCount=%d, visitCount=%d, compVisitCount=%d\n", node, *height, oldVisitCount, visitCount,comp->getVisitCount());

   // For all other subtrees collect all symbols that could be killed between
   // here and the next reference.
   //
   for (int32_t i = node->getNumChildren()-1; i >= 0; i--)
      {
      TR::Node *child = node->getChild(i);
      if (child->getFutureUseCount() == 1 &&
          child->getReferenceCount() > 1 &&
          !child->getOpCode().isLoadConst())
         *numDeadSubNodes = (*numDeadSubNodes) + 1;

      collectSymbolReferencesInNode(child, symbolReferencesInNode, numDeadSubNodes, visitCount, comp,
            seenInternalPointer, seenArraylet, cantMoveUnderBranch);
      }

   // detect if this is a direct load that shouldn't be moved under a branch (because an update was moved past
   // this load by treeSimplification)
   if (cantMoveUnderBranch &&
       (node->getOpCode().isLoadVarDirect() || node->getOpCode().isLoadReg()) &&
       node->isDontMoveUnderBranch())
      *cantMoveUnderBranch = true;

   if (seenInternalPointer && node->isInternalPointer() && node->getReferenceCount() > 1)
      *seenInternalPointer = true;

   if (seenArraylet)
      {
      if (node->getOpCode().hasSymbolReference() &&
          node->getSymbolReference()->getSymbol()->isArrayletShadowSymbol() &&
          node->getReferenceCount() > 1)
         {
         *seenArraylet = true;
         }
      }

   // Add this node's symbol reference to the set
   if (node->getOpCode().hasSymbolReference())
      {
      symbolReferencesInNode[node->getSymbolReference()->getReferenceNumber()]=true;
      }

   return true;
   }
Example #11
/**
 * A runtime guard block may have monitor stores and privarg stores along with the guard
 * itself. This method will rearrange these stores and split the block, managing any
 * uncommoning necessary for eventual block order.
 *
 * The provided block will become the privarg block, containing any privarg stores and additional
 * temps for uncommoning. It must be evaluated first. The returned block will contain monitor
 * stores and the guard. If no split is required, the provided block will be returned.
 *
 * @param comp Compilation object
 * @param block Block to manipulate
 * @param cfg Current CFG
 * @return The block containing the guard.
 */
static TR::Block* splitRuntimeGuardBlock(TR::Compilation *comp, TR::Block* block, TR::CFG *cfg)
   {
   TR::NodeChecklist checklist(comp);
   TR::TreeTop *start = block->getFirstRealTreeTop();
   TR::TreeTop *guard = block->getLastRealTreeTop();
   TR::TreeTop *firstPrivArg = NULL;
   TR::TreeTop *firstMonitor = NULL;

   // Manage the unexpected case that monitors and priv args are reversed
   bool privThenMonitor = false;

   TR_ASSERT(isMergeableGuard(guard->getNode()), "last node must be guard %p", guard->getNode());

   // Search for privarg and monitor stores
   // Only commoned nodes under the guard are required to be anchored, due to the guard being
   // evaluated before the monitor stores later on
   bool anchoredTemps = false;
   for (TR::TreeTop *tt = start; tt && tt->getNode()->getOpCodeValue() != TR::BBEnd; tt = tt->getNextTreeTop())
      {
      TR::Node * node = tt->getNode();

      if (node->getOpCode().hasSymbolReference() && node->getSymbol()->holdsMonitoredObject())
         firstMonitor = firstMonitor == NULL ? tt : firstMonitor;
      else if (node->chkIsPrivatizedInlinerArg())
         {
         if (firstPrivArg == NULL)
            {
            firstPrivArg = tt;
            privThenMonitor = (firstMonitor == NULL);
            }
         }
      else if (isMergeableGuard(node))
         anchoredTemps |= anchorCommonNodes(comp, node, start, checklist);
      else
         TR_ASSERT(0, "Node other than monitor or privarg store %p before runtime guard", node);
      }

   // If there are monitors then privargs, they must be swapped around, such that all privargs are
   // evaluated first
   if (firstPrivArg && firstMonitor && !privThenMonitor)
      {
      TR::TreeTop *monitorEnd = firstPrivArg->getPrevTreeTop();
      firstMonitor->getPrevTreeTop()->join(firstPrivArg);
      guard->getPrevTreeTop()->join(firstMonitor);
      monitorEnd->join(guard);
      }

   // If there were temps created or privargs in the block, perform a split
   TR::TreeTop *split = NULL;
   if (firstPrivArg)
      split = firstMonitor ? firstMonitor : guard;
   else if (anchoredTemps)
      split = start;

   if (split)
      return block->split(split, cfg, true /* fixupCommoning */, false /* copyExceptionSuccessors */);
   return block;
   }
Example #12
// A naive check (no aliasing needed) of whether a treetop has any chance of killing anything.
// Used by the no-opt and low-opt CodeGenPrep phase passes.
bool
OMR::TreeTop::isPossibleDef()
   {
   TR::Node *defNode = self()->getNode()->getOpCodeValue() == TR::treetop ? self()->getNode()->getFirstChild() : self()->getNode();
   if (defNode->getOpCode().isLikeDef())
      {
      return true;
      }
   else
      {
      return false;
      }
   }
Example #13
void TR::ILValidator::validityRule(Location &location, bool condition, const char *formatStr, ...)
   {
   if (!condition)
      {
      _isValidSoFar = false;
      TR::Node *node = location.currentNode();
      printDiagnostic("*** VALIDATION ERROR ***\nNode: %s n%dn\nMethod: %s\n", node->getOpCode().getName(), node->getGlobalIndex(), comp()->signature());
      va_list args;
      va_start(args, formatStr);
      vprintDiagnostic(formatStr, args);
      va_end(args);
      printDiagnostic("\n");
      FAIL();
      }
   }
Example #14
void
TR::RegDepCopyRemoval::readRegDeps()
   {
   for (int i = 0; i < _regDeps->getNumChildren(); i++)
      {
      TR::Node *depNode = _regDeps->getChild(i);
      TR::Node *depValue = depNode;
      if (depValue->getOpCodeValue() == TR::PassThrough)
         {
         do
            depValue = depValue->getFirstChild();
         while (depValue->getOpCodeValue() == TR::PassThrough);
         }
      else
         {
         TR_ASSERT(depNode->getOpCode().isLoadReg(), "invalid GlRegDeps child opcode n%un %s\n", depNode->getGlobalIndex(), depNode->getOpCode().getName());
         }

      // Avoid register pairs for simplicity, at least for now
      bool isRegPairDep = depNode->getHighGlobalRegisterNumber() != (TR_GlobalRegisterNumber)-1;
      bool valueNeedsRegPair = comp()->nodeNeeds2Regs(depValue);
      TR_ASSERT(isRegPairDep == valueNeedsRegPair, "mismatch on number of registers required for n%un\n", depNode->getGlobalIndex());
      if (isRegPairDep)
         {
         ignoreRegister(depNode->getLowGlobalRegisterNumber());
         ignoreRegister(depNode->getHighGlobalRegisterNumber());
         continue;
         }

      // Only process integral and address-type nodes; they'll go into GPRs
      TR_GlobalRegisterNumber reg = depNode->getGlobalRegisterNumber();
      TR::DataType depType = depValue->getType();
      if (!depType.isIntegral() && !depType.isAddress())
         {
         ignoreRegister(reg);
         continue;
         }

      RegDepInfo &dep = getRegDepInfo(reg);
      TR_ASSERT(dep.state == REGDEP_ABSENT, "register %s is multiply-specified\n", registerName(reg));
      dep.node = depNode;
      dep.value = depValue;
      dep.state = REGDEP_UNDECIDED;
      dep.childIndex = i;
      }
   }
Example #15
void
OMR::CodeGenerator::evaluateChildrenWithMultipleRefCount(TR::Node * node)
   {
   for (int i=0; i < node->getNumChildren(); i++)
      {
      TR::Node *child = node->getChild(i);
      if (child->getRegister() == NULL) // not already evaluated
         {
         // Note: we assume things without a symbol reference don't
         // necessarily need to be evaluated here, and can wait
         // until they are actually needed.
         //
         // vft pointers are special - we need to evaluate the object in all cases
         // but for nopable virtual guards we can wait to load and mask the pointer
         // until we actually need to use it
         //
         if (child->getReferenceCount() > 1 &&
             (child->getOpCode().hasSymbolReference() ||
              (child->getOpCodeValue() == TR::l2a && child->getChild(0)->containsCompressionSequence())))
            {
            TR::SymbolReference *vftPointerSymRef = TR::comp()->getSymRefTab()->element(TR::SymbolReferenceTable::vftSymbol);
            if (node->isNopableInlineGuard()
                && self()->getSupportsVirtualGuardNOPing()
                && child->getOpCodeValue() == TR::aloadi
                && child->getChild(0)->getOpCode().hasSymbolReference()
                && child->getChild(0)->getSymbolReference() == vftPointerSymRef
                && child->getChild(0)->getOpCodeValue() == TR::aloadi)
               {
               if (!child->getChild(0)->getChild(0)->getRegister() &&
                   child->getChild(0)->getChild(0)->getReferenceCount() > 1)
                  self()->evaluate(child->getChild(0)->getChild(0));
               else
                  self()->evaluateChildrenWithMultipleRefCount(child->getChild(0)->getChild(0));
               }
            else
               {
               self()->evaluate(child);
               }
            }
         else
            {
            self()->evaluateChildrenWithMultipleRefCount(child);
            }
         }
      }
   }
Example #16
TR::Register *
OMR::ARM64::TreeEvaluator::imulhEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *firstChild = node->getFirstChild();
   TR::Register *src1Reg = cg->evaluate(firstChild);
   TR::Node *secondChild = node->getSecondChild();
   TR::Register *src2Reg;
   TR::Register *trgReg = cg->allocateRegister();
   TR::Register *tmpReg = NULL;

   TR::Register *zeroReg = cg->allocateRegister();
   TR::RegisterDependencyConditions *cond = new (cg->trHeapMemory()) TR::RegisterDependencyConditions(1, 1, cg->trMemory());
   addDependency(cond, zeroReg, TR::RealRegister::xzr, TR_GPR, cg);

   // imulh is generated for constant idiv and the second child is the magic number
   // assume magic number is usually a large odd number with little optimization opportunity
   if (secondChild->getOpCode().isLoadConst() && secondChild->getRegister() == NULL)
      {
      int32_t value = secondChild->getInt();
      src2Reg = tmpReg = cg->allocateRegister();
      loadConstant32(cg, node, value, src2Reg);
      }
   else
      {
      src2Reg = cg->evaluate(secondChild);
      }

   generateTrg1Src3Instruction(cg, TR::InstOpCode::smaddl, node, trgReg, src1Reg, src2Reg, zeroReg, cond);
   cg->stopUsingRegister(zeroReg);
   /* logical shift right by 32 bits */
   uint32_t imm = 0x183F; // N=1, immr=32, imms=63
   generateTrg1Src1ImmInstruction(cg, TR::InstOpCode::ubfmx, node, trgReg, trgReg, imm);

   if (tmpReg)
      {
      cg->stopUsingRegister(tmpReg);
      }

   firstChild->decReferenceCount();
   secondChild->decReferenceCount();
   node->setRegister(trgReg);
   return trgReg;
   }
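
The smaddl-plus-shift sequence above computes the high half of a 32x32 signed multiply: smaddl with xzr produces the full 64-bit product, and the ubfmx encoding shifts it right by 32. A minimal standalone sketch of the value imulh is expected to produce:

#include <cstdint>
#include <cstdio>

// imulh: the high 32 bits of the signed 64-bit product of two 32-bit values.
static int32_t imulh(int32_t a, int32_t b)
   {
   return (int32_t)(((int64_t)a * (int64_t)b) >> 32);
   }

int main()
   {
   printf("%d\n", imulh(0x40000000, 8)); // 2: 2^30 * 8 = 2^33, whose high word is 2
   return 0;
   }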
Example #17
void TR_ExpressionsSimplification::tranformStoreMotionCandidate(TR::TreeTop *treeTop, bool *isPreheaderBlockInvalid)
   {
   TR::Node *node = treeTop->getNode();

   TR_ASSERT(node->getOpCode().isStore() && !node->getSymbol()->isStatic() && !node->getSymbol()->holdsMonitoredObject(),
      "node %p was expected to be a non-static non-monitored object store and was not.", node);

   // this candidate should be valid, either direct or indirect

   if (trace())
      comp()->getDebug()->print(comp()->getOutFile(), node, 0, true);

   TR::Block *entryBlock = _currentRegion->getEntryBlock();
   TR::Block *preheaderBlock = findPredecessorBlock(entryBlock);

   if (!preheaderBlock)
      {
      if (trace())
         traceMsg(comp(), "Fail to find a place to put the hoist code in\n");
      *isPreheaderBlockInvalid = true;
      return;
      }

   // An earlier post-dominance test ensures that the loop is executed at least once, or is canonicalized,
   // but to be safe we still perform the transformation on canonicalized loops only.
   if (_currentRegion->isCanonicalizedLoop())  // make sure that the loop is canonicalized, in which case the preheader is
      {                                        // executed in its first iteration and is protected.
      if (performTransformation(comp(), "%sMove out loop-invariant store [%p] to block_%d\n", OPT_DETAILS, node, preheaderBlock->getNumber()))
         {
         TR::Node *newNode = node->duplicateTree();
         transformNode(newNode, preheaderBlock);
         TR::TransformUtil::removeTree(comp(), treeTop);
         }
      }
   else
      {
      if (trace())
         traceMsg(comp(), "No canonicalized loop for this candidate\n");
      }
   }
Example #18
// also handles lrol
TR::Register *
OMR::ARM64::TreeEvaluator::irolEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *firstChild = node->getFirstChild();
   TR::Node *secondChild = node->getSecondChild();
   TR::Register *trgReg = cg->gprClobberEvaluate(firstChild);
   bool is64bit = node->getDataType().isInt64();
   TR::InstOpCode::Mnemonic op;

   if (secondChild->getOpCode().isLoadConst())
      {
      int32_t value = secondChild->getInt();
      uint32_t shift = is64bit ? (value & 0x3F) : (value & 0x1F);

      if (shift != 0)
         {
         shift = is64bit ? (64 - shift) : (32 - shift); // change ROL to ROR
         op = is64bit ? TR::InstOpCode::extrx : TR::InstOpCode::extrw; // ROR is an alias of EXTR
         generateTrg1Src2ShiftedInstruction(cg, op, node, trgReg, trgReg, trgReg, TR::SH_LSL, shift);
         }
      }
   else
      {
      TR::Register *shiftAmountReg = cg->evaluate(secondChild);
      generateNegInstruction(cg, node, shiftAmountReg, shiftAmountReg); // change ROL to ROR
      if (is64bit)
         {
         // 32->64 bit sign extension: SXTW is alias of SBFM
         uint32_t imm = 0x101F; // N=1, immr=0, imms=31
         generateTrg1Src1ImmInstruction(cg, TR::InstOpCode::sbfmx, node, shiftAmountReg, shiftAmountReg, imm);
         }
      op = is64bit ? TR::InstOpCode::rorvx : TR::InstOpCode::rorvw;
      generateTrg1Src2Instruction(cg, op, node, trgReg, trgReg, shiftAmountReg);
      }

   node->setRegister(trgReg);
   firstChild->decReferenceCount();
   secondChild->decReferenceCount();
   return trgReg;
   }
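
AArch64 has no rotate-left instruction, so both paths above rewrite ROL as ROR: by width minus the constant shift, or by the negated register shift (EXTR with equal sources is ROR). A minimal standalone sketch of that identity for the 32-bit case:

#include <cstdint>
#include <cstdio>

static uint32_t ror32(uint32_t x, uint32_t s)
   {
   s &= 31;
   return s == 0 ? x : (x >> s) | (x << (32 - s));
   }

// rol(x, s) is implemented as ror(x, 32 - s), matching the evaluator above.
static uint32_t rol32(uint32_t x, uint32_t s)
   {
   return ror32(x, 32 - (s & 31));
   }

int main()
   {
   printf("0x%08x\n", rol32(0x80000001u, 1)); // 0x00000003
   return 0;
   }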
Example #19
int32_t TR_LocalAnalysisInfo::HashTable::hash(TR::Node *node)
   {
   // Hash on the opcode and value numbers of the children
   //
   uint32_t h, g;
   int32_t numChildren = node->getNumChildren();
   h = (node->getOpCodeValue() << 4) + numChildren;
   g = 0;
   for (int32_t i = numChildren-1; i >= 0; i--)
      {
      TR::Node *child = node->getChild(i);
      h <<= 4;

      if (child->getOpCode().hasSymbolReference())
         h += (int32_t)(intptrj_t)child->getSymbolReference()->getSymbol();
      else
         h++;

      g = h & 0xF0000000;
      h ^= g >> 24;
      }
   return (h ^ g) % _numBuckets;
   }
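
The hash above is a PJW/ELF-style shift-and-fold: each child shifts the accumulator left by a nibble, and the top nibble is folded back in before it can be shifted out. A minimal standalone sketch of the same mixing over a plain array of values (the inputs here are arbitrary):

#include <cstdint>
#include <cstdio>

static uint32_t foldHash(const uint32_t *values, int32_t count, uint32_t numBuckets)
   {
   uint32_t h = count, g = 0;
   for (int32_t i = 0; i < count; i++)
      {
      h <<= 4;
      h += values[i];          // stands in for the symbol address or +1 above
      g = h & 0xF0000000;
      h ^= g >> 24;            // fold the top nibble back into the lower bits
      }
   return (h ^ g) % numBuckets;
   }

int main()
   {
   uint32_t vals[] = {7, 42, 13};
   printf("bucket %u\n", foldHash(vals, 3, 101));
   return 0;
   }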
Example #20
void TR_LoadExtensions::findPreferredLoadExtensions(TR::Node* parent)
   {
   TR::ILOpCode& parentOpCode = parent->getOpCode();

   // Count how a load is being used: as a signed or as an unsigned number?
   if (isSupportedType(parent) && parentOpCode.isConversion())
      {
      TR::Node* child = parent->getFirstChild();

      // Only examine non-trivial conversions
      if (isSupportedType(child) && parent->getSize() != child->getSize())
         {
         if (isSupportedLoad(child))
            {
            setExtensionPreference(child, parent);
            }
         else if (child->getOpCode().isLoadReg())
            {
            TR::Node* useRegLoad = child;

            TR_UseDefInfo* useDefInfo = optimizer()->getUseDefInfo();

            // If we have usedef info we can traverse all defs of this particular use and if all the defs are stores
            // of supported counted loads then we can count such loads as well. If this criteria is not met then there
            // exists at least one def (store) of this particular use which feeds from a non-load operation (an
            // addition for example). These are not candidates for skipping extension because we cannot easily extend
            // a non-load operation.
            if (useDefInfo != NULL && useDefInfo->infoIsValid() && useRegLoad->getUseDefIndex() != 0 && useDefInfo->isUseIndex(useRegLoad->getUseDefIndex()))
               {
               TR_UseDefInfo::BitVector info(comp()->allocator());
               if (useDefInfo->getUseDef(info, useRegLoad->getUseDefIndex()))
                  {
                  if (trace())
                     {
                     traceMsg(comp(), "\t\tPeeking through RegLoad %p for conversion %s [%p]\n", 
                        useRegLoad, 
                        parentOpCode.getName(), 
                        parent);
                     }

                  TR_UseDefInfo::BitVector::Cursor cursor(info);

                  int32_t firstDefIndex = useDefInfo->getFirstRealDefIndex();
                  int32_t firstUseIndex = useDefInfo->getFirstUseIndex();

                  for (cursor.SetToFirstOne(); cursor.Valid(); cursor.SetToNextOne())
                     {
                     int32_t defIndex = cursor;

                     // We've examined all the defs of this particular use
                     if (defIndex >= firstUseIndex)
                        {
                        break;
                        }

                     // Do not consider defs that correspond to method arguments as we cannot force extension on those
                     if (defIndex < firstDefIndex)
                        {
                        (*excludedNodes)[parent] = true;
                        break;
                        }

                     TR::Node* defRegLoad = useDefInfo->getNode(defIndex);

                     if (defRegLoad != NULL)
                        {
                        TR::Node* defRegLoadChild = defRegLoad->getFirstChild();

                        if (defRegLoad->getOpCode().isStoreReg() && isSupportedType(defRegLoadChild) && isSupportedLoad(defRegLoadChild))
                           {
                           if (trace())
                              {
                              traceMsg(comp(), "\t\tPeeked through use %s [%p] and found def %s [%p] with child %s [%p] - Counting [%p]\n",
                                 useRegLoad->getOpCode().getName(), 
                                 useRegLoad,
                                 defRegLoad->getOpCode().getName(), 
                                 defRegLoad,
                                 defRegLoadChild->getOpCode().getName(), 
                                 defRegLoadChild, 
                                 defRegLoadChild);
                              }

                           setExtensionPreference(defRegLoadChild, parent);
                           }
                        else
                           {
                           if (trace())
                              {
                              traceMsg(comp(), "\t\tPeeked through use %s [%p] and found def %s [%p] with child %s [%p] - Excluding [%p]\n",
                                 useRegLoad->getOpCode().getName(), 
                                 useRegLoad,
                                 defRegLoad->getOpCode().getName(), 
                                 defRegLoad,
                                 defRegLoadChild != NULL ? 
                                    defRegLoadChild->getOpCode().getName() : 
                                    "NULL", 
                                 defRegLoadChild, 
                                 parent);
                              }

                           (*excludedNodes)[parent] = true;
                           }
                        }
                     }
                  }
               }
            else
               {
               (*excludedNodes)[parent] = true;
               }
            }
         }
      }


   // Exclude all loads which feed into global register stores which require sign extensions. This must be done 
   // because Load Extensions is a local optimization and it must respect global sign extension decisions made
   // by GRA. Excluding such loads prevents a situation where GRA decided that a particular global register
   // should be sign extended at its definitions however Load Extensions has determined that the same load
   // should be zero extended. If local RA were to pick the same register for the global register as well as
   // the load then we have a conflicting decision which will result in a conversion to be skipped when it is
   // not supposed to be.
   if (parentOpCode.isStoreReg() && parent->needsSignExtension() && parent->getFirstChild()->getOpCode().isLoadVar())
      {
      (*excludedNodes)[parent->getFirstChild()] = true;
      }
   }
Example #21
// Returns true if there is any constraint to the move
bool TR_LocalLiveRangeReduction::isAnySymInDefinedOrUsedBy(TR_TreeRefInfo *currentTreeRefInfo, TR::Node *currentNode, TR_TreeRefInfo *movingTreeRefInfo )
   {
   TR::Node *movingNode = movingTreeRefInfo->getTreeTop()->getNode();
   // ignore anchors
   //
   if (movingNode->getOpCode().isAnchor())
      movingNode = movingNode->getFirstChild();

   TR::ILOpCode &opCode = currentNode->getOpCode();

   ////if ((opCode.getOpCodeValue() == TR::monent) || (opCode.getOpCodeValue() == TR::monexit))
   if (nodeMaybeMonitor(currentNode))
      {
      if (trace())
         traceMsg(comp(),"cannot move %p beyond monitor %p\n",movingNode,currentNode);
      return true;
      }

   // Don't move gc points or things across gc points
   //
   if (movingNode->canGCandReturn() ||
         currentNode->canGCandReturn())
      {
      if (trace())
         traceMsg(comp(), "cannot move gc points %p past %p\n", movingNode, currentNode);
      return true;
      }

   // Don't move checks or calls at all
   //
   if (containsCallOrCheck(movingTreeRefInfo,movingNode))
      {
      if (trace())
         traceMsg(comp(),"cannot move check or call %s\n", getDebug()->getName(movingNode));
      return true;
      }

   // Don't move object header store past a GC point
   //
   if ((currentNode->getOpCode().isWrtBar() || currentNode->canCauseGC()) && mayBeObjectHeaderStore(movingNode, fe()))
      {
      if (trace())
         traceMsg(comp(),"cannot move possible object header store %s past GC point %s\n", getDebug()->getName(movingNode), getDebug()->getName(currentNode));
      return true;
      }

   if (TR::Compiler->target.cpu.isPower() && opCode.getOpCodeValue() == TR::allocationFence)
      {
      // Can't move allocations past flushes
      if (movingNode->getOpCodeValue() == TR::treetop &&
          movingNode->getFirstChild()->getOpCode().isNew() &&
          (currentNode->getAllocation() == NULL ||
           currentNode->getAllocation() == movingNode->getFirstChild()))
         {
         if (trace())
            {
            traceMsg(comp(),"cannot move %p beyond flush %p - ", movingNode, currentNode);
            if (currentNode->getAllocation() == NULL)
               traceMsg(comp(),"(flush with null allocation)\n");
            else
               traceMsg(comp(),"(flush for allocation %p)\n", currentNode->getAllocation());
            }
         return true;
         }

      // Can't move certain stores past flushes
      // Exclude all indirect stores, they may be for stack allocs, in which case the flush is needed at least as a scheduling barrier
      // Direct stores to autos and parms are the only safe candidates
      if (movingNode->getOpCode().isStoreIndirect() ||
          (movingNode->getOpCode().isStoreDirect() && !movingNode->getSymbol()->isParm() && !movingNode->getSymbol()->isAuto()))
         {
         if (trace())
            traceMsg(comp(),"cannot move %p beyond flush %p - (flush for possible stack alloc)", movingNode, currentNode);
         return true;
         }
      }

   for (int32_t i = 0; i < currentNode->getNumChildren(); i++)
      {
      TR::Node *child = currentNode->getChild(i);

      // Any node that has side effects (like calls and newarray) cannot be evaluated in the middle of the tree.
      if (movingTreeRefInfo->getFirstRefNodesList()->find(child))
         {
         // for calls and unresolved symbols that are not under a check

         if (child->exceptionsRaised() ||
             (child->getOpCode().hasSymbolReference() && child->getSymbolReference()->isUnresolved()))
            {
            if (trace())
               traceMsg(comp(),"cannot move %p beyond %p - cannot change evaluation point of %p\n ",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),child);
            return true;
            }

         else if(movingNode->getOpCode().isStore())
            {
            TR::SymbolReference *stSymRef = movingNode->getSymbolReference();
            int32_t stSymRefNum = stSymRef->getReferenceNumber();
            int32_t numHelperSymbols = comp()->getSymRefTab()->getNumHelperSymbols();
            if ((comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::vftSymbol))||
                (comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::contiguousArraySizeSymbol))||
                (comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::discontiguousArraySizeSymbol))||
                (stSymRef == comp()->getSymRefTab()->findHeaderFlagsSymbolRef())||
                (stSymRef->getSymbol() == comp()->getSymRefTab()->findGenericIntShadowSymbol()))

               return true;
            }

         else if (movingNode->getOpCode().isResolveOrNullCheck())
            {
            if (trace())
               traceMsg(comp(),"cannot move %p beyond %p - node %p under ResolveOrNullCheck",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),currentNode);
            return true;
            }

         else if (TR::Compiler->target.is64Bit() &&
                  movingNode->getOpCode().isBndCheck() &&
                  ((opCode.getOpCodeValue() == TR::i2l) || (opCode.getOpCodeValue() == TR::iu2l)) &&
                  !child->isNonNegative())
            {
            if (trace())
               traceMsg(comp(),"cannot move %p beyond %p - changing the eval point of %p will cause an extra cg instruction ",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),currentNode);
            return true;
            }
         }

      // don't recurse into children that are not the first reference here
      if (child->getReferenceCount()==1 || currentTreeRefInfo->getFirstRefNodesList()->find(child))
         {
         if (isAnySymInDefinedOrUsedBy(currentTreeRefInfo, child, movingTreeRefInfo ))
            return true;
         }
      }

   return false;
   }
Example #22
// This function splits a single successor block following a guard and is used to
// do the following transform
//    block - cold1         block - cold1
//      \     /        =>     |       |
//     nextBlock           nextBlock nextBlock' (called tailSplitBlock below)
//         |                  \      /
//        ...                   ...
void TR_VirtualGuardHeadMerger::tailSplitBlock(TR::Block * block, TR::Block * cold1)
   {
   TR::CFG *cfg = comp()->getFlowGraph();
   cfg->setStructure(NULL);
   TR_BlockCloner cloner(cfg);
   TR::Block *tailSplitBlock = cloner.cloneBlocks(block->getNextBlock(), block->getNextBlock());
   tailSplitBlock->setFrequency(cold1->getFrequency());
   if (cold1->isCold())
      tailSplitBlock->setIsCold();

   // physically put the block after cold1 since we want cold1 to fall through
   tailSplitBlock->getExit()->join(cold1->getExit()->getNextTreeTop());
   cold1->getExit()->join(tailSplitBlock->getEntry());

   // remove cold1's goto
   TR::TransformUtil::removeTree(comp(), cold1->getExit()->getPrevRealTreeTop());

   // copy the exception edges
   for (auto e = block->getNextBlock()->getExceptionSuccessors().begin(); e != block->getNextBlock()->getExceptionSuccessors().end(); ++e)
      cfg->addExceptionEdge(tailSplitBlock, (*e)->getTo());

   cfg->addEdge(cold1, tailSplitBlock);
   // lastly fix up the exit of tailSplitBlock
   TR::Node *tailSplitEnd = tailSplitBlock->getExit()->getPrevRealTreeTop()->getNode();
   if (tailSplitEnd->getOpCode().isGoto())
      {
      tailSplitEnd->setBranchDestination(block->getNextBlock()->getLastRealTreeTop()->getNode()->getBranchDestination());
      cfg->addEdge(tailSplitBlock, block->getNextBlock()->getSuccessors().front()->getTo());
      }
   else if (tailSplitEnd->getOpCode().isBranch())
      {
      TR::Block *gotoBlock = TR::Block::createEmptyBlock(tailSplitEnd, comp(), cold1->getFrequency());
      if (cold1->isCold())
          gotoBlock->setIsCold(true);
      gotoBlock->getExit()->join(tailSplitBlock->getExit()->getNextTreeTop());
      tailSplitBlock->getExit()->join(gotoBlock->getEntry());
      cfg->addNode(gotoBlock);

      gotoBlock->append(TR::TreeTop::create(comp(), TR::Node::create(tailSplitEnd, TR::Goto, 0, block->getNextBlock()->getExit()->getNextTreeTop())));
      cfg->addEdge(tailSplitBlock, gotoBlock);
      cfg->addEdge(tailSplitBlock, tailSplitBlock->getLastRealTreeTop()->getNode()->getBranchDestination()->getEnclosingBlock());
      cfg->addEdge(gotoBlock, block->getNextBlock()->getNextBlock());
      }
   else if (
            !tailSplitEnd->getOpCode().isReturn() &&
            !tailSplitEnd->getOpCode().isJumpWithMultipleTargets() &&
             tailSplitEnd->getOpCodeValue() != TR::athrow &&
            !(tailSplitEnd->getNumChildren() >= 1 && tailSplitEnd->getFirstChild()->getOpCodeValue() == TR::athrow)
           )
      {
      tailSplitBlock->append(TR::TreeTop::create(comp(), TR::Node::create(tailSplitEnd, TR::Goto, 0, block->getNextBlock()->getExit()->getNextTreeTop())));
      cfg->addEdge(tailSplitBlock, block->getNextBlock()->getNextBlock());
      }
   else
      {
      for (auto e = block->getNextBlock()->getSuccessors().begin(); e != block->getNextBlock()->getSuccessors().end(); ++e)
         cfg->addEdge(tailSplitBlock, (*e)->getTo());
      }
   cfg->removeEdge(cold1, block->getNextBlock());

   optimizer()->setUseDefInfo(NULL);
   optimizer()->setValueNumberInfo(NULL);
   }
Example #23
TR::Node *
OMR::Simplifier::unaryCancelOutWithChild(TR::Node * node, TR::Node * firstChild, TR::TreeTop *anchorTree, TR::ILOpCodes opcode, bool anchorChildren)
   {
   if (!isLegalToUnaryCancel(node, firstChild, opcode))
      return NULL;

   if (firstChild->getOpCodeValue() == opcode &&
       (node->getType().isAggregate() || firstChild->getType().isAggregate()) &&
       (node->getSize() > firstChild->getSize() || node->getSize() != firstChild->getFirstChild()->getSize()))
      {
      // ensure a truncation side-effect of a conversion is not lost
      // o2a size=3
      //   a2o size=3 // conversion truncates in addition to type cast so cannot be removed
      //     loadaddr size=4
      // This restriction could be loosened to only disallow intermediate truncations (see BCD case above) but then would require a node
      // op that would just correct for size (e.g. addrSizeMod size=3 to replace the o2a/a2o pair)
      //
      // Do allow cases when all three sizes are the same and when the middle node widens but the top and bottom node have the same size, e.g.
      //
      // i2o size=3
      //   o2i size=4
      //     oload size=3
      //
      // Also allow the special case where the grandchild is not really truncated as the 'truncated' bytes are known to be zero
      // (i.e. there really isn't an intermediate truncation of 4->3 even though it appears that way from looking at the sizes alone)
      // o2i
      //   i2o size=3
      //     iushr
      //       x
      //       iconst 8
      bool disallow = true;
      TR::Node *grandChild = firstChild->getFirstChild();
      size_t nodeSize = node->getSize();
      if (node->getType().isIntegral() &&
          nodeSize == grandChild->getSize() &&
          nodeSize > firstChild->getSize())
         {
         size_t truncatedBits = (nodeSize - firstChild->getSize()) * 8;
         if (grandChild->getOpCode().isRightShift() && grandChild->getOpCode().isShiftLogical() &&
             grandChild->getSecondChild()->getOpCode().isLoadConst() &&
             (grandChild->getSecondChild()->get64bitIntegralValue() == truncatedBits))
            {
            disallow = false;
            if (trace())
               traceMsg(comp(),"do allow unaryCancel of node %s (%p) and firstChild %s (%p) as grandChild %s (%p) zeros the %d truncated bytes\n",
                       node->getOpCode().getName(),node,firstChild->getOpCode().getName(),firstChild,
                       grandChild->getOpCode().getName(),grandChild,truncatedBits/8);
            }
         }

      if (disallow)
         {
         if (trace())
            traceMsg(comp(),"disallow unaryCancel of node %s (%p) and firstChild %s (%p) due to unequal sizes (nodeSize %d, firstChildSize %d, firstChild->childSize %d)\n",
                    node->getOpCode().getName(),node,firstChild->getOpCode().getName(),firstChild,
                    node->getSize(),firstChild->getSize(),firstChild->getFirstChild()->getSize());
         return NULL;
         }
      }

   if (firstChild->getOpCodeValue() == opcode &&
       performTransformation(comp(), "%sRemoving node [" POINTER_PRINTF_FORMAT "] %s and its child [" POINTER_PRINTF_FORMAT "] %s\n",
             optDetailString(), node, node->getOpCode().getName(), firstChild, firstChild->getOpCode().getName()))
      {
      TR::Node *grandChild = firstChild->getFirstChild();
      grandChild->incReferenceCount();
      bool anchorChildrenNeeded = anchorChildren &&
         (node->getNumChildren() > 1 ||
          firstChild->getNumChildren() > 1 ||
          node->getOpCode().hasSymbolReference() ||
          firstChild->getOpCode().hasSymbolReference());
      prepareToStopUsingNode(node, anchorTree, anchorChildrenNeeded);
      node->recursivelyDecReferenceCount();
      node->setVisitCount(0);
      return grandChild;
      }

   return NULL;
   }
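
The special case described in the comment (the 'truncated' bytes are known to be zero after a logical right shift by exactly that many bits) can be checked with plain integers. A minimal standalone sketch, assuming a 4-byte value narrowed to 3 bytes and widened again:

#include <cstdint>
#include <cstdio>

int main()
   {
   uint32_t x = 0xDEADBEEFu >> 8;        // iushr by 8: the top byte is now zero
   uint32_t truncated = x & 0x00FFFFFFu; // narrow to 3 bytes (the i2o size=3)
   printf("%d\n", truncated == x);       // 1: the 4 -> 3 -> 4 byte round trip loses nothing
   return 0;
   }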
Example #24
bool
OMR::Simplifier::isBoundDefinitelyGELength(TR::Node *boundChild, TR::Node *lengthChild)
   {
   TR::ILOpCodes boundOp = boundChild->getOpCodeValue();
   if (boundOp == TR::iadd)
      {
      TR::Node *first  = boundChild->getFirstChild();
      TR::Node *second = boundChild->getSecondChild();
      if (first == lengthChild)
         {
         TR::ILOpCodes secondOp = second->getOpCodeValue();
         if (second->getOpCode().isArrayLength()                          ||
             secondOp == TR::bu2i                                          ||
             secondOp == TR::su2i                                          ||

             (secondOp == TR::iconst &&
              second->getInt() >= 0)                                      ||

             (secondOp == TR::iand                                     &&
              second->getSecondChild()->getOpCodeValue() == TR::iconst &&
              (second->getSecondChild()->getInt() & 0x80000000) == 0)       ||

             (secondOp == TR::iushr                                    &&
              second->getSecondChild()->getOpCodeValue() == TR::iconst &&
              (second->getSecondChild()->getInt() & 0x1f) > 0))
            {
            return true;
            }
         }
      else if (second == lengthChild)
         {
         TR::ILOpCodes firstOp = first->getOpCodeValue();
         if (first->getOpCode().isArrayLength()                          ||
             firstOp == TR::bu2i                                          ||
             firstOp == TR::su2i                                          ||

             (firstOp == TR::iand                                     &&
              first->getSecondChild()->getOpCodeValue() == TR::iconst &&
              (first->getSecondChild()->getInt() & 0x80000000) == 0)       ||

             (firstOp == TR::iushr &&
              first->getSecondChild()->getOpCodeValue() == TR::iconst &&
              (first->getSecondChild()->getInt() & 0x1f) > 0))
            {
            return true;
            }
         }
      }
   else if (boundOp == TR::isub)
      {
      TR::Node *first  = boundChild->getFirstChild();
      TR::Node *second = boundChild->getSecondChild();
      if (first  == lengthChild)
         {
         TR::ILOpCodes secondOp = second->getOpCodeValue();
         if ((secondOp == TR::iconst &&
              second->getInt() < 0)                                      ||

             (secondOp == TR::ior                                      &&
              second->getSecondChild()->getOpCodeValue() == TR::iconst &&
              (second->getSecondChild()->getInt() & 0x80000000) != 0))
            {
            return true;
            }
         }
      }

   return false;
   }
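
The iadd patterns accepted above all guarantee a non-negative second operand, so (ignoring overflow) bound = length + second >= length. Two of them can be demonstrated directly: masking with a constant whose sign bit is clear, and a logical shift right by at least one bit. A minimal standalone sketch (the mask constant is arbitrary):

#include <cstdint>
#include <cstdio>

static int32_t maskWithPositiveConst(int32_t x)
   {
   return x & 0x7FFFFF00; // sign bit of the mask is clear, so the result is >= 0
   }

static int32_t logicalShiftRight(int32_t x, int32_t k)
   {
   return (int32_t)((uint32_t)x >> (k & 0x1f)); // iushr; any k with (k & 0x1f) > 0 clears the sign bit
   }

int main()
   {
   int32_t x = -12345;
   printf("%d %d\n", maskWithPositiveConst(x) >= 0, logicalShiftRight(x, 1) >= 0); // 1 1
   return 0;
   }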
Example #25
TR_BitVector *
addVeryRefinedCallAliasSets(TR::ResolvedMethodSymbol * methodSymbol, TR_BitVector * aliases, List<void> * methodsPeeked)
   {
   TR::Compilation *comp = TR::comp();

   void * methodId = methodSymbol->getResolvedMethod()->getPersistentIdentifier();
   if (methodsPeeked->find(methodId))
      {
      // This can't be allocated into the alias region as it must be accessed across optimizations
      TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
      *heapAliases |= *aliases;
      return heapAliases;
      }

   // stop if the peek is getting very deep
   //
   if (methodsPeeked->getSize() >= PEEK_THRESHOLD)
      return 0;

   methodsPeeked->add(methodId);

   dumpOptDetails(comp, "O^O REFINING ALIASES: Peeking into the IL to refine aliases \n");

   if (!methodSymbol->getResolvedMethod()->genMethodILForPeeking(methodSymbol, comp, true))
      return 0;

   TR::SymbolReferenceTable * symRefTab = comp->getSymRefTab();
   for (TR::TreeTop * tt = methodSymbol->getFirstTreeTop(); tt; tt = tt->getNextTreeTop())
      {
      TR::Node *node = tt->getNode();
      if (node->getOpCode().isResolveCheck())
         return 0;

      if ((node->getOpCodeValue() == TR::treetop) ||
          (node->getOpCodeValue() == TR::compressedRefs) ||
          node->getOpCode().isCheck())
         node = node->getFirstChild();

      if (node->getOpCode().isStore())
         {
         TR::SymbolReference * symRefInCallee = node->getSymbolReference(), * symRefInCaller;
         TR::Symbol * symInCallee = symRefInCallee->getSymbol();
         TR::DataType type = symInCallee->getDataType();
         if (symInCallee->isShadow())
            {
            if (symInCallee->isArrayShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayShadowIndex(type));

            else if (symInCallee->isArrayletShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayletShadowIndex(type));

            else
               symRefInCaller = symRefTab->findShadowSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);

            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedShadowAliases(aliases, symRefTab);

               aliases->set(symRefInCaller->getReferenceNumber());
               }

            }
         else if (symInCallee->isStatic())
            {
            symRefInCaller = symRefTab->findStaticSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);
            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedStaticAliases(aliases, symRefTab);
               else
                  aliases->set(symRefInCaller->getReferenceNumber());
               }
            }
         }
      else if (node->getOpCode().isCall())
         {
         if (node->getOpCode().isCallIndirect())
            return 0;
         TR::ResolvedMethodSymbol * calleeSymbol = node->getSymbol()->getResolvedMethodSymbol();
         if (!calleeSymbol)
            return 0;
         TR_ResolvedMethod * calleeMethod = calleeSymbol->getResolvedMethod();
         if (!calleeMethod->isCompilable(comp->trMemory()) || calleeMethod->isJNINative())
            return 0;

         if (!addVeryRefinedCallAliasSets(calleeSymbol, aliases, methodsPeeked))
            return 0;
         }
      else if (node->getOpCodeValue() == TR::monent)
         return 0;
      }

   // This can't be allocated into the alias region as it must be accessed across optimizations
   TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
   *heapAliases |= *aliases;
   return heapAliases;
   }
Example #26
TR::Node *constrainVcall(TR::ValuePropagation *vp, TR::Node *node)
   {
   constrainCall(vp, node);
   // Look for System.arraycopy call. If the node is transformed into an arraycopy
   // re-process it.
   //
   vp->transformArrayCopyCall(node);
   if (node->getOpCodeValue() == TR::arraycopy)
      {
      node->setVisitCount(0);
      vp->launchNode(node, vp->getCurrentParent(), 0);
      return node;
      }

   if (vp->transformUnsafeCopyMemoryCall(node))
      return node;

   cacheStringAppend(vp,node);

#ifdef J9_PROJECT_SPECIFIC
   TR::SymbolReference *finalizeSymRef = vp->comp()->getSymRefTab()->findOrCreateRuntimeHelper(TR_jitCheckIfFinalizeObject, true, true, true);
   if (node->getSymbolReference() == finalizeSymRef)
      {
      TR::Node *receiver = node->getFirstChild();
      bool isGlobal;
      TR::VPConstraint *type = vp->getConstraint(receiver, isGlobal);
      bool canBeRemoved = false;
      // ensure the type is really a fixedClass
      // resolvedClass is not sufficient because java.lang.Object has an
      // empty finalizer method (hasFinalizer returns false) and the call to
      // vm helper is incorrectly optimized in this case
      //
      if (type && type->getClassType() &&
         type->getClassType()->asFixedClass())
         {
         TR_OpaqueClassBlock *klass = type->getClassType()->getClass();
         if (klass && !TR::Compiler->cls.hasFinalizer(vp->comp(), klass) && !vp->comp()->fej9()->isOwnableSyncClass(klass))
            {
            canBeRemoved = true;
            }
         }
      // If a class has a finalizer or is an ownableSync it won't be allocated on the stack. That's ensured
      // by virtue of (indirectly) calling bool J9::ObjectModel::canAllocateInlineClass(TR_OpaqueClassBlock *block).
      // It doesn't make sense to call jitCheckIfFinalizeObject for a stack-allocated object, so optimize it away.
      else if (receiver->getOpCode().hasSymbolReference() && receiver->getSymbol()->isLocalObject())
         {
         canBeRemoved = true;
         }

      if (canBeRemoved &&
           performTransformation(vp->comp(), "%s Removing redundant call to jitCheckIfFinalize [%p]\n", OPT_DETAILS, node))
         {
         ///printf("found opportunity in %s to remove call to checkfinalize\n", vp->comp()->signature());fflush(stdout);
         ///traceMsg(vp->comp(), "found opportunity to remove call %p to checkfinalize\n", node);
         vp->removeNode(node);
         vp->_curTree->setNode(NULL);
         return node;
         }
      }
#endif

   return node;
   }
Example #27
0
TR::Node *
OMR::TransformUtil::scalarizeArrayCopy(
      TR::Compilation *comp,
      TR::Node *node,
      TR::TreeTop *tt,
      bool useElementType,
      bool &didTransformArrayCopyNode,
      TR::SymbolReference *sourceRef,
      TR::SymbolReference *targetRef,
      bool castToIntegral)
   {
   TR::CodeGenerator *cg = comp->cg();

   didTransformArrayCopyNode = false;

   if ((comp->getOptLevel() == noOpt) ||
       !comp->getOption(TR_ScalarizeSSOps) ||
       node->getOpCodeValue() != TR::arraycopy ||
       node->getNumChildren() != 3 ||
       comp->requiresSpineChecks() ||
       !node->getChild(2)->getOpCode().isLoadConst() ||
       cg->getOptimizationPhaseIsComplete())
      return node;

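   // The copy length is a compile-time constant: a zero-length copy is simply
   // removed, while negative or oversized lengths are left untouched.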
   int64_t byteLen = node->getChild(2)->get64bitIntegralValue();
   if (byteLen == 0)
      {
      if (tt)
         {
         // Anchor the first two children
         if (!node->getFirstChild()->safeToDoRecursiveDecrement())
            TR::TreeTop::create(comp, tt->getPrevTreeTop(),
                               TR::Node::create(TR::treetop, 1, node->getFirstChild()));
         if (!node->getSecondChild()->safeToDoRecursiveDecrement())
            TR::TreeTop::create(comp, tt->getPrevTreeTop(),
                               TR::Node::create(TR::treetop, 1, node->getSecondChild()));
         tt->getPrevTreeTop()->join(tt->getNextTreeTop());
         tt->getNode()->recursivelyDecReferenceCount();
         didTransformArrayCopyNode = true;
         }
      return node;
      }
   else if (byteLen < 0)
      {
      return node;
      }
   else if (byteLen > TR_MAX_OTYPE_SIZE)
      {
      return node;
      }
   TR::DataType dataType = TR::Aggregate;

   // Get the element datatype from the (hidden) 4th child
   TR::DataType elementType = node->getArrayCopyElementType();
   int32_t elementSize = TR::Symbol::convertTypeToSize(elementType);

   if (byteLen == elementSize)
      {
      dataType = elementType;
      }
   else if (!useElementType)
      {
      switch (byteLen)
         {
         case 1: dataType = TR::Int8; break;
         case 2: dataType = TR::Int16; break;
         case 4: dataType = TR::Int32; break;
         case 8: dataType = TR::Int64; break;
         }
      }
   else
      {
      return node;
      }

   // A 64-bit load/store on 64-bit PPC requires the offset to be word aligned;
   // give up if this requirement is not met.
   // TODO: also need to check whether the first two children are aload nodes
   bool cannot_use_load_store_long = false;
   if (TR::Compiler->target.cpu.isPower())
      if (dataType == TR::Int64 && TR::Compiler->target.is64Bit())
         {
         TR::Node * firstChild = node->getFirstChild();
         if (firstChild->getNumChildren() == 2)
            {
            TR::Node *offsetChild = firstChild->getSecondChild();
            TR_ASSERT(offsetChild->getOpCodeValue() != TR::iconst, "iconst shouldn't be used for 64-bit array indexing");
            if (offsetChild->getOpCodeValue() == TR::lconst)
               {
               if ((offsetChild->getLongInt() & 0x3) != 0)
                  cannot_use_load_store_long = true;
               }
            }
         TR::Node *secondChild = node->getSecondChild();
         if (secondChild->getNumChildren() == 2)
            {
            TR::Node *offsetChild = secondChild->getSecondChild();
            TR_ASSERT(offsetChild->getOpCodeValue() != TR::iconst, "iconst shouldn't be used for 64-bit array indexing");
            if (offsetChild->getOpCodeValue() == TR::lconst)
               {
               if ((offsetChild->getLongInt() & 0x3) != 0)
                  cannot_use_load_store_long = true;
               }
            }
         }
   if (cannot_use_load_store_long) return node;

   TR::SymbolReference *nodeRef;

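   // The incoming source/target symbol references are not used below; both sides
   // of the scalarized copy are accessed through a generic int shadow at offset 0.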
   targetRef = comp->getSymRefTab()->findOrCreateGenericIntShadowSymbolReference(0);
   sourceRef = targetRef;

   bool trace = comp->getOption(TR_TraceScalarizeSSOps);
   if (trace)
      traceMsg(comp,"scalarizeArrayCopy: node %p got targetRef (#%d) and sourceRef (#%d)\n",
         node,targetRef?targetRef->getReferenceNumber():-1,sourceRef?sourceRef->getReferenceNumber():-1);

   if (targetRef == NULL || sourceRef == NULL)
      {
      if (trace)
         traceMsg(comp,"do not scalarizeArrayCopy node %p : targetRef is NULL (%s) or sourceRef is NULL (%s)\n",node,targetRef?"no":"yes",sourceRef?"no":"yes");
      return node;
      }
#ifdef J9_PROJECT_SPECIFIC
   if (targetRef->getSymbol()->getDataType().isBCD() ||
       sourceRef->getSymbol()->getDataType().isBCD())
      {
      return node;
      }
#endif

   if (performTransformation(comp, "%sScalarize arraycopy 0x%p\n", OPT_DETAILS, node))
      {
      TR::Node *store = TR::TransformUtil::scalarizeAddressParameter(comp, node->getSecondChild(), byteLen, dataType, targetRef, true);
      TR::Node *load = TR::TransformUtil::scalarizeAddressParameter(comp, node->getFirstChild(), byteLen, dataType, sourceRef, false);

      if (tt)
         {
         // Transforming
         //    treetop
         //      arrayCopy   <-- node
         // into
         //    *store
         //
         node->recursivelyDecReferenceCount();
         tt->setNode(node);
         }
      else
         {
         for (int16_t c = node->getNumChildren() - 1; c >= 0; c--)
            cg->recursivelyDecReferenceCount(node->getChild(c));
         }

      TR::Node::recreate(node, store->getOpCodeValue());
      node->setSymbolReference(store->getSymbolReference());

      if (store->getOpCode().isStoreIndirect())
         {
         node->setChild(0, store->getFirstChild());
         node->setAndIncChild(1, load);
         node->setNumChildren(2);
         }
      else
         {
         node->setAndIncChild(0, load);
         node->setNumChildren(1);
         }

      didTransformArrayCopyNode = true;
      }

   return node;
   }
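// For reference, a rough sketch of the resulting IL shape for a constant 4-byte
// copy anchored under a treetop (illustrative only: the generic int shadow symbol
// reference and the direct-versus-indirect store form depend on the address
// children, per the isStoreIndirect check above):
//
//    treetop                            istorei <generic int shadow>
//      arraycopy                ==>       <destination address>
//        <source address>                 iloadi <generic int shadow>
//        <destination address>              <source address>
//        <constant length 4>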
Example #28
0
bool TR::ILValidator::treesAreValid(TR::TreeTop *start, TR::TreeTop *stop)
   {
   checkSoundness(start, stop);

   for (PostorderNodeOccurrenceIterator iter(start, _comp, "VALIDATOR"); iter != stop; ++iter)
      {
      updateNodeState(iter);

      // General node validation
      //
      validateNode(iter);

      //
      // Additional specific kinds of validation
      //

      TR::Node *node = iter.currentNode();
      if (node->getOpCodeValue() == TR::BBEnd)
         {
         // Determine whether this is the end of an extended block
         //
         bool isEndOfExtendedBlock = false;
         TR::TreeTop *nextTree = iter.currentTree()->getNextTreeTop();
         if (nextTree)
            {
            validityRule(iter, nextTree->getNode()->getOpCodeValue() == TR::BBStart, "Expected BBStart after BBEnd");
            isEndOfExtendedBlock = ! nextTree->getNode()->getBlock()->isExtensionOfPreviousBlock();
            }
         else
            {
            isEndOfExtendedBlock = true;
            }

         if (isEndOfExtendedBlock)
            validateEndOfExtendedBlock(iter);
         }

      auto opcode = node->getOpCode();
      if (opcode.expectedChildCount() != ILChildProp::UnspecifiedChildCount)
         {
         // Validate child expectations
         //

         const auto expChildCount = opcode.expectedChildCount();
         const auto actChildCount = node->getNumChildren();

         // validate child count
         if (!opcode.canHaveGlRegDeps())
            {
            // in the common case, no GlRegDeps child is expected or present
            validityRule(iter, actChildCount == expChildCount,
                         "Child count %d does not match expected value of %d", actChildCount, expChildCount);
            }
         else if (actChildCount == (expChildCount + 1))
            {
            // adjust the expected child count to account for a possible extra GlRegDeps
            // child and make sure the last child actually is a GlRegDeps
            validityRule(iter, node->getChild(actChildCount - 1)->getOpCodeValue() == TR::GlRegDeps,
                         "Child count %d does not match expected value of %d (%d without GlRegDeps) and last child is not a GlRegDeps",
                         actChildCount, expChildCount + 1, expChildCount);
            }
         else
            {
            // if the expected and actual child counts still don't match, the child
            // count is simply wrong, even allowing for an optional GlRegDeps
            validityRule(iter, actChildCount == expChildCount,
                         "Child count %d matches neither expected values of %d (without GlRegDeps) nor %d (with GlRegDeps)",
                         actChildCount, expChildCount, expChildCount + 1);
            }

         // validate child types
         for (auto i = 0; i < actChildCount; ++i)
            {
            auto childOpcode = node->getChild(i)->getOpCode();
            if (childOpcode.getOpCodeValue() != TR::GlRegDeps)
               {
               const auto expChildType = opcode.expectedChildType(i);
               const auto actChildType = childOpcode.getDataType().getDataType();
               const auto expChildTypeName = expChildType == ILChildProp::UnspecifiedChildType ? "UnspecifiedChildType" : TR::DataType::getName(expChildType);
               const auto actChildTypeName = TR::DataType::getName(actChildType);
               validityRule(iter, expChildType == ILChildProp::UnspecifiedChildType || actChildType == expChildType,
                            "Child %d has unexpected type %s (expected %s)" , i, actChildTypeName, expChildTypeName);
               }
            else
               {
               // make sure the node is allowed to have a GlRegDeps child
               // and make sure that it is the last child
               validityRule(iter, opcode.canHaveGlRegDeps() && (i == actChildCount - 1), "Unexpected GlRegDeps child %d", i);
               }
            }
         }
      }

   return _isValidSoFar;
   }
Example #29
0
void
TR_S390BinaryAnalyser::genericAnalyser(TR::Node * root,
                                       TR::InstOpCode::Mnemonic regToRegOpCode,
                                       TR::InstOpCode::Mnemonic memToRegOpCode,
                                       TR::InstOpCode::Mnemonic copyOpCode)
   {
   TR::Node * firstChild;
   TR::Node * secondChild;
   firstChild = root->getFirstChild();
   secondChild = root->getSecondChild();
   TR::Register * firstRegister = firstChild->getRegister();
   TR::Register * secondRegister = secondChild->getRegister();
   TR::Compilation *comp = TR::comp();

   TR::SymbolReference * firstReference = firstChild->getOpCode().hasSymbolReference() ? firstChild->getSymbolReference() : NULL;
   TR::SymbolReference * secondReference = secondChild->getOpCode().hasSymbolReference() ? secondChild->getSymbolReference() : NULL;

   setInputs(firstChild, firstRegister, secondChild, secondRegister,
             false, false, comp,
             (cg()->isAddressOfStaticSymRefWithLockedReg(firstReference) ||
              cg()->isAddressOfPrivateStaticSymRefWithLockedReg(firstReference)),
             (cg()->isAddressOfStaticSymRefWithLockedReg(secondReference) ||
              cg()->isAddressOfPrivateStaticSymRefWithLockedReg(secondReference)));

   /*
    * Check whether SH or CH can be used to evaluate this integer subtract/compare node.
    * The second operand of SH/CH is a 16-bit value in memory; using these
    * instructions directly can save a load instruction.
    */
   bool is16BitMemory2Operand = false;
   if (secondChild->getOpCodeValue() == TR::s2i &&
       secondChild->getFirstChild()->getOpCodeValue() == TR::sloadi &&
       secondChild->isSingleRefUnevaluated() &&
       secondChild->getFirstChild()->isSingleRefUnevaluated())
      {
      bool supported = true;

      if (memToRegOpCode == TR::InstOpCode::S)
         {
         memToRegOpCode = TR::InstOpCode::SH;
         }
      else if (memToRegOpCode == TR::InstOpCode::C)
         {
         memToRegOpCode = TR::InstOpCode::CH;
         }
      else
         {
         supported = false;
         }

      if (supported)
         {
         setMem2();
         is16BitMemory2Operand = true;
         }
      }

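   // Evaluate whichever operands setInputs() decided must be in registers.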
   if (getEvalChild1())
      {
      firstRegister = cg()->evaluate(firstChild);
      }

   if (getEvalChild2())
      {
      secondRegister = cg()->evaluate(secondChild);
      }

   remapInputs(firstChild, firstRegister, secondChild, secondRegister);

   if (getCopyReg1())
      {
      TR::Register * thirdReg;
      bool done = false;

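      // Allocate a scratch register of the same kind as the first operand to
      // receive the copy.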
      if (firstRegister->getKind() == TR_GPR64)
         {
         thirdReg = cg()->allocate64bitRegister();
         }
      else if (firstRegister->getKind() == TR_VRF)
         {
         TR_ASSERT(false,"VRF: genericAnalyser unimplemented");
         }
      else if (firstRegister->getKind() != TR_FPR && firstRegister->getKind() != TR_VRF)
         {
         thirdReg = cg()->allocateRegister();
         }
      else
         {
         thirdReg = cg()->allocateRegister(TR_FPR);
         }

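      // On z196 and later, the distinct-operand (RRR) subtract forms fold the
      // copy and the subtract into a single instruction.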
      if (cg()->getS390ProcessorInfo()->supportsArch(TR_S390ProcessorInfo::TR_z196))
         {
         if (getBinaryReg3Reg2() || secondRegister != NULL)
            {
            if (regToRegOpCode == TR::InstOpCode::SR)
               {
               generateRRRInstruction(cg(), TR::InstOpCode::SRK, root, thirdReg, firstRegister, secondRegister);
               done = true;
               }
            else if (regToRegOpCode == TR::InstOpCode::SLR)
               {
               generateRRRInstruction(cg(), TR::InstOpCode::SLRK, root, thirdReg, firstRegister, secondRegister);
               done = true;
               }
            else if (regToRegOpCode == TR::InstOpCode::SGR)
               {
               generateRRRInstruction(cg(), TR::InstOpCode::SGRK, root, thirdReg, firstRegister, secondRegister);
               done = true;
               }
            else if (regToRegOpCode == TR::InstOpCode::SLGR)
               {
               generateRRRInstruction(cg(), TR::InstOpCode::SLGRK, root, thirdReg, firstRegister, secondRegister);
               done = true;
               }
            }
         }

      if (!done)
         {
         generateRRInstruction(cg(), copyOpCode, root, thirdReg, firstRegister);
         if (getBinaryReg3Reg2() || (secondRegister != NULL))
            {
            generateRRInstruction(cg(), regToRegOpCode, root, thirdReg, secondRegister);
            }
         else
            {
            TR::Node* loadBaseAddr = is16BitMemory2Operand ? secondChild->getFirstChild() : secondChild;
            TR::MemoryReference * tempMR = generateS390MemoryReference(loadBaseAddr, cg());

            // floating-point arithmetic doesn't have RXY-format instructions, so no long displacement is available
            if (secondChild->getOpCode().isFloatingPoint())
               {
               tempMR->enforce4KDisplacementLimit(secondChild, cg(), NULL);
               }

            generateRXInstruction(cg(), memToRegOpCode, root, thirdReg, tempMR);
            tempMR->stopUsingMemRefRegister(cg());
            if (is16BitMemory2Operand)
               {
               cg()->decReferenceCount(secondChild->getFirstChild());
               }
            }
         }

      root->setRegister(thirdReg);
      }
   else if (getBinaryReg1Reg2())
      {
      generateRRInstruction(cg(), regToRegOpCode, root, firstRegister, secondRegister);
      root->setRegister(firstRegister);
      }
   else // assert getBinaryReg1Mem2() == true
      {
      TR_ASSERT(  !getInvalid(), "TR_S390BinaryAnalyser::invalid case\n");

      TR::MemoryReference * tempMR = generateS390MemoryReference(is16BitMemory2Operand ? secondChild->getFirstChild() : secondChild, cg());
      // floating-point arithmetic doesn't have RXY-format instructions, so no long displacement is available
      if (secondChild->getOpCode().isFloatingPoint())
         {
         tempMR->enforce4KDisplacementLimit(secondChild, cg(), NULL);
         }

      generateRXInstruction(cg(), memToRegOpCode, root, firstRegister, tempMR);
      tempMR->stopUsingMemRefRegister(cg());
      if (is16BitMemory2Operand)
         cg()->decReferenceCount(secondChild->getFirstChild());
      root->setRegister(firstRegister);
      }

   cg()->decReferenceCount(firstChild);
   cg()->decReferenceCount(secondChild);

   return;
   }
Example #30
0
void TR_LoadExtensions::flagPreferredLoadExtensions(TR::Node* parent)
   {
   if (isSupportedType(parent) && parent->getOpCode().isConversion())
      {
      TR::Node* child = parent->getFirstChild();

      bool canSkipConversion = false;

      if (isSupportedType(child))
         {
         if (parent->getSize() == child->getSize())
            {
            TR::DebugCounter::incStaticDebugCounter(comp(), TR::DebugCounter::debugCounterName(comp(), "codegen/LoadExtensions/success/unneededConversion/%s", comp()->signature()));

            parent->setUnneededConversion(true);
            }
         else
            {
            TR::ILOpCode& childOpCode = child->getOpCode();

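            // The child is a global register load; check whether every def that
            // reaches it can be forced to extend at its source, in which case this
            // conversion becomes unnecessary.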
            if (childOpCode.isLoadReg()
               && !(parent->getSize() > 4 && TR::Compiler->target.is32Bit())
               && excludedNodes->count(parent) == 0)
               {
               TR::Node* useRegLoad = child;

               TR_UseDefInfo* useDefInfo = optimizer()->getUseDefInfo();

               if (useDefInfo != NULL && useDefInfo->infoIsValid() && useRegLoad->getUseDefIndex() != 0 && useDefInfo->isUseIndex(useRegLoad->getUseDefIndex()))
                  {
                  TR_UseDefInfo::BitVector info(comp()->allocator());

                  if (useDefInfo->getUseDef(info, useRegLoad->getUseDefIndex()))
                     {
                     TR_UseDefInfo::BitVector::Cursor cursor(info);

                     int32_t firstDefIndex = useDefInfo->getFirstRealDefIndex();
                     int32_t firstUseIndex = useDefInfo->getFirstUseIndex();

                     canSkipConversion = true;

                     bool forceExtensionOnAnyLoads = false;
                     bool forceExtensionOnAllLoads = true;

                     for (cursor.SetToFirstOne(); cursor.Valid() && canSkipConversion; cursor.SetToNextOne())
                        {
                        int32_t defIndex = cursor;

                        // We've examined all the defs of this particular use
                        if (defIndex >= firstUseIndex)
                           {
                           break;
                           }

                        // Do not consider defs that correspond to method arguments as we cannot force extension on those
                        if (defIndex < firstDefIndex)
                           {
                           continue;
                           }

                        TR::Node* defRegLoad = useDefInfo->getNode(defIndex);

                        if (defRegLoad != NULL)
                           {
                           TR::Node* defRegLoadChild = defRegLoad->getFirstChild();

                           bool forceExtension = false;
                           canSkipConversion = TR_LoadExtensions::canSkipConversion(parent, defRegLoadChild, forceExtension);

                           forceExtensionOnAnyLoads |= forceExtension;
                           forceExtensionOnAllLoads &= forceExtension;

                           // If we have to force extension on any load which feeds a def of this use, then we must also
                           // force extension on all such loads. Conversely, the conversion can be skipped if none of the
                           // loads feeding defs of this use need to be extended. Either way, the invariant is that the
                           // loads feeding defs of this use are either all extended or none of them are.
                           canSkipConversion &= forceExtensionOnAllLoads == forceExtensionOnAnyLoads;

                           if (trace())
                              {
                              traceMsg(comp(), "\t\tPeeked through %s [%p] and found %s [%p] with child %s [%p] - conversion %s be skipped\n",
                                 useRegLoad->getOpCode().getName(), 
                                 useRegLoad,
                                 defRegLoad->getOpCode().getName(), 
                                 defRegLoad,
                                 defRegLoadChild->getOpCode().getName(), 
                                 defRegLoadChild,
                                 canSkipConversion ? 
                                    "can" :
                                    "cannot");
                              }
                           }
                        }

                     if (canSkipConversion && performTransformation(comp(), "%sSkipping conversion %s [%p] after RegLoad\n", optDetailString(), parent->getOpCode().getName(), parent))
                        {
                        TR::DebugCounter::incStaticDebugCounter(comp(), TR::DebugCounter::debugCounterName(comp(), "codegen/LoadExtensions/success/unneededConversion/GRA/%s", comp()->signature()));

                        parent->setUnneededConversion(true);

                        if (forceExtensionOnAllLoads)
                           {
                           TR_UseDefInfo::BitVector info(comp()->allocator());

                           if (useDefInfo->getUseDef(info, useRegLoad->getUseDefIndex()))
                              {
                              TR_UseDefInfo::BitVector::Cursor cursor(info);

                              for (cursor.SetToFirstOne(); cursor.Valid(); cursor.SetToNextOne())
                                 {
                                 int32_t defIndex = cursor;

                                 // We've examined all the defs of this particular use
                                 if (defIndex >= firstUseIndex)
                                    {
                                    break;
                                    }

                                 // Do not consider defs that correspond to method arguments as we cannot force extension on those
                                 if (defIndex < firstDefIndex)
                                    {
                                    continue;
                                    }

                                 TR::Node *defRegLoad = useDefInfo->getNode(defIndex);

                                 if (defRegLoad != NULL)
                                    {
                                    TR::Node* defRegLoadChild = defRegLoad->getFirstChild();

                                    const int32_t preference = getExtensionPreference(defRegLoadChild);

                                    if (preference > 0)
                                       {
                                       if (trace())
                                          {
                                          traceMsg(comp(), "\t\t\tForcing sign extension on %s [%p]\n",
                                             defRegLoadChild->getOpCode().getName(),
                                             defRegLoadChild);
                                          }

                                       if (parent->getSize() == 8 || parent->useSignExtensionMode())
                                          {
                                          defRegLoadChild->setSignExtendTo64BitAtSource(true);
                                          }
                                       else
                                          {
                                          defRegLoadChild->setSignExtendTo32BitAtSource(true);
                                          }
                                       }

                                    if (preference < 0)
                                       {
                                       if (trace())
                                          {
                                          traceMsg(comp(), "\t\t\tForcing zero extension on %s [%p]\n",
                                             defRegLoadChild->getOpCode().getName(),
                                             defRegLoadChild);
                                          }

                                       if (parent->getSize() == 8 || parent->useSignExtensionMode())
                                          {
                                          defRegLoadChild->setZeroExtendTo64BitAtSource(true);
                                          }
                                       else
                                          {
                                          defRegLoadChild->setZeroExtendTo32BitAtSource(true);
                                          }
                                       }
                                    }
                                 }
                              }
                           }

                        if (parent->getType().isInt64() && parent->getSize() > child->getSize())
                           {
                           if (trace())
                              {
                              traceMsg(comp(), "\t\t\tSet global register %s in getExtendedToInt64GlobalRegisters for child %s [%p] with parent node %s [%p]\n",
                                 comp()->getDebug()->getGlobalRegisterName(child->getGlobalRegisterNumber()),
                                 child->getOpCode().getName(),
                                 child,
                                 parent->getOpCode().getName(),
                                 parent);
                              }

                           // getExtendedToInt64GlobalRegisters is used by the evaluators to force a larger virtual register to be used when
                           // evaluating the regload, so that any instructions generated by local RA are the correct size to preserve the upper bits
                           cg()->getExtendedToInt64GlobalRegisters()[child->getGlobalRegisterNumber()] = true;
                           }
                        }
                     }
                  }
               }
            }
         }

      if (!canSkipConversion)
         {
         bool forceExtension = false;
         canSkipConversion = TR_LoadExtensions::canSkipConversion(parent, child, forceExtension);

         if (canSkipConversion && performTransformation(comp(), "%sSkipping conversion %s [%p]\n", optDetailString(), parent->getOpCode().getName(), parent))
            {
            TR::DebugCounter::incStaticDebugCounter(comp(), TR::DebugCounter::debugCounterName(comp(), "codegen/LoadExtensions/success/unneededConversion/%s", comp()->signature()));

            parent->setUnneededConversion(true);

            if (forceExtension)
               {
               const int32_t preference = getExtensionPreference(child);

               if (preference > 0)
                  {
                  if (trace())
                     {
                     traceMsg(comp(), "\t\t\tForcing sign extension on %s [%p]\n",
                        child->getOpCode().getName(),
                        child);
                     }

                  if (parent->getSize() == 8 || parent->useSignExtensionMode())
                     {
                     child->setSignExtendTo64BitAtSource(true);
                     }
                  else
                     {
                     child->setSignExtendTo32BitAtSource(true);
                     }
                  }

               if (preference < 0)
                  {
                  if (trace())
                     {
                     traceMsg(comp(), "\t\t\tForcing zero extension on %s [%p]\n",
                        child->getOpCode().getName(),
                        child);
                     }

                  if (parent->getSize() == 8 || parent->useSignExtensionMode())
                     {
                     child->setZeroExtendTo64BitAtSource(true);
                     }
                  else
                     {
                     child->setZeroExtendTo32BitAtSource(true);
                     }
                  }
               }
            }
         }
      }
   }