Example #1
/**
 * A runtime guard block may have monitor stores and privarg stores along with the guard
 * itself. This method will rearrange these stores and split the block, managing any
 * uncommoning necessary for eventual block order.
 *
 * The provided block will become the privarg block, containing any privarg stores and additional
 * temps for uncommoning. It must be evaluated first. The returned block will contain monitor
 * stores and the guard. If no split is required, the provided block will be returned.
 *
 * @param comp Compilation object
 * @param block Block to manipulate
 * @param cfg Current CFG
 * @return The block containing the guard.
 */
static TR::Block* splitRuntimeGuardBlock(TR::Compilation *comp, TR::Block* block, TR::CFG *cfg)
   {
   TR::NodeChecklist checklist(comp);
   TR::TreeTop *start = block->getFirstRealTreeTop();
   TR::TreeTop *guard = block->getLastRealTreeTop();
   TR::TreeTop *firstPrivArg = NULL;
   TR::TreeTop *firstMonitor = NULL;

   // Manage the unexpected case that monitors and priv args are reversed
   bool privThenMonitor = false;

   TR_ASSERT(isMergeableGuard(guard->getNode()), "last node must be guard %p", guard->getNode());

   // Search for privarg and monitor stores
   // Only commoned nodes under the guard are required to be anchored, due to the guard being
   // evaluated before the monitor stores later on
   bool anchoredTemps = false;
   for (TR::TreeTop *tt = start; tt && tt->getNode()->getOpCodeValue() != TR::BBEnd; tt = tt->getNextTreeTop())
      {
      TR::Node * node = tt->getNode();

      if (node->getOpCode().hasSymbolReference() && node->getSymbol()->holdsMonitoredObject())
         firstMonitor = firstMonitor == NULL ? tt : firstMonitor;
      else if (node->chkIsPrivatizedInlinerArg())
         {
         if (firstPrivArg == NULL)
            {
            firstPrivArg = tt;
            privThenMonitor = (firstMonitor == NULL);
            }
         }
      else if (isMergeableGuard(node))
         anchoredTemps |= anchorCommonNodes(comp, node, start, checklist);
      else
         TR_ASSERT(0, "Node other than monitor or privarg store %p before runtime guard", node);
      }

   // If there are monitors then privargs, they must be swapped around, such that all privargs are
   // evaluated first
   if (firstPrivArg && firstMonitor && !privThenMonitor)
      {
      TR::TreeTop *monitorEnd = firstPrivArg->getPrevTreeTop();
      firstMonitor->getPrevTreeTop()->join(firstPrivArg);
      guard->getPrevTreeTop()->join(firstMonitor);
      monitorEnd->join(guard);
      }

   // If there were temps created or privargs in the block, perform a split
   TR::TreeTop *split = NULL;
   if (firstPrivArg)
      split = firstMonitor ? firstMonitor : guard;
   else if (anchoredTemps)
      split = start;

   if (split)
      return block->split(split, cfg, true /* fixupCommoning */, false /* copyExceptionSuccessors */);
   return block;
   }
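The function above is only half of the story: callers need to respect the ordering contract described in the header comment. A minimal caller sketch follows, assuming a hypothetical cleanup pass; only the signature shown above is relied on, and every other name is illustrative.

static void handleMergeableGuard(TR::Compilation *comp, TR::Block *block, TR::CFG *cfg)
   {
   // After the call, 'block' holds the privarg stores (and any uncommoning
   // temps) and must still be evaluated before the returned guard block.
   TR::Block *guardBlock = splitRuntimeGuardBlock(comp, block, cfg);

   if (guardBlock != block)
      {
      // A split happened: the monitor stores and the guard now live in
      // guardBlock, which is the block a merging pass would operate on.
      }
   }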
Example #2
void TR_ExpressionsSimplification::tranformStoreMotionCandidate(TR::TreeTop *treeTop, bool *isPreheaderBlockInvalid)
   {
   TR::Node *node = treeTop->getNode();

   TR_ASSERT(node->getOpCode().isStore() && !node->getSymbol()->isStatic() && !node->getSymbol()->holdsMonitoredObject(),
      "node %p was expected to be a non-static non-monitored object store and was not.", node);

   // this candidate should be valid, either direct or indirect

   if (trace())
      comp()->getDebug()->print(comp()->getOutFile(), node, 0, true);

   TR::Block *entryBlock = _currentRegion->getEntryBlock();
   TR::Block *preheaderBlock = findPredecessorBlock(entryBlock);

   if (!preheaderBlock)
      {
      if (trace())
         traceMsg(comp(), "Fail to find a place to put the hoist code in\n");
      *isPreheaderBlockInvalid = true;
      return;
      }

   // An earlier post-dominance test ensures that the loop is executed at least once, or is canonicalized,
   // but to be safe we still perform this transformation on canonicalized loops only.
   if (_currentRegion->isCanonicalizedLoop())  // make sure that the loop is canonicalized, in which case the preheader is
      {                                        // executed in its first iteration and is protected.
      if (performTransformation(comp(), "%sMove out loop-invariant store [%p] to block_%d\n", OPT_DETAILS, node, preheaderBlock->getNumber()))
         {
         TR::Node *newNode = node->duplicateTree();
         transformNode(newNode, preheaderBlock);
         TR::TransformUtil::removeTree(comp(), treeTop);
         }
      }
   else
      {
      if (trace())
         traceMsg(comp(), "No canonicalized loop for this candidate\n");
      }
   }
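A hedged driver sketch for the routine above, assuming a hypothetical list of candidate treetops gathered by an earlier analysis; the method name and the list parameter are illustrative, and the ListIterator usage mirrors common OMR idioms.

void TR_ExpressionsSimplification::hoistStoreMotionCandidates(List<TR::TreeTop> *candidates)
   {
   // Once the preheader is found to be invalid, every remaining candidate
   // would fail the same way, so the loop stops early.
   bool isPreheaderBlockInvalid = false;
   ListIterator<TR::TreeTop> it(candidates);
   for (TR::TreeTop *tt = it.getFirst(); tt && !isPreheaderBlockInvalid; tt = it.getNext())
      tranformStoreMotionCandidate(tt, &isPreheaderBlockInvalid);
   }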
Example #3
// Returns true if there is any constraint on the move
bool TR_LocalLiveRangeReduction::isAnySymInDefinedOrUsedBy(TR_TreeRefInfo *currentTreeRefInfo, TR::Node *currentNode, TR_TreeRefInfo *movingTreeRefInfo )
   {
   TR::Node *movingNode = movingTreeRefInfo->getTreeTop()->getNode();
   // ignore anchors
   //
   if (movingNode->getOpCode().isAnchor())
      movingNode = movingNode->getFirstChild();

   TR::ILOpCode &opCode = currentNode->getOpCode();

   ////if ((opCode.getOpCodeValue() == TR::monent) || (opCode.getOpCodeValue() == TR::monexit))
   if (nodeMaybeMonitor(currentNode))
      {
      if (trace())
    	 traceMsg(comp(),"cannot move %p beyond monitor %p\n",movingNode,currentNode);
      return true;
      }

   // Don't move gc points or things across gc points
   //
   if (movingNode->canGCandReturn() ||
         currentNode->canGCandReturn())
      {
      if (trace())
         traceMsg(comp(), "cannot move gc points %p past %p\n", movingNode, currentNode);
      return true;
      }

   // Don't move checks or calls at all
   //
   if (containsCallOrCheck(movingTreeRefInfo,movingNode))
      {
      if (trace())
    	   traceMsg(comp(),"cannot move check or call %s\n", getDebug()->getName(movingNode));
      return true;
      }

   // Don't move object header store past a GC point
   //
   if ((currentNode->getOpCode().isWrtBar() || currentNode->canCauseGC()) && mayBeObjectHeaderStore(movingNode, fe()))
      {
      if (trace())
    	   traceMsg(comp(),"cannot move possible object header store %s past GC point %s\n", getDebug()->getName(movingNode), getDebug()->getName(currentNode));
      return true;
      }

   if (TR::Compiler->target.cpu.isPower() && opCode.getOpCodeValue() == TR::allocationFence)
      {
      // Can't move allocations past flushes
      if (movingNode->getOpCodeValue() == TR::treetop &&
          movingNode->getFirstChild()->getOpCode().isNew() &&
          (currentNode->getAllocation() == NULL ||
           currentNode->getAllocation() == movingNode->getFirstChild()))
         {
         if (trace())
            {
            traceMsg(comp(),"cannot move %p beyond flush %p - ", movingNode, currentNode);
            if (currentNode->getAllocation() == NULL)
               traceMsg(comp(),"(flush with null allocation)\n");
            else
               traceMsg(comp(),"(flush for allocation %p)\n", currentNode->getAllocation());
            }
         return true;
         }

      // Can't move certain stores past flushes
      // Exclude all indirect stores; they may be for stack allocs, in which case the flush is needed at least as a scheduling barrier.
      // Direct stores to autos and parms are the only safe candidates
      if (movingNode->getOpCode().isStoreIndirect() ||
          (movingNode->getOpCode().isStoreDirect() && !movingNode->getSymbol()->isParm() && !movingNode->getSymbol()->isAuto()))
         {
         if (trace())
            traceMsg(comp(),"cannot move %p beyond flush %p - (flush for possible stack alloc)", movingNode, currentNode);
         return true;
         }
      }

   for (int32_t i = 0; i < currentNode->getNumChildren(); i++)
      {
      TR::Node *child = currentNode->getChild(i);

      // Any node that has side effects (like a call or newarray) cannot be evaluated in the middle of the tree.
      if (movingTreeRefInfo->getFirstRefNodesList()->find(child))
         {
         // for calls and unresolved symbols that are not under a check

         if (child->exceptionsRaised() ||
             (child->getOpCode().hasSymbolReference() && child->getSymbolReference()->isUnresolved()))
            {
            if (trace())
               traceMsg(comp(), "cannot move %p beyond %p - cannot change evaluation point of %p\n", movingNode, currentTreeRefInfo->getTreeTop()->getNode(), child);
            return true;
            }

         else if(movingNode->getOpCode().isStore())
            {
            TR::SymbolReference *stSymRef = movingNode->getSymbolReference();
            int32_t stSymRefNum = stSymRef->getReferenceNumber();
            int32_t numHelperSymbols = comp()->getSymRefTab()->getNumHelperSymbols();
            if ((comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::vftSymbol))||
                (comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::contiguousArraySizeSymbol))||
                (comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::discontiguousArraySizeSymbol))||
                (stSymRef == comp()->getSymRefTab()->findHeaderFlagsSymbolRef())||
                (stSymRef->getSymbol() == comp()->getSymRefTab()->findGenericIntShadowSymbol()))

               return true;
            }

         else if (movingNode->getOpCode().isResolveOrNullCheck())
            {
            if (trace())
               traceMsg(comp(), "cannot move %p beyond %p - node %p under ResolveOrNullCheck\n", movingNode, currentTreeRefInfo->getTreeTop()->getNode(), currentNode);
            return true;
            }

         else if (TR::Compiler->target.is64Bit() &&
                  movingNode->getOpCode().isBndCheck() &&
                  ((opCode.getOpCodeValue() == TR::i2l) || (opCode.getOpCodeValue() == TR::iu2l)) &&
                  !child->isNonNegative())
            {
            if (trace())
               traceMsg(comp(), "cannot move %p beyond %p - changing the eval point of %p will cause an extra cg instruction\n", movingNode, currentTreeRefInfo->getTreeTop()->getNode(), currentNode);
            return true;
            }
         }

      // don't recurse over nodes that are not the first reference
      if (child->getReferenceCount()==1 || currentTreeRefInfo->getFirstRefNodesList()->find(child))
         {
         if (isAnySymInDefinedOrUsedBy(currentTreeRefInfo, child, movingTreeRefInfo ))
            return true;
         }
      }

   return false;
   }
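Because the walk recurses over children, most callers only need the top-level answer for a pair of trees. A minimal wrapper sketch, with a hypothetical name; it uses only the accessors already visible in the example.

bool TR_LocalLiveRangeReduction::canMovePastTree(TR_TreeRefInfo *interveningTree, TR_TreeRefInfo *movingTree)
   {
   // Start the recursion at the intervening tree's root node; a 'true' result
   // from isAnySymInDefinedOrUsedBy means some constraint blocks the motion.
   return !isAnySymInDefinedOrUsedBy(interveningTree, interveningTree->getTreeTop()->getNode(), movingTree);
   }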
Example #4
File: Aliases.cpp  Project: LinHu2016/omr
TR_BitVector *
addVeryRefinedCallAliasSets(TR::ResolvedMethodSymbol * methodSymbol, TR_BitVector * aliases, List<void> * methodsPeeked)
   {
   TR::Compilation *comp = TR::comp();

   void * methodId = methodSymbol->getResolvedMethod()->getPersistentIdentifier();
   if (methodsPeeked->find(methodId))
      {
      // This can't be allocated into the alias region as it must be accessed across optimizations
      TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
      *heapAliases |= *aliases;
      return heapAliases;
      }

   // stop if the peek is getting very deep
   //
   if (methodsPeeked->getSize() >= PEEK_THRESHOLD)
      return 0;

   methodsPeeked->add(methodId);

   dumpOptDetails(comp, "O^O REFINING ALIASES: Peeking into the IL to refine aliases \n");

   if (!methodSymbol->getResolvedMethod()->genMethodILForPeeking(methodSymbol, comp, true))
      return 0;

   TR::SymbolReferenceTable * symRefTab = comp->getSymRefTab();
   for (TR::TreeTop * tt = methodSymbol->getFirstTreeTop(); tt; tt = tt->getNextTreeTop())
      {
      TR::Node *node = tt->getNode();
      if (node->getOpCode().isResolveCheck())
         return 0;

      if ((node->getOpCodeValue() == TR::treetop) ||
          (node->getOpCodeValue() == TR::compressedRefs) ||
          node->getOpCode().isCheck())
         node = node->getFirstChild();

      if (node->getOpCode().isStore())
         {
         TR::SymbolReference * symRefInCallee = node->getSymbolReference(), * symRefInCaller;
         TR::Symbol * symInCallee = symRefInCallee->getSymbol();
         TR::DataType type = symInCallee->getDataType();
         if (symInCallee->isShadow())
            {
            if (symInCallee->isArrayShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayShadowIndex(type));

            else if (symInCallee->isArrayletShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayletShadowIndex(type));

            else
               symRefInCaller = symRefTab->findShadowSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);

            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedShadowAliases(aliases, symRefTab);

               aliases->set(symRefInCaller->getReferenceNumber());
               }

            }
         else if (symInCallee->isStatic())
            {
            symRefInCaller = symRefTab->findStaticSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);
            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedStaticAliases(aliases, symRefTab);
               else
                  aliases->set(symRefInCaller->getReferenceNumber());
               }
            }
         }
      else if (node->getOpCode().isCall())
         {
         if (node->getOpCode().isCallIndirect())
            return 0;
         TR::ResolvedMethodSymbol * calleeSymbol = node->getSymbol()->getResolvedMethodSymbol();
         if (!calleeSymbol)
            return 0;
         TR_ResolvedMethod * calleeMethod = calleeSymbol->getResolvedMethod();
         if (!calleeMethod->isCompilable(comp->trMemory()) || calleeMethod->isJNINative())
            return 0;

         if (!addVeryRefinedCallAliasSets(calleeSymbol, aliases, methodsPeeked))
            return 0;
         }
      else if (node->getOpCodeValue() == TR::monent)
         return 0;
      }

   // This can't be allocated into the alias region as it must be accessed across optimizations
   TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
   *heapAliases |= *aliases;
   return heapAliases;
   }
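A sketch of how the refinement above might be started for a single resolved callee; the wrapper name is hypothetical, and constructing the List from trMemory() is an assumption based on the allocation idioms already shown.

static TR_BitVector *refineAliasesForCallee(TR::ResolvedMethodSymbol *calleeSymbol, TR_BitVector *aliases)
   {
   TR::Compilation *comp = TR::comp();

   // Tracks the chain of peeked methods so that recursive or mutually
   // recursive callees terminate the peek instead of looping forever.
   List<void> methodsPeeked(comp->trMemory());

   // A null result means the peek bailed out (too deep, unpeekable IL, an
   // indirect call, etc.); the caller should then keep the unrefined aliases.
   return addVeryRefinedCallAliasSets(calleeSymbol, aliases, &methodsPeeked);
   }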
Example #5
TR::Node *constrainVcall(TR::ValuePropagation *vp, TR::Node *node)
   {
   constrainCall(vp, node);
   // Look for a System.arraycopy call. If the node is transformed into an arraycopy,
   // re-process it.
   //
   vp->transformArrayCopyCall(node);
   if (node->getOpCodeValue() == TR::arraycopy)
      {
      node->setVisitCount(0);
      vp->launchNode(node, vp->getCurrentParent(), 0);
      return node;
      }

   if (vp->transformUnsafeCopyMemoryCall(node))
      return node;

   cacheStringAppend(vp,node);

#ifdef J9_PROJECT_SPECIFIC
   TR::SymbolReference *finalizeSymRef = vp->comp()->getSymRefTab()->findOrCreateRuntimeHelper(TR_jitCheckIfFinalizeObject, true, true, true);
   if (node->getSymbolReference() == finalizeSymRef)
      {
      TR::Node *receiver = node->getFirstChild();
      bool isGlobal;
      TR::VPConstraint *type = vp->getConstraint(receiver, isGlobal);
      bool canBeRemoved = false;
      // ensure the type is really a fixedClass
      // resolvedClass is not sufficient because java.lang.Object has an
      // empty finalizer method (hasFinalizer returns false) and the call to
      // vm helper is incorrectly optimized in this case
      //
      if (type && type->getClassType() &&
         type->getClassType()->asFixedClass())
         {
         TR_OpaqueClassBlock *klass = type->getClassType()->getClass();
         if (klass && !TR::Compiler->cls.hasFinalizer(vp->comp(), klass) && !vp->comp()->fej9()->isOwnableSyncClass(klass))
            {
            canBeRemoved = true;
            }
         }
      // If a class has a finalizer or is an ownableSync, it won't be allocated on the stack. That's ensured
      // by virtue of (indirectly) calling bool J9::ObjectModel::canAllocateInlineClass(TR_OpaqueClassBlock *block).
      // It doesn't make sense to call jitCheckIfFinalizeObject for a stack-allocated object,
      // so optimize it away.
      else if (receiver->getOpCode().hasSymbolReference() && receiver->getSymbol()->isLocalObject())
         {
         canBeRemoved = true;
         }

      if (canBeRemoved &&
           performTransformation(vp->comp(), "%s Removing redundant call to jitCheckIfFinalize [%p]\n", OPT_DETAILS, node))
         {
         ///printf("found opportunity in %s to remove call to checkfinalize\n", vp->comp()->signature());fflush(stdout);
         ///traceMsg(vp->comp(), "found opportunity to remove call %p to checkfinalize\n", node);
         vp->removeNode(node);
         vp->_curTree->setNode(NULL);
         return node;
         }
      }
#endif

   return node;
   }
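The fixed-class test in the middle of this handler is the part worth isolating. A minimal sketch, restating it as a hypothetical helper using only calls that appear above (the ownable-sync check from the J9-specific branch is omitted for brevity).

static bool receiverHasNoFinalizer(TR::ValuePropagation *vp, TR::Node *receiver)
   {
   bool isGlobal;
   TR::VPConstraint *type = vp->getConstraint(receiver, isGlobal);

   // A fixed class is required: a merely resolved class could still be
   // java.lang.Object, whose empty finalizer makes hasFinalizer() misleading.
   if (type && type->getClassType() && type->getClassType()->asFixedClass())
      {
      TR_OpaqueClassBlock *klass = type->getClassType()->getClass();
      return klass && !TR::Compiler->cls.hasFinalizer(vp->comp(), klass);
      }
   return false;
   }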
Example #6
void
OMR::CodeGenPhase::performSetupForInstructionSelectionPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation *comp = cg->comp();

   if (TR::Compiler->target.cpu.isZ() && TR::Compiler->om.shouldGenerateReadBarriersForFieldLoads())
      {
      // TODO (GuardedStorage): We need to come up with a better solution than anchoring aloadi nodes
      // to enforce a certain evaluation order
      traceMsg(comp, "GuardedStorage: in performSetupForInstructionSelectionPhase\n");

      auto mapAllocator = getTypedAllocator<std::pair<TR::TreeTop*, TR::TreeTop*> >(comp->allocator());

      std::map<TR::TreeTop*, TR::TreeTop*, std::less<TR::TreeTop*>, TR::typed_allocator<std::pair<TR::TreeTop* const, TR::TreeTop*>, TR::Allocator> >
         currentTreeTopToappendTreeTop(std::less<TR::TreeTop*> (), mapAllocator);

      TR_BitVector *unAnchorableAloadiNodes = comp->getBitVectorPool().get();

      for (TR::PreorderNodeIterator iter(comp->getStartTree(), comp); iter != NULL; ++iter)
         {
         TR::Node *node = iter.currentNode();

         traceMsg(comp, "GuardedStorage: Examining node = %p\n", node);

         // isNullCheck handles both TR::NULLCHK and TR::ResolveAndNULLCHK,
         // both of which operate not on their child but on their
         // grandchild (or great-grandchild).
         if (node->getOpCode().isNullCheck())
            {
            // An aloadi cannot be anchored if there is a Null Check on
            // its child. There are two situations where this occurs.
            // The first is when doing an aloadi off some node that is
            // being NULLCHK'd (see Ex1). The second is when doing an
            // icalli in which case the aloadi loads the VFT of an
            // object that must be NULLCHK'd (see Ex2).
            //
            // Ex1:
            //    n1n NULLCHK on n3n
            //    n2n    aloadi f    <-- First Child And Parent of Null Chk'd Node
            //    n3n       aload O
            //
            // Ex2:
            //    n1n NULLCHK on n4n
            //    n2n    icall foo        <-- First Child
            //    n3n       aloadi <vft>  <-- Parent of Null Chk'd Node
            //    n4n          aload O
            //    n4n       ==> aload O

            TR::Node *nodeBeingNullChkd = node->getNullCheckReference();
            if (nodeBeingNullChkd)
               {
               TR::Node *firstChild = node->getFirstChild();
               TR::Node *parentOfNullChkdNode = NULL;

               if (firstChild->getOpCode().isCall() &&
                   firstChild->getOpCode().isIndirect())
                  {
                  parentOfNullChkdNode = firstChild->getFirstChild();
                  }
               else
                  {
                  parentOfNullChkdNode = firstChild;
                  }

               if (parentOfNullChkdNode &&
                   parentOfNullChkdNode->getOpCodeValue() == TR::aloadi &&
                   parentOfNullChkdNode->getNumChildren() > 0 &&
                   parentOfNullChkdNode->getFirstChild() == nodeBeingNullChkd)
                  {
                  unAnchorableAloadiNodes->set(parentOfNullChkdNode->getGlobalIndex());
                  traceMsg(comp, "GuardedStorage: Cannot anchor  %p\n", firstChild);
                  }
               }
            }
         else
            {
            bool shouldAnchorNode = false;

            if (node->getOpCodeValue() == TR::aloadi &&
                !unAnchorableAloadiNodes->isSet(node->getGlobalIndex()))
               {
               shouldAnchorNode = true;
               }
            else if (node->getOpCodeValue() == TR::aload &&
                     node->getSymbol()->isStatic() &&
                     node->getSymbol()->isCollectedReference())
               {
               shouldAnchorNode = true;
               }

            if (shouldAnchorNode)
               {
               TR::TreeTop* anchorTreeTop = TR::TreeTop::create(comp, TR::Node::create(TR::treetop, 1, node));
               TR::TreeTop* appendTreeTop = iter.currentTree();

               if (currentTreeTopToappendTreeTop.count(appendTreeTop) > 0)
                  {
                  appendTreeTop = currentTreeTopToappendTreeTop[appendTreeTop];
                  }

               // Anchor the aload/aloadi before the current treetop
               appendTreeTop->insertBefore(anchorTreeTop);
               currentTreeTopToappendTreeTop[iter.currentTree()] = anchorTreeTop;

               traceMsg(comp, "GuardedStorage: Anchored  %p to treetop = %p\n", node, anchorTreeTop);
               }
            }
         }

      comp->getBitVectorPool().release(unAnchorableAloadiNodes);
      }

   if (cg->shouldBuildStructure() &&
       (comp->getFlowGraph()->getStructure() != NULL))
      {
      TR_Structure *rootStructure = TR_RegionAnalysis::getRegions(comp);
      comp->getFlowGraph()->setStructure(rootStructure);
      }

   phase->reportPhase(SetupForInstructionSelectionPhase);

   // Dump preIR
   if (comp->getOption(TR_TraceRegisterPressureDetails) && !comp->getOption(TR_DisableRegisterPressureSimulation))
      {
      traceMsg(comp, "         { Post optimization register pressure simulation\n");
      TR_BitVector emptyBitVector;
      vcount_t vc = comp->incVisitCount();
      cg->initializeRegisterPressureSimulator();
      for (TR::Block *block = comp->getStartBlock(); block; block = block->getNextExtendedBlock())
         {
         TR_LinkHead<TR_RegisterCandidate> emptyCandidateList;
         TR::CodeGenerator::TR_RegisterPressureState state(NULL, 0, emptyBitVector, emptyBitVector, &emptyCandidateList, cg->getNumberOfGlobalGPRs(), cg->getNumberOfGlobalFPRs(), cg->getNumberOfGlobalVRFs(), vc);
         TR::CodeGenerator::TR_RegisterPressureSummary summary(state._gprPressure, state._fprPressure, state._vrfPressure);
         cg->simulateBlockEvaluation(block, &state, &summary);
         }
      traceMsg(comp, "         }\n");
      }

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->setUpForInstructionSelection();
   }
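The anchoring step in the middle of this phase is a small, reusable idiom: wrap the load under a treetop and place it ahead of the tree that first references it. A minimal sketch with a hypothetical helper name, using only the calls shown above.

static TR::TreeTop *anchorNodeBefore(TR::Compilation *comp, TR::Node *node, TR::TreeTop *beforeThis)
   {
   // The new treetop forces 'node' to be evaluated at this point, which is
   // what the guarded-storage setup above relies on for aload/aloadi nodes.
   TR::TreeTop *anchorTreeTop = TR::TreeTop::create(comp, TR::Node::create(TR::treetop, 1, node));
   beforeThis->insertBefore(anchorTreeTop);
   return anchorTreeTop;
   }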