Example #1
void
OMR::CodeGenPhase::performInstructionSelectionPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation* comp = cg->comp();
   phase->reportPhase(InstructionSelectionPhase);

   if (comp->getOption(TR_TraceCG) || comp->getOption(TR_TraceTrees) || comp->getOptions()->getTraceCGOption(TR_TraceCGPreInstructionSelection))
      comp->dumpMethodTrees("Pre Instruction Selection Trees");

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->doInstructionSelection();

   if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceCGPostInstructionSelection))
      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Post Instruction Selection Instructions", false, true);

   // Check reference counts: verify that no virtual registers remain live
#if defined(DEBUG) || defined(PROD_WITH_ASSUMES)
      for (int r=0; r<NumRegisterKinds; r++)
         {
         if (TO_KIND_MASK(r) & cg->getSupportedLiveRegisterKinds())
            {
            cg->checkForLiveRegisters(cg->getLiveRegisters((TR_RegisterKinds)r));
            }
         }
#endif

   // Check for a compilation interrupt request now that instruction selection is done
   if (comp->compilationShouldBeInterrupted(AFTER_INSTRUCTION_SELECTION_CONTEXT))
      {
      comp->failCompilation<TR::CompilationInterrupted>("interrupted after instruction selection");
      }
   }
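
The LexicalMemProfiler and LexicalTimer above are RAII guards: both start measuring at construction and stop when their destructors run at the end of the enclosing scope. A minimal sketch of the pattern (PhaseTimer here is a hypothetical stand-in, not the OMR class):

   #include <chrono>
   #include <cstdio>

   class PhaseTimer
      {
      const char *_name;
      std::chrono::steady_clock::time_point _start;

      public:
      PhaseTimer(const char *name)
         : _name(name), _start(std::chrono::steady_clock::now()) {}

      ~PhaseTimer()
         {
         auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                      std::chrono::steady_clock::now() - _start).count();
         std::printf("%s: %lld us\n", _name, (long long)us);
         }
      };

   void runPhase()
      {
      PhaseTimer pt("Instruction Selection"); // timing starts here
      // ... phase work ...
      }                                       // destructor reports elapsed time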
Example #2
LexicalXmlTag::LexicalXmlTag(TR::CodeGenerator * cg): cg(cg)
   {
   TR::Compilation *comp = cg->comp();
   if (comp->getOption(TR_TraceOptDetails) || comp->getOption(TR_TraceCG))
      {
      const char *hotnessString = comp->getHotnessName(comp->getMethodHotness());
      traceMsg(comp, "<codegen\n"
              "\tmethod=\"%s\"\n"
               "\thotness=\"%s\">\n",
               comp->signature(), hotnessString);
      }
   }
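
Construction emits the opening <codegen> tag; the matching destructor (shown in Example #13 below) emits </codegen>, so the trace XML stays balanced on every exit path. A minimal usage sketch (the call site is hypothetical):

      {
      LexicalXmlTag codegenTag(cg); // emits <codegen method=... hotness=...> when tracing
      // ... run code generation phases ...
      }                             // scope exit emits </codegen>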
Example #3
void
OMR::CodeGenPhase::performRegisterAssigningPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation* comp = cg->comp();
   phase->reportPhase(RegisterAssigningPhase);

   if (cg->getDebug())
      cg->getDebug()->roundAddressEnumerationCounters();

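   // Inner scope: the RA memory profiler and timer stop measuring when this
   // block exits, before the post-assignment trace dump below.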
     {
      TR::LexicalMemProfiler mp("RA", comp->phaseMemProfiler());
      LexicalTimer pt("RA", comp->phaseTimer());

      TR_RegisterKinds nonColourableKindsToAssign = cg->prepareRegistersForAssignment();

      cg->jettisonAllSpills(); // Spill temps used before now may lead to conflicts if also used by register assignment

      // Do local register assignment for non-colourable registers.
      //
      if (cg->getTraceRAOption(TR_TraceRAListing) && cg->getDebug())
         cg->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Before Local RA", false);

      cg->doRegisterAssignment(nonColourableKindsToAssign);

      if (comp->compilationShouldBeInterrupted(AFTER_REGISTER_ASSIGNMENT_CONTEXT))
         {
         comp->failCompilation<TR::CompilationInterrupted>("interrupted after RA");
         }
      }

   if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceCGPostRegisterAssignment))
      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Post Register Assignment Instructions", false, true);
   }
Example #4
void
OMR::CodeGenPhase::performEmitSnippetsPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation * comp = cg->comp();
   phase->reportPhase(EmitSnippetsPhase);

   TR::LexicalMemProfiler mp("Emit Snippets", comp->phaseMemProfiler());
   LexicalTimer pt("Emit Snippets", comp->phaseTimer());

   cg->emitSnippets();

   if (comp->getOption(TR_EnableOSR))
      {
      comp->getOSRCompilationData()->checkOSRLimits();
      comp->getOSRCompilationData()->compressInstruction2SharedSlotMap();
      }

   if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceCGPostBinaryEncoding))
      {
      diagnostic("\nbuffer start = %8x, code start = %8x, buffer length = %d", cg->getBinaryBufferStart(), cg->getCodeStart(), cg->getEstimatedCodeLength());
      diagnostic("\n");
      const char * title = "Post Binary Instructions";

      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), title, false, true);

      traceMsg(comp,"<snippets>");
      comp->getDebug()->print(comp->getOutFile(), cg->getSnippetList());
      traceMsg(comp,"\n</snippets>\n");

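      // Walk the snippet list, accumulating each snippet's length at its running
      // estimated offset; the difference from the initial estimate is the snippet
      // code consumed by this method.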
      auto iterator = cg->getSnippetList().begin();
      int32_t estimatedSnippetStart = cg->getEstimatedSnippetStart();
      while (iterator != cg->getSnippetList().end())
         {
         estimatedSnippetStart += (*iterator)->getLength(estimatedSnippetStart);
         ++iterator;
         }
      int32_t snippetLength = estimatedSnippetStart - cg->getEstimatedSnippetStart();

      diagnostic("\nAmount of code memory allocated for this function        = %d"
                  "\nAmount of code memory consumed for this function         = %d"
                  "\nAmount of snippet code memory consumed for this function = %d\n\n",
                  cg->getEstimatedCodeLength(),
                  cg->getCodeLength(),
                  snippetLength);
      }
   }
Example #5
void
OMR::CodeGenPhase::performLowerTreesPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation * comp = cg->comp();
   phase->reportPhase(LowerTreesPhase);

   cg->lowerTrees();

   if (comp->getOption(TR_TraceCG))
      comp->dumpMethodTrees("Post Lower Trees");
   }
Example #6
void
OMR::CodeGenPhase::performPeepholePhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation * comp = cg->comp();
   phase->reportPhase(PeepholePhase);

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->doPeephole();

   if (comp->getOption(TR_TraceCG))
      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Post Peephole Instructions", false);
   }
Example #7
void
OMR::CodeGenPhase::performUncommonCallConstNodesPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation* comp = cg->comp();

   if(comp->getOption(TR_DisableCallConstUncommoning))
      {
      traceMsg(comp, "Skipping Uncommon Call Constant Node phase\n");
      return;
      }

   phase->reportPhase(UncommonCallConstNodesPhase);

   if (comp->getOption(TR_TraceCG) || comp->getOption(TR_TraceTrees))
      comp->dumpMethodTrees("Pre Uncommon Call Constant Node Trees");

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->uncommonCallConstNodes();

   if (comp->getOption(TR_TraceCG) || comp->getOption(TR_TraceTrees))
      comp->dumpMethodTrees("Post Uncommon Call Constant Node Trees");
   }
Example #8
void
OMR::CodeGenPhase::performMapStackPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation* comp = cg->comp();
   cg->remapGCIndicesInInternalPtrFormat();
     {
     TR::LexicalMemProfiler mp("Stackmap", comp->phaseMemProfiler());
     LexicalTimer pt("Stackmap", comp->phaseTimer());

     cg->getLinkage()->mapStack(comp->getJittedMethodSymbol());

     if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceEarlyStackMap))
        comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Post Stack Map", false);
     }
   cg->setMappingAutomatics();
   }
Example #9
void TR_PPCRegisterDependencyGroup::assignRegisters(TR::Instruction   *currentInstruction,
                                                    TR_RegisterKinds  kindToBeAssigned,
                                                    uint32_t          numberOfRegisters,
                                                    TR::CodeGenerator *cg)
   {
   TR::Machine *machine = cg->machine();
   TR::Register   *virtReg;
   TR::RealRegister::RegNum dependentRegNum;
   TR::RealRegister *dependentRealReg, *assignedRegister, *realReg;
   int i;
   TR::Compilation *comp = cg->comp();

   int num_gprs = 0;
   int num_fprs = 0;
   int num_vrfs = 0;

   // Used to look up dependencies by real register number
   TR_PPCRegisterDependencyMap map(_dependencies, numberOfRegisters);

   if (!comp->getOption(TR_DisableOOL))
      {
      for (i = 0; i < numberOfRegisters; i++)
         {
         virtReg = _dependencies[i].getRegister();
         dependentRegNum = _dependencies[i].getRealRegister();
         if (dependentRegNum == TR::RealRegister::SpilledReg)
            {
            TR_ASSERT(virtReg->getBackingStorage(),"should have a backing store if dependentRegNum == spillRegIndex()\n");
            if (virtReg->getAssignedRealRegister())
               {
               // This happens when the register was first spilled in the main-line path and
               // then reverse-spilled and assigned to a real register in the OOL path. We
               // protected the backing store when doing the reverse spill, so we can re-spill
               // to the same slot now.
               traceMsg(comp, "\nOOL: Found register spilled in main line and re-assigned inside OOL");
               TR::Node *currentNode = currentInstruction->getNode();
               TR::RealRegister *assignedReg    = toRealRegister(virtReg->getAssignedRegister());
               TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(currentNode, (TR::SymbolReference*)virtReg->getBackingStorage()->getSymbolReference(), sizeof(uintptr_t), cg);
               TR::InstOpCode::Mnemonic opCode;
               TR_RegisterKinds rk = virtReg->getKind();
               switch (rk)
                  {
                  case TR_GPR:
                      opCode = TR::InstOpCode::Op_load;
                     break;
                  case TR_FPR:
                     opCode = virtReg->isSinglePrecision() ? TR::InstOpCode::lfs : TR::InstOpCode::lfd;
                     break;
                  default:
                     TR_ASSERT(0, "\nRegister kind not supported in OOL spill\n");
                     break;
                  }

               TR::Instruction *inst = generateTrg1MemInstruction(cg, opCode, currentNode, assignedReg, tempMR, currentInstruction);

               assignedReg->setAssignedRegister(NULL);
               virtReg->setAssignedRegister(NULL);
               assignedReg->setState(TR::RealRegister::Free);
               if (comp->getDebug())
                  cg->traceRegisterAssignment("Generate reload of virt %s due to spillRegIndex dep at inst %p\n",comp->getDebug()->getName(virtReg),currentInstruction);
               cg->traceRAInstruction(inst);
               }

            if (!(std::find(cg->getSpilledRegisterList()->begin(), cg->getSpilledRegisterList()->end(), virtReg) != cg->getSpilledRegisterList()->end()))
               cg->getSpilledRegisterList()->push_front(virtReg);
            }
         // we also need to free up all locked backing storage if we are exiting the OOL during backwards RA assignment
         else if (currentInstruction->isLabel() && virtReg->getAssignedRealRegister())
            {
            TR::PPCLabelInstruction *labelInstr = (TR::PPCLabelInstruction *)currentInstruction;
            TR_BackingStore * location = virtReg->getBackingStorage();
            TR_RegisterKinds rk = virtReg->getKind();
            int32_t dataSize;
            if (labelInstr->getLabelSymbol()->isStartOfColdInstructionStream() && location)
               {
               traceMsg (comp,"\nOOL: Releasing backing storage (%p)\n", location);
               if (rk == TR_GPR)
                  dataSize = TR::Compiler->om.sizeofReferenceAddress();
               else
                  dataSize = 8;
               location->setMaxSpillDepth(0);
               cg->freeSpill(location,dataSize,0);
               virtReg->setBackingStorage(NULL);
               }
            }
         }
      }

   for (i = 0; i < numberOfRegisters; i++)
      {
      map.addDependency(_dependencies[i], i);

      virtReg = _dependencies[i].getRegister();
      dependentRegNum = _dependencies[i].getRealRegister();

      if (dependentRegNum != TR::RealRegister::SpilledReg)
         {
         if (virtReg->getKind() == TR_GPR)
            num_gprs++;
         else if (virtReg->getKind() == TR_FPR)
            num_fprs++;
         else if (virtReg->getKind() == TR_VRF)
            num_vrfs++;
         }
      }

#ifdef DEBUG
   int locked_gprs = 0;
   int locked_fprs = 0;
   int locked_vrfs = 0;

   // count up how many registers are locked for each type
   for(i = TR::RealRegister::FirstGPR; i <= TR::RealRegister::LastGPR; i++)
      {
        realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
        if (realReg->getState() == TR::RealRegister::Locked)
           locked_gprs++;
      }
   for(i = TR::RealRegister::FirstFPR; i <= TR::RealRegister::LastFPR; i++)
      {
        realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
        if (realReg->getState() == TR::RealRegister::Locked)
           locked_fprs++;
      }
   for(i = TR::RealRegister::FirstVRF; i <= TR::RealRegister::LastVRF; i++)
      {
        realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
        if (realReg->getState() == TR::RealRegister::Locked)
           locked_vrfs++;
      }
   TR_ASSERT( locked_gprs == machine->getNumberOfLockedRegisters(TR_GPR),"Inconsistent number of locked GPRs");
   TR_ASSERT( locked_fprs == machine->getNumberOfLockedRegisters(TR_FPR),"Inconsistent number of locked FPRs");
   TR_ASSERT( locked_vrfs == machine->getNumberOfLockedRegisters(TR_VRF), "Inconsistent number of locked VRFs");
#endif

   // To handle circular dependencies, we block a real register if (1) it is already assigned to a correct
   // virtual register and (2) if it is assigned to one register in the list but is required by another.
   // However, if all available registers are requested, we do not block in case (2) to avoid all registers
   // being blocked.
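   // For example, given dependencies {v1 -> r2, v2 -> r1} with v1 currently in
   // r1 and v2 in r2, neither assignment can proceed unless a spare register is
   // left unblocked to break the cycle.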

   bool block_gprs = true;
   bool block_fprs = true;
   bool block_vrfs = true;

   TR_ASSERT(num_gprs <= (TR::RealRegister::LastGPR - TR::RealRegister::FirstGPR + 1 - machine->getNumberOfLockedRegisters(TR_GPR)), "Too many GPR dependencies, unable to assign" );
   TR_ASSERT(num_fprs <= (TR::RealRegister::LastFPR - TR::RealRegister::FirstFPR + 1 - machine->getNumberOfLockedRegisters(TR_FPR)), "Too many FPR dependencies, unable to assign" );
   TR_ASSERT(num_vrfs <= (TR::RealRegister::LastVRF - TR::RealRegister::FirstVRF + 1 - machine->getNumberOfLockedRegisters(TR_VRF)), "Too many VRF dependencies, unable to assign" );

   if (num_gprs == (TR::RealRegister::LastGPR - TR::RealRegister::FirstGPR + 1 - machine->getNumberOfLockedRegisters(TR_GPR)))
        block_gprs = false;
   if (num_fprs == (TR::RealRegister::LastFPR - TR::RealRegister::FirstFPR + 1 - machine->getNumberOfLockedRegisters(TR_FPR)))
        block_fprs = false;
   if (num_vrfs == (TR::RealRegister::LastVRF - TR::RealRegister::FirstVRF + 1 - machine->getNumberOfLockedRegisters(TR_VRF)))
        block_vrfs = false;

   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = _dependencies[i].getRegister();

      if (virtReg->getAssignedRealRegister()!=NULL)
         {
         if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg)
            {
            virtReg->block();
            }
         else
            {
            TR::RealRegister::RegNum assignedRegNum;
            assignedRegNum = toRealRegister(virtReg->getAssignedRealRegister())->getRegisterNumber();

            // always block if required register and assigned register match;
            // block if assigned register is required by other dependency but only if
            // any spare registers are left to avoid blocking all existing registers
            if (_dependencies[i].getRealRegister() == assignedRegNum ||
                (map.getDependencyWithTarget(assignedRegNum) &&
                 ((virtReg->getKind() != TR_GPR || block_gprs) &&
                  (virtReg->getKind() != TR_FPR || block_fprs) &&
                  (virtReg->getKind() != TR_VRF || block_vrfs))))
               {
               virtReg->block();
               }
            }
         }
      }

   // Assign all virtual regs that depend on a specific real reg that is free
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = _dependencies[i].getRegister();
      dependentRegNum = _dependencies[i].getRealRegister();
      dependentRealReg = machine->getPPCRealRegister(dependentRegNum);

      if (dependentRegNum != TR::RealRegister::NoReg &&
          dependentRegNum != TR::RealRegister::SpilledReg &&
          dependentRealReg->getState() == TR::RealRegister::Free)
         {
         assignFreeRegisters(currentInstruction, &_dependencies[i], map, cg);
         }
      }

   // Assign all virtual regs that depend on a specific real reg that is not free
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg     = _dependencies[i].getRegister();
      assignedRegister = NULL;
      if (virtReg->getAssignedRealRegister() != NULL)
         {
         assignedRegister = toRealRegister(virtReg->getAssignedRealRegister());
         }
      dependentRegNum = _dependencies[i].getRealRegister();
      dependentRealReg = machine->getPPCRealRegister(dependentRegNum);
      if (dependentRegNum != TR::RealRegister::NoReg &&
          dependentRegNum != TR::RealRegister::SpilledReg &&
          dependentRealReg != assignedRegister)
         {
         bool depsBlocked = false;
         switch (_dependencies[i].getRegister()->getKind())
            {
            case TR_GPR:
               depsBlocked = block_gprs;
               break;
            case TR_FPR:
               depsBlocked = block_fprs;
               break;
            case TR_VRF:
               depsBlocked = block_vrfs;
               break;
            }
         assignContendedRegisters(currentInstruction, &_dependencies[i], map, depsBlocked, cg);
         }
      }

   // Assign all virtual regs that depend on NoReg but exclude gr0
   for (i=0; i<numberOfRegisters; i++)
      {
      if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg && _dependencies[i].getExcludeGPR0())
         {
         TR::RealRegister *realOne;

         virtReg     = _dependencies[i].getRegister();
         realOne     = virtReg->getAssignedRealRegister();
         if (realOne!=NULL && toRealRegister(realOne)->getRegisterNumber()==TR::RealRegister::gr0)
            {
            if ((assignedRegister = machine->findBestFreeRegister(currentInstruction, virtReg->getKind(), true, false, virtReg)) == NULL)
               {
               assignedRegister = machine->freeBestRegister(currentInstruction, virtReg, NULL, true);
               }
            machine->coerceRegisterAssignment(currentInstruction, virtReg, assignedRegister->getRegisterNumber());
            }
         else if (realOne == NULL)
            {
            machine->assignOneRegister(currentInstruction, virtReg, true);
            }
         virtReg->block();
         }
      }

   // Assign all virtual regs that depend on NoReg
   for (i=0; i<numberOfRegisters; i++)
      {
      if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg && !_dependencies[i].getExcludeGPR0())
         {
         TR::RealRegister *realOne;

         virtReg     = _dependencies[i].getRegister();
         realOne     = virtReg->getAssignedRealRegister();
         if (!realOne)
            {
            machine->assignOneRegister(currentInstruction, virtReg, false);
            }
         virtReg->block();
         }
      }

   unblockRegisters(numberOfRegisters);
   for (i = 0; i < numberOfRegisters; i++)
      {
      TR::Register     *dependentRegister = getRegisterDependency(i)->getRegister();
      // dependentRegister->getAssignedRegister() is NULL if the reg has already been spilled due to a spilledReg dep
      if (comp->getOption(TR_DisableOOL) || (!(cg->isOutOfLineColdPath()) && !(cg->isOutOfLineHotPath())))
         {
         TR_ASSERT(dependentRegister->getAssignedRegister(),
                   "assignedRegister cannot be NULL");
         }
      if (dependentRegister->getAssignedRegister())
         {
         TR::RealRegister *assignedRegister = dependentRegister->getAssignedRegister()->getRealRegister();

         if (getRegisterDependency(i)->getRealRegister() == TR::RealRegister::NoReg)
            getRegisterDependency(i)->setRealRegister(toRealRegister(assignedRegister)->getRegisterNumber());

         machine->decFutureUseCountAndUnlatch(dependentRegister);
         }
      }
   }
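
TR_PPCRegisterDependencyMap is populated in the counting loop above (map.addDependency) and queried in the blocking logic (map.getDependencyWithTarget): it answers "which dependency targets this real register?" in constant time. A sketch of such a lookup table (hypothetical layout, not the OMR class):

   #include <cstdint>
   #include <cstring>

   struct DependencyMap
      {
      enum { kNumRealRegs = 64 };
      static const int16_t kNoDependency = -1;
      int16_t _target[kNumRealRegs]; // real register number -> dependency index

      DependencyMap()
         {
         std::memset(_target, 0xFF, sizeof(_target)); // every entry becomes kNoDependency
         }

      void addDependency(int realReg, int16_t depIndex)
         {
         _target[realReg] = depIndex;
         }

      bool hasDependencyWithTarget(int realReg) const
         {
         return _target[realReg] != kNoDependency;
         }
      };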
Example #10
TR_BitVector *
OMR::SymbolReference::getUseDefAliasesBV(bool isDirectCall, bool includeGCSafePoint)
   {
   TR::Compilation *comp = TR::comp();
   TR::Region &aliasRegion = comp->aliasRegion();
   int32_t bvInitialSize = comp->getSymRefCount();
   TR_BitVectorGrowable growability = growable;

   // Allow more than one shadow for an array type; used by LoopAliasRefiner.
   const bool supportArrayRefinement = true;

   int32_t kind = _symbol->getKind();
   TR::SymbolReferenceTable * symRefTab = comp->getSymRefTab();

   // !!! NOTE !!!
   // THERE IS A COPY OF THIS LOGIC IN sharesSymbol
   //
   if (!self()->reallySharesSymbol(comp))
      {
      switch (kind)
         {
         case TR::Symbol::IsShadow:
         case TR::Symbol::IsStatic:
            {
            // For an unresolved constant dynamic we must invoke a Java bootstrap method,
            // which can have arbitrary side effects, so the aliasing must be conservative.
            // isConstObjectRef now returns true for condy, so we short-circuit explicitly:
            // if we are unresolved and either condy or not a const object ref, we return
            // conservative aliases.
            if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) ||
                _symbol->isVolatile() || self()->isLiteralPoolAddress() ||
                self()->isFromLiteralPool() || _symbol->isUnsafeShadowSymbol() ||
                (_symbol->isArrayShadowSymbol() && comp->getMethodSymbol()->hasVeryRefinedAliasSets()))
               {
               // getUseDefAliases might not return NULL
               }
            else if (!symRefTab->aliasBuilder.mutableGenericIntShadowHasBeenCreated())
               {
               // getUseDefAliases must return NULL
               return NULL;
               }
            else if (kind == TR::Symbol::IsStatic && !symRefTab->aliasBuilder.litPoolGenericIntShadowHasBeenCreated())
               {
               // getUseDefAliases must return NULL
               return NULL;
               }
            break;
            }
         }
      }

   // Now compute aliases for the various kinds of symbols.
   //
   switch (kind)
      {
      case TR::Symbol::IsMethod:
         {
         TR::MethodSymbol * methodSymbol = _symbol->castToMethodSymbol();

         if (!methodSymbol->isHelper())
            return symRefTab->aliasBuilder.methodAliases(self());

         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arraySetSymbol) ||
             symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::osrFearPointHelperSymbol) ||
             symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::potentialOSRPointHelperSymbol))
            {
            return &symRefTab->aliasBuilder.defaultMethodDefAliases();
            }

         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arrayCmpSymbol))
            return 0;

         switch (self()->getReferenceNumber())
            {
            case TR_methodTypeCheck:
            case TR_nullCheck:
               return &symRefTab->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();

            case TR_arrayBoundsCheck:
            case TR_checkCast:
            case TR_divCheck:
            case TR_typeCheckArrayStore:
            case TR_arrayStoreException:
            case TR_incompatibleReceiver:
            case TR_IncompatibleClassChangeError:
            case TR_reportFinalFieldModified:
            case TR_reportMethodEnter:
            case TR_reportStaticMethodEnter:
            case TR_reportMethodExit:
            case TR_acquireVMAccess:
            case TR_instanceOf:
            case TR_checkAssignable:
            case TR_throwCurrentException:
            case TR_releaseVMAccess:
            case TR_stackOverflow:
            case TR_writeBarrierStore:
            case TR_writeBarrierBatchStore:
            case TR_jitProfileAddress:
            case TR_jitProfileWarmCompilePICAddress:
            case TR_jitProfileValue:
            case TR_jitProfileLongValue:
            case TR_jitProfileBigDecimalValue:
            case TR_jitProfileParseBuffer:

               return 0;

            case TR_asyncCheck:
            case TR_writeBarrierClassStoreRealTimeGC:
            case TR_writeBarrierStoreRealTimeGC:
            case TR_aNewArray:
            case TR_newObject:
            case TR_newObjectNoZeroInit:
            case TR_newArray:
            case TR_multiANewArray:
               if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
                  return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers();
               else
                  return 0;

            case TR_aThrow:
               return 0;

            // The monitor exit symbol needs to be aliased with all fields in the
            // current class to ensure that all references to fields are evaluated
            // before the monitor exit
            case TR_monitorExit:
            case TR_monitorEntry:
            case TR_transactionExit:
            case TR_transactionEntry:

            default:
               // The following is the place to check for
               // a use of killsAllMethodSymbolRef... However,
               // it looks like the default action is sufficient.
               //if (symRefTab->findKillsAllMethodSymbolRef() == self())
               //   {
               //   }
               return &symRefTab->aliasBuilder.defaultMethodDefAliases();
            }
         }
      case TR::Symbol::IsResolvedMethod:
         {
         TR::ResolvedMethodSymbol * resolvedMethodSymbol = _symbol->castToResolvedMethodSymbol();

         if (!comp->getOption(TR_EnableHCR))
            {
            // Pure functions kill nothing, so they contribute no use-def aliases.
            if (resolvedMethodSymbol->isPureFunction())
               return NULL;

            switch (resolvedMethodSymbol->getRecognizedMethod())
               {
#ifdef J9_PROJECT_SPECIFIC
               case TR::java_lang_System_arraycopy:
                  {
                  TR_BitVector * aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
                  *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs();
                  if (comp->generateArraylets())
                     *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs();
                  return aliases;
                  }

               case TR::java_lang_Double_longBitsToDouble:
               case TR::java_lang_Double_doubleToLongBits:
               case TR::java_lang_Float_intBitsToFloat:
               case TR::java_lang_Float_floatToIntBits:
               case TR::java_lang_Double_doubleToRawLongBits:
               case TR::java_lang_Float_floatToRawIntBits:
               case TR::java_lang_Math_sqrt:
               case TR::java_lang_StrictMath_sqrt:
               case TR::java_lang_Math_sin:
               case TR::java_lang_StrictMath_sin:
               case TR::java_lang_Math_cos:
               case TR::java_lang_StrictMath_cos:
               case TR::java_lang_Math_max_I:
               case TR::java_lang_Math_min_I:
               case TR::java_lang_Math_max_L:
               case TR::java_lang_Math_min_L:
               case TR::java_lang_Math_abs_I:
               case TR::java_lang_Math_abs_L:
               case TR::java_lang_Math_abs_F:
               case TR::java_lang_Math_abs_D:
               case TR::java_lang_Math_pow:
               case TR::java_lang_StrictMath_pow:
               case TR::java_lang_Math_exp:
               case TR::java_lang_StrictMath_exp:
               case TR::java_lang_Math_log:
               case TR::java_lang_StrictMath_log:
               case TR::java_lang_Math_floor:
               case TR::java_lang_Math_ceil:
               case TR::java_lang_Math_copySign_F:
               case TR::java_lang_Math_copySign_D:
               case TR::java_lang_StrictMath_floor:
               case TR::java_lang_StrictMath_ceil:
               case TR::java_lang_StrictMath_copySign_F:
               case TR::java_lang_StrictMath_copySign_D:
               case TR::com_ibm_Compiler_Internal__TR_Prefetch:
               case TR::java_nio_Bits_keepAlive:
                  if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
                     return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers();
                  else
                     return 0;

               // no aliasing on DFP dummy stubs
               case TR::java_math_BigDecimal_DFPPerformHysteresis:
               case TR::java_math_BigDecimal_DFPUseDFP:
               case TR::java_math_BigDecimal_DFPHWAvailable:
               case TR::java_math_BigDecimal_DFPCompareTo:
               case TR::java_math_BigDecimal_DFPUnscaledValue:
               case TR::com_ibm_dataaccess_DecimalData_DFPFacilityAvailable:
               case TR::com_ibm_dataaccess_DecimalData_DFPUseDFP:
               case TR::com_ibm_dataaccess_DecimalData_DFPConvertPackedToDFP:
               case TR::com_ibm_dataaccess_DecimalData_DFPConvertDFPToPacked:
               case TR::com_ibm_dataaccess_DecimalData_createZeroBigDecimal:
               case TR::com_ibm_dataaccess_DecimalData_getlaside:
               case TR::com_ibm_dataaccess_DecimalData_setlaside:
               case TR::com_ibm_dataaccess_DecimalData_getflags:
               case TR::com_ibm_dataaccess_DecimalData_setflags:
                  if (!(
#ifdef TR_TARGET_S390
                     TR::Compiler->target.cpu.getS390SupportsDFP() ||
#endif
                      TR::Compiler->target.cpu.supportsDecimalFloatingPoint()) ||
                      comp->getOption(TR_DisableDFP))
                     return NULL;
#endif //J9_PROJECT_SPECIFIC
               default:
                  break;
               }
            }

#ifdef J9_PROJECT_SPECIFIC
         TR_ResolvedMethod * method = resolvedMethodSymbol->getResolvedMethod();
         TR_PersistentMethodInfo * methodInfo = TR_PersistentMethodInfo::get(method);
         if (methodInfo && (methodInfo->hasRefinedAliasSets() ||
                            comp->getMethodHotness() >= veryHot ||
                            resolvedMethodSymbol->hasVeryRefinedAliasSets()) &&
             (method->isStatic() || method->isFinal() || isDirectCall))
            {
            TR_BitVector * aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
               *aliases |= symRefTab->aliasBuilder.gcSafePointSymRefNumbers();

            if (methodInfo->doesntKillAnything() && !comp->getOption(TR_DisableRefinedAliases))
               return aliases;

            if ((resolvedMethodSymbol->hasVeryRefinedAliasSets() || comp->getMethodHotness() >= hot) &&
                !debug("disableVeryRefinedCallAliasSets"))
               {
               TR_BitVector * exactAliases = 0;

               if (resolvedMethodSymbol->hasVeryRefinedAliasSets())
                  exactAliases = symRefTab->aliasBuilder.getVeryRefinedCallAliasSets(resolvedMethodSymbol);
               else
                  {
                  resolvedMethodSymbol->setHasVeryRefinedAliasSets(true);
                  List<void> methodsPeeked(comp->trMemory());
                  exactAliases = addVeryRefinedCallAliasSets(resolvedMethodSymbol, aliases, &methodsPeeked);
                  symRefTab->aliasBuilder.setVeryRefinedCallAliasSets(resolvedMethodSymbol, exactAliases);
                  }
               if (exactAliases)
                  {
                  return exactAliases;
                  }
               }

            // From here on, we're just checking refined alias info.
            // If refined aliases are disabled, return the conservative answer
            // we would have returned had we never attempted to use refined
            // aliases at all.
            //
            if (comp->getOption(TR_DisableRefinedAliases))
               return symRefTab->aliasBuilder.methodAliases(self());

            if (!methodInfo->doesntKillAddressArrayShadows())
               {

               symRefTab->aliasBuilder.addAddressArrayShadows(aliases);

               if (comp->generateArraylets())
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Address));
               }

            if (!methodInfo->doesntKillIntArrayShadows())
               {

               symRefTab->aliasBuilder.addIntArrayShadows(aliases);

               if (comp->generateArraylets())
                  {
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32));
                  }
               }

            if (!methodInfo->doesntKillNonIntPrimitiveArrayShadows())
               {

               symRefTab->aliasBuilder.addNonIntPrimitiveArrayShadows(aliases);

               if (comp->generateArraylets())
                  {
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int8));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int16));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int64));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Float));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Double));
                  }
               }

            if (!methodInfo->doesntKillAddressFields())
               *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs();

            if (!methodInfo->doesntKillIntFields())
               *aliases |= symRefTab->aliasBuilder.intShadowSymRefs();

            if (!methodInfo->doesntKillNonIntPrimitiveFields())
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs();

            if (!methodInfo->doesntKillAddressStatics())
               *aliases |= symRefTab->aliasBuilder.addressStaticSymRefs();

            if (!methodInfo->doesntKillIntStatics())
               *aliases |= symRefTab->aliasBuilder.intStaticSymRefs();

            if (!methodInfo->doesntKillNonIntPrimitiveStatics())
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveStaticSymRefs();

            TR_BitVector *methodAliases = symRefTab->aliasBuilder.methodAliases(self());
            *aliases &= *methodAliases;
            return aliases;
            }
#endif

         return symRefTab->aliasBuilder.methodAliases(self());
         }
      case TR::Symbol::IsShadow:
         {
         if ((self()->isUnresolved() && !_symbol->isConstObjectRef()) || _symbol->isVolatile() || self()->isLiteralPoolAddress() || self()->isFromLiteralPool() ||
             (_symbol->isUnsafeShadowSymbol() && !self()->reallySharesSymbol()))
            {
            if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber()))
               {
               TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
               *aliases |= comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();
               *aliases -= symRefTab->aliasBuilder.cpSymRefs();
               return aliases;
               }
            else
               return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();
            }

         TR_BitVector *aliases = NULL;
         if (_symbol == symRefTab->findGenericIntShadowSymbol())
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs();
            if (comp->generateArraylets())
               *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
#ifdef J9_PROJECT_SPECIFIC
            *aliases |= symRefTab->aliasBuilder.unresolvedShadowSymRefs();
#endif
            if (symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing())
               {
               *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.intShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs();
               }
            aliases->set(self()->getReferenceNumber());
            return aliases;
            }

         if (self()->reallySharesSymbol(comp))
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setSharedShadowAliases(aliases, symRefTab);
            }

         if (symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setLiteralPoolAliases(aliases, symRefTab);

            if (symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing() || self()->isUnresolved())
               {
               *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs();
               }
            }

         if (_symbol->isArrayShadowSymbol() &&
             symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();

            if (supportArrayRefinement && self()->getIndependentSymRefs())
               *aliases -= *self()->getIndependentSymRefs();
            }

#ifdef J9_PROJECT_SPECIFIC
         // Make TR::PackedDecimal alias with TR::Int8 (byte).
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::PackedDecimal)
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(TR::Int8));
            }
         // And the other way around.
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::Int8)
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(TR::PackedDecimal));
            }
#endif

         // Alias vector array shadows with the corresponding scalar array shadows.
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType().isVector())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().vectorToScalar()));
            }
         // the other way around
         if (_symbol->isArrayShadowSymbol() && !_symbol->getDataType().isVector())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().scalarToVector()));
            }

         if (_symbol->isArrayShadowSymbol() &&
             !symRefTab->aliasBuilder.immutableArrayElementSymRefs().isEmpty())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);

            TR::DataType type = _symbol->getDataType();
            TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs());
            int32_t symRefNum;
            while (bvi.hasMoreElements())
               {
               symRefNum = bvi.getNextElement();
               if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type)
                  aliases->set(symRefNum);
               }
            }

         if (_symbol->isArrayShadowSymbol() &&
             supportArrayRefinement &&
             comp->getMethodSymbol()->hasVeryRefinedAliasSets())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);

            TR::DataType type = _symbol->getDataType();
            TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs());
            int32_t symRefNum;
            while (bvi.hasMoreElements())
               {
               symRefNum = bvi.getNextElement();
               if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type)
                  aliases->set(symRefNum);
               }

            if (self()->getIndependentSymRefs())
               *aliases -= *self()->getIndependentSymRefs();

            return aliases;
            }

         if (aliases)
            aliases->set(self()->getReferenceNumber());

         if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber()))
            *aliases -= symRefTab->aliasBuilder.cpSymRefs();
         else if (symRefTab->aliasBuilder.cpSymRefs().get(self()->getReferenceNumber()))
            *aliases -= symRefTab->aliasBuilder.unsafeArrayElementSymRefs();

         return aliases;
         }
      case TR::Symbol::IsStatic:
         {
         // For an unresolved constant dynamic we must invoke a Java bootstrap method,
         // which can have arbitrary side effects, so the aliasing must be conservative.
         // isConstObjectRef now returns true for condy, so we short-circuit explicitly:
         // if we are unresolved and either condy or not a const object ref, we return
         // conservative aliases.
         if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) ||
             self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || _symbol->isVolatile())
            {
            return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliases();
            }

         TR_BitVector *aliases = NULL;
         if (self()->reallySharesSymbol(comp))
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setSharedStaticAliases(aliases, symRefTab);
            }

         if (symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setLiteralPoolAliases(aliases, symRefTab);
            }

         if (aliases)
            aliases->set(self()->getReferenceNumber());

         return aliases;
         }
      case TR::Symbol::IsMethodMetaData:
         {
         TR_BitVector *aliases = NULL;
         return aliases;
         }
      default:
         //TR_ASSERT(0, "getUseDefAliasing called for non method");
         if (comp->generateArraylets() && comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers().get(self()->getReferenceNumber()) && includeGCSafePoint)
            return &comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers();
         else
            return 0;
      }
   }
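
Throughout the shadow and static cases above, alias sets are composed with TR_BitVector operations: |= (union), &= (intersection), and -= (subtraction). A self-contained sketch of the same composition pattern using std::bitset (the set names and sizes are illustrative, not the OMR API):

   #include <bitset>

   static const size_t kMaxSymRefs = 1024;
   typedef std::bitset<kMaxSymRefs> AliasSet;

   // Conservative aliases for a shadow: everything the method may define,
   // minus symrefs known to be independent of this one.
   AliasSet shadowAliases(const AliasSet &methodDefAliases,
                          const AliasSet &independentSymRefs,
                          size_t selfRefNumber)
      {
      AliasSet aliases = methodDefAliases;
      aliases &= ~independentSymRefs; // set subtraction: aliases -= independent
      aliases.set(selfRefNumber);     // a symref always aliases itself
      return aliases;
      }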
Example #11
TR::Register *TR_X86FPCompareAnalyser::fpCompareAnalyser(TR::Node       *root,
                                                         TR_X86OpCodes cmpRegRegOpCode,
                                                         TR_X86OpCodes cmpRegMemOpCode,
                                                         TR_X86OpCodes cmpiRegRegOpCode,
                                                         bool           useFCOMIInstructions)
   {
   TR::Node      *firstChild,
                *secondChild;
   TR::ILOpCodes  cmpOp = root->getOpCodeValue();
   bool          reverseMemOp = false;
   bool          reverseCmpOp = false;
   TR::Compilation* comp = _cg->comp();
   TR_X86OpCodes cmpInstr = useFCOMIInstructions ? cmpiRegRegOpCode : cmpRegRegOpCode;

   // Some operators must have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool mustSwapOperands = (cmpOp == TR::iffcmple ||
                            cmpOp == TR::ifdcmple ||
                            cmpOp == TR::iffcmpgtu ||
                            cmpOp == TR::ifdcmpgtu ||
                            cmpOp == TR::fcmple ||
                            cmpOp == TR::dcmple ||
                            cmpOp == TR::fcmpgtu ||
                            cmpOp == TR::dcmpgtu ||
                            (useFCOMIInstructions &&
                             (cmpOp == TR::iffcmplt ||
                              cmpOp == TR::ifdcmplt ||
                              cmpOp == TR::iffcmpgeu ||
                              cmpOp == TR::ifdcmpgeu ||
                              cmpOp == TR::fcmplt ||
                              cmpOp == TR::dcmplt ||
                              cmpOp == TR::fcmpgeu ||
                              cmpOp == TR::dcmpgeu))) ? true : false;

   // Some operators should not have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool preventOperandSwapping = (cmpOp == TR::iffcmpltu ||
                                  cmpOp == TR::ifdcmpltu ||
                                  cmpOp == TR::iffcmpge ||
                                  cmpOp == TR::ifdcmpge ||
                                  cmpOp == TR::fcmpltu ||
                                  cmpOp == TR::dcmpltu ||
                                  cmpOp == TR::fcmpge ||
                                  cmpOp == TR::dcmpge ||
                                  (useFCOMIInstructions &&
                                   (cmpOp == TR::iffcmpgt ||
                                    cmpOp == TR::ifdcmpgt ||
                                    cmpOp == TR::iffcmpleu ||
                                    cmpOp == TR::ifdcmpleu ||
                                    cmpOp == TR::fcmpgt ||
                                    cmpOp == TR::dcmpgt ||
                                    cmpOp == TR::fcmpleu ||
                                    cmpOp == TR::dcmpleu))) ? true : false;

   // For correctness, don't swap operands of these operators.
   //
   if (cmpOp == TR::fcmpg || cmpOp == TR::fcmpl ||
       cmpOp == TR::dcmpg || cmpOp == TR::dcmpl)
      {
      preventOperandSwapping = true;
      }

   // Initial operand evaluation ordering.
   //
   if (preventOperandSwapping || (!mustSwapOperands && _cg->whichChildToEvaluate(root) == 0))
      {
      firstChild  = root->getFirstChild();
      secondChild = root->getSecondChild();
      setReversedOperands(false);
      }
   else
      {
      firstChild  = root->getSecondChild();
      secondChild = root->getFirstChild();
      setReversedOperands(true);
      }

   TR::Register *firstRegister  = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();

   setInputs(firstChild,
             firstRegister,
             secondChild,
             secondRegister,
             useFCOMIInstructions,

             // If either 'preventOperandSwapping' or 'mustSwapOperands' is set then the
             // initial operand ordering set above must be maintained.
             //
             preventOperandSwapping || mustSwapOperands);

   // Make sure any required operand ordering is respected.
   //
   if ((getCmpReg2Reg1() || getCmpReg2Mem1()) &&
       (mustSwapOperands || preventOperandSwapping))
      {
      reverseCmpOp = getCmpReg2Reg1() ? true : false;
      reverseMemOp = getCmpReg2Mem1() ? true : false;
      }

   // If we are not comparing with a memory operand, one of them evaluates
   // to a zero, and the zero is not already on the stack, then we can use
   // FTST to save a register.
   //
   // (With a memory operand, either the constant zero needs to be loaded
   // to use FCOM, or the memory operand needs to be loaded to use FTST,
   // so there is no gain in using FTST.)
   //
   // If the constant zero is in the target register, using FTST means the
   // comparison will be reversed. We cannot do this if the initial ordering
   // of the operands must be maintained.
   //
   // Finally, if FTST is used and this is the last use of the target, the
   // target register may need to be explicitly popped.
   //
   TR::Register *targetRegisterForFTST = NULL;
   TR::Node     *targetChildForFTST = NULL;

   if (getEvalChild1() && isUnevaluatedZero(firstChild))  // do we need getEvalChild1() here?
      {
      if ( ((getCmpReg1Reg2() || reverseCmpOp) && !(preventOperandSwapping || mustSwapOperands)) ||
            (getCmpReg2Reg1() && !reverseCmpOp))
         {
         if (getEvalChild2())
            {
            secondRegister = _cg->evaluate(secondChild);
            }
         targetRegisterForFTST = secondRegister;
         targetChildForFTST = secondChild;
         notReversedOperands();
         }
      }
   else if (getEvalChild2() && isUnevaluatedZero(secondChild))  // do we need getEvalChild2() here?
      {
      if ( (getCmpReg1Reg2() || reverseCmpOp) ||
           (getCmpReg2Reg1() && !reverseCmpOp && !(preventOperandSwapping || mustSwapOperands)) )
         {
         if (getEvalChild1())
            {
            firstRegister = _cg->evaluate(firstChild);
            }
         targetRegisterForFTST = firstRegister;
         targetChildForFTST = firstChild;
         }
      }

   if (!targetRegisterForFTST)
      {
      // If we have a choice, evaluate the target operand last.  By doing so, we
      // help out the register assigner because the target must be TOS.  This
      // avoids an unnecessary FXCH for the target.
      //
      if (getEvalChild1() && getEvalChild2())
         {
         if (getCmpReg1Reg2() || getCmpReg1Mem2())
            {
            secondRegister = _cg->evaluate(secondChild);
            firstRegister = _cg->evaluate(firstChild);
            }
         else
            {
            firstRegister = _cg->evaluate(firstChild);
            secondRegister = _cg->evaluate(secondChild);
            }
         }
      else
         {
         if (getEvalChild1())
            {
            firstRegister = _cg->evaluate(firstChild);
            }

         if (getEvalChild2())
            {
            secondRegister = _cg->evaluate(secondChild);
            }
         }
      }

   // Adjust the FP precision of feeding operands.
   //
   if (firstRegister &&
       (firstRegister->needsPrecisionAdjustment() ||
        comp->getOption(TR_StrictFPCompares) ||
        (firstRegister->mayNeedPrecisionAdjustment() && secondChild->getOpCode().isLoadConst()) ||
        (firstRegister->mayNeedPrecisionAdjustment() && !secondRegister)))
      {
      TR::TreeEvaluator::insertPrecisionAdjustment(firstRegister, root, _cg);
      }

   if (secondRegister &&
       (secondRegister->needsPrecisionAdjustment() ||
        comp->getOption(TR_StrictFPCompares) ||
        (secondRegister->mayNeedPrecisionAdjustment() && firstChild->getOpCode().isLoadConst()) ||
        (secondRegister->mayNeedPrecisionAdjustment() && !firstRegister)))
      {
      TR::TreeEvaluator::insertPrecisionAdjustment(secondRegister, root, _cg);
      }

   // Generate the compare instruction.
   //
   if (targetRegisterForFTST)
      {
      generateFPRegInstruction(FTSTReg, root, targetRegisterForFTST, _cg);
      }
   else if (!useFCOMIInstructions && (getCmpReg1Mem2() || reverseMemOp))
      {
      TR::MemoryReference  *tempMR = generateX86MemoryReference(secondChild, _cg);
      generateFPRegMemInstruction(cmpRegMemOpCode, root, firstRegister, tempMR, _cg);
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (!useFCOMIInstructions && getCmpReg2Mem1())
      {
      TR::MemoryReference  *tempMR = generateX86MemoryReference(firstChild, _cg);
      generateFPRegMemInstruction(cmpRegMemOpCode, root, secondRegister, tempMR, _cg);
      notReversedOperands();
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (getCmpReg1Reg2() || reverseCmpOp)
      {
      generateFPCompareRegRegInstruction(cmpInstr, root, firstRegister, secondRegister, _cg);
      }
   else if (getCmpReg2Reg1())
      {
      generateFPCompareRegRegInstruction(cmpInstr, root, secondRegister, firstRegister, _cg);
      notReversedOperands();
      }

   _cg->decReferenceCount(firstChild);
   _cg->decReferenceCount(secondChild);

   // Evaluate the comparison.
   //
   if (getReversedOperands())
      {
      cmpOp = TR::ILOpCode(cmpOp).getOpCodeForSwapChildren();
      TR::Node::recreate(root, cmpOp);
      }

   if (useFCOMIInstructions && !targetRegisterForFTST)
      {
      return NULL;
      }

   // We must manually move the FP condition flags to the EFLAGS register if we don't
   // use the FCOMI instructions.
   //
   TR::Register *accRegister = _cg->allocateRegister();
   TR::RegisterDependencyConditions  *dependencies = generateRegisterDependencyConditions((uint8_t)1, 1, _cg);
   dependencies->addPreCondition(accRegister, TR::RealRegister::eax, _cg);
   dependencies->addPostCondition(accRegister, TR::RealRegister::eax, _cg);
   generateRegInstruction(STSWAcc, root, accRegister, dependencies, _cg);

   // Pop the FTST target register if it is not used any more.
   //
   if (targetRegisterForFTST &&
       targetChildForFTST && targetChildForFTST->getReferenceCount() == 0)
      {
      generateFPSTiST0RegRegInstruction(FSTRegReg, root, targetRegisterForFTST, targetRegisterForFTST, _cg);
      }

   return accRegister;
   }
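
When the analyser reverses the operand order, it recreates the node with the swapped-children opcode (getOpCodeForSwapChildren) so the comparison keeps its meaning: a < b is the same test as b > a. A sketch of that mirroring with a hypothetical mini-IL enum:

   enum CmpOp { CmpLT, CmpGT, CmpLE, CmpGE, CmpEQ, CmpNE };

   static CmpOp swapChildren(CmpOp op)
      {
      switch (op)
         {
         case CmpLT: return CmpGT; // a <  b  ==  b >  a
         case CmpGT: return CmpLT;
         case CmpLE: return CmpGE; // a <= b  ==  b >= a
         case CmpGE: return CmpLE;
         default:    return op;    // EQ and NE are symmetric
         }
      }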
Example #12
int32_t OMR::ConstantDataSnippet::addConstantRequest(void            *v,
                                                     TR::DataType     type,
                                                     TR::Instruction *nibble0,
                                                     TR::Instruction *nibble1,
                                                     TR::Instruction *nibble2,
                                                     TR::Instruction *nibble3,
                                                     TR::Node        *node,
                                                     bool             isUnloadablePicSite)
   {
   TR::Compilation *comp = cg()->comp();
   union {
      float       fvalue;
      int32_t     ivalue;
   } fin, fex;

   union {
      double      dvalue;
      int64_t     lvalue;
   } din, dex;

   intptrj_t   ain, aex;

   int32_t ret = PTOC_FULL_INDEX;

   switch(type)
      {
      case TR::Float:
         {
         ListIterator< PPCConstant<float> >  fiterator(&_floatConstants);
         PPCConstant<float>                 *fcursor=fiterator.getFirst();

         fin.fvalue = *(float *)v;
         while (fcursor != NULL)
            {
            fex.fvalue = fcursor->getConstantValue();
            if (fin.ivalue == fex.ivalue)
               break;
            fcursor = fiterator.getNext();
            }
         if (fcursor == NULL)
            {
            fcursor = new (_cg->trHeapMemory()) PPCConstant<float>(_cg, fin.fvalue);
            _floatConstants.add(fcursor);
            if (TR::Compiler->target.is64Bit() && !comp->getOption(TR_DisableTOCForConsts))
               {
               ret = TR_PPCTableOfConstants::lookUp(fin.fvalue, _cg);
               }
            fcursor->setTOCOffset(ret);
            }
         ret = fcursor->getTOCOffset();
         if (TR::Compiler->target.is32Bit() || ret==PTOC_FULL_INDEX)
            fcursor->addValueRequest(nibble0, nibble1, nibble2, nibble3);
         }
         break;

      case TR::Double:
         {
         ListIterator< PPCConstant<double> > diterator(&_doubleConstants);
         PPCConstant<double>                *dcursor=diterator.getFirst();

         din.dvalue = *(double *)v;
         while (dcursor != NULL)
            {
            dex.dvalue = dcursor->getConstantValue();
            if (din.lvalue == dex.lvalue)
               break;
            dcursor = diterator.getNext();
            }
         if (dcursor == NULL)
            {
            dcursor = new (_cg->trHeapMemory()) PPCConstant<double>(_cg, din.dvalue);
            _doubleConstants.add(dcursor);
            if (TR::Compiler->target.is64Bit() && !comp->getOption(TR_DisableTOCForConsts))
               {
               ret = TR_PPCTableOfConstants::lookUp(din.dvalue, _cg);
               }
            dcursor->setTOCOffset(ret);
            }
         ret = dcursor->getTOCOffset();
         if (TR::Compiler->target.is32Bit() || ret==PTOC_FULL_INDEX)
            dcursor->addValueRequest(nibble0, nibble1, nibble2, nibble3);
         }
         break;

      case TR::Address:
         {
         ListIterator< PPCConstant<intptrj_t> >  aiterator(&_addressConstants);
         PPCConstant<intptrj_t>                 *acursor=aiterator.getFirst();

         ain = *(intptrj_t *)v;
         while (acursor != NULL)
            {
            aex = acursor->getConstantValue();
            // If pointers require relocation, they may not all be relocated
            //   for the same reason, so be conservative and do not combine
            //   them (e.g. HCR versus profiled inlined site enablement).
            if (ain == aex &&
                (!cg()->profiledPointersRequireRelocation() || acursor->getNode() == node))
               break;
            acursor = aiterator.getNext();
            }
         if (acursor && acursor->isUnloadablePicSite()!=isUnloadablePicSite)
            {
            TR_ASSERT(0, "Existing address constant does not have a matching unloadable state.\n" );
            acursor = NULL; // If asserts are turned off then we should just create a duplicate constant
            }
         if (acursor == NULL)
            {
            acursor = new (_cg->trHeapMemory()) PPCConstant<intptrj_t>(_cg, ain, node, isUnloadablePicSite);
            _addressConstants.add(acursor);
            }
         acursor->addValueRequest(nibble0, nibble1, nibble2, nibble3);
         }
         break;

      default:
         TR_ASSERT(0, "Only float and address constants are supported. Data type is %s.\n", type.toString());
      }

   return(ret);
   }
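The unions above implement a bit-pattern comparison rather than a value comparison when searching the constant pool. A value comparison would merge +0.0 with -0.0 (which compare equal but encode differently) and could never match an existing NaN entry (NaN compares unequal to itself). A standalone sketch of the same idea, using memcpy instead of a union:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Compare two floats by their bit patterns, as the fin/fex union does.
bool sameBits(float a, float b)
   {
   uint32_t ia, ib;
   std::memcpy(&ia, &a, sizeof ia);
   std::memcpy(&ib, &b, sizeof ib);
   return ia == ib;
   }

int main()
   {
   // Value equality would wrongly merge the two zero constants...
   assert(0.0f == -0.0f && !sameBits(0.0f, -0.0f));

   // ...and could never reuse a NaN pool entry, since NaN != NaN by value.
   float qnan = std::nanf("");
   assert(qnan != qnan && sameBits(qnan, qnan));
   return 0;
   }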
Example #13
0
LexicalXmlTag::~LexicalXmlTag()
   {
   TR::Compilation *comp = cg->comp();
   if (comp->getOption(TR_TraceOptDetails) || comp->getOption(TR_TraceCG))
      traceMsg(comp, "</codegen>\n");
   }
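This destructor closes the <codegen> element that the matching constructor opens, so the trace stays well formed on every exit path; the tag is scope-bound in classic RAII fashion. A stripped-down sketch of the idiom, with an illustrative type that is not the OMR class:

#include <cstdio>

struct XmlScope
   {
   const char *tag;
   explicit XmlScope(const char *t) : tag(t) { std::printf("<%s>\n", tag); }
   ~XmlScope() { std::printf("</%s>\n", tag); }
   };

void emitSomething(bool bailOutEarly)
   {
   XmlScope scope("codegen");
   if (bailOutEarly)
      return; // the closing tag is still printed by the destructor
   std::printf("  ...body...\n");
   }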
Example #14
0
void
OMR::CodeGenPhase::performSetupForInstructionSelectionPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation *comp = cg->comp();

   if (TR::Compiler->target.cpu.isZ() && TR::Compiler->om.shouldGenerateReadBarriersForFieldLoads())
      {
      // TODO (GuardedStorage): We need to come up with a better solution
      // than anchoring aloadi nodes to enforce a certain evaluation order
      traceMsg(comp, "GuardedStorage: in performSetupForInstructionSelectionPhase\n");

      auto mapAllocator = getTypedAllocator<std::pair<TR::TreeTop*, TR::TreeTop*> >(comp->allocator());

      std::map<TR::TreeTop*, TR::TreeTop*, std::less<TR::TreeTop*>, TR::typed_allocator<std::pair<TR::TreeTop* const, TR::TreeTop*>, TR::Allocator> >
         currentTreeTopToappendTreeTop(std::less<TR::TreeTop*> (), mapAllocator);

      TR_BitVector *unAnchorableAloadiNodes = comp->getBitVectorPool().get();

      for (TR::PreorderNodeIterator iter(comp->getStartTree(), comp); iter != NULL; ++iter)
         {
         TR::Node *node = iter.currentNode();

         traceMsg(comp, "GuardedStorage: Examining node = %p\n", node);

         // isNullCheck handles both TR::NULLCHK and TR::ResolveAndNULLCHK,
         // both of which operate not on their child but on their
         // grandchild (or great-grandchild).
         if (node->getOpCode().isNullCheck())
            {
            // An aloadi cannot be anchored if there is a Null Check on
            // its child. There are two situations where this occurs.
            // The first is when doing an aloadi off some node that is
            // being NULLCHK'd (see Ex1). The second is when doing an
            // icalli in which case the aloadi loads the VFT of an
            // object that must be NULLCHK'd (see Ex2).
            //
            // Ex1:
            //    n1n NULLCHK on n3n
            //    n2n    aloadi f    <-- First Child And Parent of Null Chk'd Node
            //    n3n       aload O
            //
            // Ex2:
            //    n1n NULLCHK on n4n
            //    n2n    icall foo        <-- First Child
            //    n3n       aloadi <vft>  <-- Parent of Null Chk'd Node
            //    n4n          aload O
            //    n4n       ==> aload O

            TR::Node *nodeBeingNullChkd = node->getNullCheckReference();
            if (nodeBeingNullChkd)
               {
               TR::Node *firstChild = node->getFirstChild();
               TR::Node *parentOfNullChkdNode = NULL;

               if (firstChild->getOpCode().isCall() &&
                   firstChild->getOpCode().isIndirect())
                  {
                  parentOfNullChkdNode = firstChild->getFirstChild();
                  }
               else
                  {
                  parentOfNullChkdNode = firstChild;
                  }

               if (parentOfNullChkdNode &&
                   parentOfNullChkdNode->getOpCodeValue() == TR::aloadi &&
                   parentOfNullChkdNode->getNumChildren() > 0 &&
                   parentOfNullChkdNode->getFirstChild() == nodeBeingNullChkd)
                  {
                  unAnchorableAloadiNodes->set(parentOfNullChkdNode->getGlobalIndex());
                  traceMsg(comp, "GuardedStorage: Cannot anchor  %p\n", firstChild);
                  }
               }
            }
         else
            {
            bool shouldAnchorNode = false;

            if (node->getOpCodeValue() == TR::aloadi &&
                !unAnchorableAloadiNodes->isSet(node->getGlobalIndex()))
               {
               shouldAnchorNode = true;
               }
            else if (node->getOpCodeValue() == TR::aload &&
                     node->getSymbol()->isStatic() &&
                     node->getSymbol()->isCollectedReference())
               {
               shouldAnchorNode = true;
               }

            if (shouldAnchorNode)
               {
               TR::TreeTop* anchorTreeTop = TR::TreeTop::create(comp, TR::Node::create(TR::treetop, 1, node));
               TR::TreeTop* appendTreeTop = iter.currentTree();

               if (currentTreeTopToappendTreeTop.count(appendTreeTop) > 0)
                  {
                  appendTreeTop = currentTreeTopToappendTreeTop[appendTreeTop];
                  }

               // Anchor the aload/aloadi before the current treetop
               appendTreeTop->insertBefore(anchorTreeTop);
               currentTreeTopToappendTreeTop[iter.currentTree()] = anchorTreeTop;

               traceMsg(comp, "GuardedStorage: Anchored  %p to treetop = %p\n", node, anchorTreeTop);
               }
            }
         }

      comp->getBitVectorPool().release(unAnchorableAloadiNodes);
      }

   if (cg->shouldBuildStructure() &&
       (comp->getFlowGraph()->getStructure() != NULL))
      {
      TR_Structure *rootStructure = TR_RegionAnalysis::getRegions(comp);
      comp->getFlowGraph()->setStructure(rootStructure);
      }

   phase->reportPhase(SetupForInstructionSelectionPhase);

   // Trace a post-optimization register pressure simulation
   if (comp->getOption(TR_TraceRegisterPressureDetails) && !comp->getOption(TR_DisableRegisterPressureSimulation))
      {
      traceMsg(comp, "         { Post optimization register pressure simulation\n");
      TR_BitVector emptyBitVector;
      vcount_t vc = comp->incVisitCount();
      cg->initializeRegisterPressureSimulator();
      for (TR::Block *block = comp->getStartBlock(); block; block = block->getNextExtendedBlock())
         {
         TR_LinkHead<TR_RegisterCandidate> emptyCandidateList;
         TR::CodeGenerator::TR_RegisterPressureState state(NULL, 0, emptyBitVector, emptyBitVector, &emptyCandidateList, cg->getNumberOfGlobalGPRs(), cg->getNumberOfGlobalFPRs(), cg->getNumberOfGlobalVRFs(), vc);
         TR::CodeGenerator::TR_RegisterPressureSummary summary(state._gprPressure, state._fprPressure, state._vrfPressure);
         cg->simulateBlockEvaluation(block, &state, &summary);
         }
      traceMsg(comp, "         }\n");
      }

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->setUpForInstructionSelection();
   }
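The anchoring step above splices a new treetop in front of the tree that uses the load, and the currentTreeTopToappendTreeTop map re-targets the insertion point so that every later anchor for the same tree lands before the anchors already inserted. A simplified standalone sketch of that splicing, with a plain doubly linked list standing in for TR::TreeTop:

#include <cstdio>
#include <map>

// Minimal doubly linked "treetop" list; illustrative, not the OMR type.
struct Tree
   {
   const char *label;
   Tree *prev = nullptr;
   Tree *next = nullptr;
   };

// Splice n immediately before pos, as TreeTop::insertBefore does.
void insertBefore(Tree *pos, Tree *n)
   {
   n->prev = pos->prev;
   n->next = pos;
   if (pos->prev)
      pos->prev->next = n;
   pos->prev = n;
   }

int main()
   {
   Tree use{"consuming-tree"}, anchor1{"anchor-1"}, anchor2{"anchor-2"};
   std::map<Tree*, Tree*> appendPoint; // current tree -> last anchor added

   // The first anchored load goes directly before the consuming tree...
   insertBefore(&use, &anchor1);
   appendPoint[&use] = &anchor1;

   // ...and the next one goes before the previous anchor, exactly as the
   // map lookup in the phase redirects the append treetop.
   insertBefore(appendPoint[&use], &anchor2);
   appendPoint[&use] = &anchor2;

   // Resulting order: anchor-2, anchor-1, consuming-tree.
   for (Tree *t = &anchor2; t; t = t->next)
      std::printf("%s\n", t->label);
   return 0;
   }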
Example #15
0
void
OMR::CodeGenPhase::performProcessRelocationsPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation * comp = cg->comp();

   if (comp->getPersistentInfo()->isRuntimeInstrumentationEnabled())
      {
      // This must be called before relocations to generate the relocation data for the profiled instructions.
      cg->createHWPRecords();
      }

   phase->reportPhase(ProcessRelocationsPhase);

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->processRelocations();

   cg->resizeCodeMemory();
   cg->registerAssumptions();

   cg->syncCode(cg->getBinaryBufferStart(), cg->getBinaryBufferCursor() - cg->getBinaryBufferStart());

   if (comp->getOption(TR_EnableOSR))
      {
      if (comp->getOption(TR_TraceOSR) && !comp->getOption(TR_DisableOSRSharedSlots))
         {
         (*comp) << "OSRCompilationData is " << *comp->getOSRCompilationData() << "\n";
         }
      }

   if (comp->getOption(TR_AOT) && (comp->getOption(TR_TraceRelocatableDataCG) || comp->getOption(TR_TraceRelocatableDataDetailsCG) || comp->getOption(TR_TraceReloCG)))
      {
      traceMsg(comp, "\n<relocatableDataCG>\n");
      if (comp->getOption(TR_TraceRelocatableDataDetailsCG)) // verbose output
         {
         uint8_t * relocatableMethodCodeStart = (uint8_t *)comp->getRelocatableMethodCodeStart();
         traceMsg(comp, "Code start = %8x, Method start pc = %x, Method start pc offset = 0x%x\n", relocatableMethodCodeStart, cg->getCodeStart(), cg->getCodeStart() - relocatableMethodCodeStart);
         }
      cg->getAheadOfTimeCompile()->dumpRelocationData();
      traceMsg(comp, "</relocatableDataCG>\n");
      }

     if (debug("dumpCodeSizes"))
        {
        diagnostic("%08d   %s\n", cg->getCodeLength(), comp->signature());
        }

     if (comp->getCurrentMethod() == NULL)
        {
        comp->getMethodSymbol()->setMethodAddress(cg->getBinaryBufferStart());
        }

     TR_ASSERT(cg->getCodeLength() <= cg->getEstimatedCodeLength(),
               "Method length estimate must be conservatively large\n"
               "    codeLength = %d, estimatedCodeLength = %d \n",
               cg->getCodeLength(), cg->getEstimatedCodeLength()
               );

     // also trace the interal stack atlas
     cg->getStackAtlas()->close(cg);

     TR::SimpleRegex * regex = comp->getOptions()->getSlipTrap();
     if (regex && TR::SimpleRegex::match(regex, comp->getCurrentMethod()))
        {
        if (TR::Compiler->target.is64Bit())
        {
        setDllSlip((char*)cg->getCodeStart(),(char*)cg->getCodeStart()+cg->getCodeLength(),"SLIPDLL64", comp);
        }
     else
        {
        setDllSlip((char*)cg->getCodeStart(),(char*)cg->getCodeStart()+cg->getCodeLength(),"SLIPDLL31", comp);
        }
     }
   if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceCGPostBinaryEncoding))
      {
      const char * title = "Post Relocation Instructions";
      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), title, false, true);

      traceMsg(comp,"<snippets>");
      comp->getDebug()->print(comp->getOutFile(), cg->getSnippetList());
      traceMsg(comp,"\n</snippets>\n");

      auto iterator = cg->getSnippetList().begin();
      int32_t estimatedSnippetStart = cg->getEstimatedSnippetStart();
      while (iterator != cg->getSnippetList().end())
         {
         estimatedSnippetStart += (*iterator)->getLength(estimatedSnippetStart);
         ++iterator;
         }
      }
   }
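One detail worth noting in the trace block above: each snippet's getLength call receives the running offset, and the results accumulate into estimatedSnippetStart. A plausible reason a length can depend on its own starting offset is alignment padding; that interpretation, and the Snippet type below, are assumptions for illustration rather than the OMR definitions.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical snippet whose encoded length depends on where it starts.
struct Snippet
   {
   uint32_t size;      // payload bytes
   uint32_t alignment; // required starting boundary

   uint32_t getLength(uint32_t start) const
      {
      uint32_t padding = (alignment - start % alignment) % alignment;
      return padding + size;
      }
   };

int main()
   {
   std::vector<Snippet> snippets = { {12, 4}, {7, 8}, {16, 16} };
   uint32_t estimatedSnippetStart = 100;
   for (const Snippet &s : snippets)
      estimatedSnippetStart += s.getLength(estimatedSnippetStart);
   std::printf("estimated end of snippet area: %u\n", estimatedSnippetStart);
   return 0;
   }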
Example #16
0
void TR_ARMRegisterDependencyGroup::assignRegisters(TR::Instruction  *currentInstruction,
                                                    TR_RegisterKinds kindToBeAssigned,
                                                    uint32_t         numberOfRegisters,
                                                    TR::CodeGenerator *cg)
   {
   TR::Compilation *comp = cg->comp();
   TR::Machine *machine = cg->machine();
   TR::Register  *virtReg;
   TR::RealRegister::RegNum dependentRegNum;
   TR::RealRegister *dependentRealReg, *assignedRegister;
   uint32_t i, j;
   bool changed;

   if (!comp->getOption(TR_DisableOOL))
      {
      for (i = 0; i< numberOfRegisters; i++)
         {
         virtReg = dependencies[i].getRegister();
         dependentRegNum = dependencies[i].getRealRegister();
         if (dependentRegNum == TR::RealRegister::SpilledReg)
            {
            TR_ASSERT(virtReg->getBackingStorage(),"should have a backing store if dependentRegNum == spillRegIndex()\n");
            if (virtReg->getAssignedRealRegister())
               {
               // This happens when the register was first spilled on the main line path and then
               // reverse spilled and assigned to a real register on the OOL path. The backing store
               // was protected during the reverse spill, so we can re-spill to the same slot now.
               traceMsg (comp,"\nOOL: Found register spilled in main line and re-assigned inside OOL");
               TR::Node *currentNode = currentInstruction->getNode();
               TR::RealRegister *assignedReg    = toRealRegister(virtReg->getAssignedRegister());
               TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(currentNode, (TR::SymbolReference*)virtReg->getBackingStorage()->getSymbolReference(), sizeof(uintptr_t), cg);
               TR_ARMOpCodes opCode;
               TR_RegisterKinds rk = virtReg->getKind();
               switch (rk)
                  {
                  case TR_GPR:
                     opCode = ARMOp_ldr;
                     break;
                  case TR_FPR:
                     opCode = virtReg->isSinglePrecision() ? ARMOp_ldfs : ARMOp_ldfd;
                     break;
                  default:
                     TR_ASSERT(0, "\nRegister kind not supported in OOL spill\n");
                     break;
                  }

               TR::Instruction *inst = generateTrg1MemInstruction(cg, opCode, currentNode, assignedReg, tempMR, currentInstruction);

               assignedReg->setAssignedRegister(NULL);
               virtReg->setAssignedRegister(NULL);
               assignedReg->setState(TR::RealRegister::Free);

               if (comp->getDebug())
                  cg->traceRegisterAssignment("Generate reload of virt %s due to spillRegIndex dep at inst %p\n", cg->comp()->getDebug()->getName(virtReg),currentInstruction);
               cg->traceRAInstruction(inst);
               }
            if (std::find(cg->getSpilledRegisterList()->begin(), cg->getSpilledRegisterList()->end(), virtReg) == cg->getSpilledRegisterList()->end())
               cg->getSpilledRegisterList()->push_front(virtReg);
            }
         // we also need to free up all locked backing storage if we are exiting the OOL during backwards RA assignment
         else if (currentInstruction->isLabel() && virtReg->getAssignedRealRegister())
            {
            TR::ARMLabelInstruction *labelInstr = (TR::ARMLabelInstruction *)currentInstruction;
            TR_BackingStore *location = virtReg->getBackingStorage();
            TR_RegisterKinds rk = virtReg->getKind();
            int32_t dataSize;
            if (labelInstr->getLabelSymbol()->isStartOfColdInstructionStream() && location)
               {
               traceMsg (comp,"\nOOL: Releasing backing storage (%p)\n", location);
               if (rk == TR_GPR)
                  dataSize = TR::Compiler->om.sizeofReferenceAddress();
               else
                  dataSize = 8;
               location->setMaxSpillDepth(0);
               cg->freeSpill(location,dataSize,0);
               virtReg->setBackingStorage(NULL);
               }
            }
         }
      }
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = dependencies[i].getRegister();

      if (virtReg->getAssignedRealRegister()!=NULL)
         {
         if (dependencies[i].getRealRegister() == TR::RealRegister::NoReg)
            {
            virtReg->block();
            }
         else
            {
            dependentRegNum = toRealRegister(virtReg->getAssignedRealRegister())->getRegisterNumber();
            for (j=0; j<numberOfRegisters; j++)
               {
               if (dependentRegNum == dependencies[j].getRealRegister())
                  {
                  virtReg->block();
                  break;
                  }
               }
            }
         }
      }

   do
      {
      changed = false;
      for (i = 0; i < numberOfRegisters; i++)
         {
         virtReg = dependencies[i].getRegister();
         dependentRegNum = dependencies[i].getRealRegister();
         dependentRealReg = machine->getRealRegister(dependentRegNum);

         if (dependentRegNum != TR::RealRegister::NoReg &&
             dependentRegNum != TR::RealRegister::SpilledReg &&
             dependentRealReg->getState() == TR::RealRegister::Free)
            {
            machine->coerceRegisterAssignment(currentInstruction, virtReg, dependentRegNum);
            virtReg->block();
            changed = true;
            }
         }
      } while (changed);


   do
      {
      changed = false;
      for (i = 0; i < numberOfRegisters; i++)
         {
         virtReg = dependencies[i].getRegister();
         assignedRegister = NULL;
         if (virtReg->getAssignedRealRegister() != NULL)
            {
            assignedRegister = toRealRegister(virtReg->getAssignedRealRegister());
            }
         dependentRegNum = dependencies[i].getRealRegister();
         dependentRealReg = machine->getRealRegister(dependentRegNum);
         if (dependentRegNum != TR::RealRegister::NoReg &&
             dependentRegNum != TR::RealRegister::SpilledReg &&
             dependentRealReg != assignedRegister)
            {
            machine->coerceRegisterAssignment(currentInstruction, virtReg, dependentRegNum);
            virtReg->block();
            changed = true;
            }
         }
      } while (changed);

   for (i=0; i<numberOfRegisters; i++)
      {
      if (dependencies[i].getRealRegister() == TR::RealRegister::NoReg)
         {
         bool excludeGPR0 = dependencies[i].getExcludeGPR0() ? true : false;
         TR::RealRegister *realOne;

         virtReg = dependencies[i].getRegister();
         realOne = virtReg->getAssignedRealRegister();
         if (realOne!=NULL && excludeGPR0 && toRealRegister(realOne)->getRegisterNumber()==TR::RealRegister::gr0)
            {
            if ((assignedRegister = machine->findBestFreeRegister(virtReg->getKind(), true)) == NULL)
               {
               assignedRegister = machine->freeBestRegister(currentInstruction, virtReg->getKind(), NULL, true);
               }
            machine->coerceRegisterAssignment(currentInstruction, virtReg, assignedRegister->getRegisterNumber());
            }
         else if (realOne == NULL)
            {
            if (virtReg->getTotalUseCount() == virtReg->getFutureUseCount())
               {
               if ((assignedRegister = machine->findBestFreeRegister(virtReg->getKind(), excludeGPR0, true)) == NULL)
                  {
                  assignedRegister = machine->freeBestRegister(currentInstruction, virtReg->getKind(), NULL, excludeGPR0);
                  }
               }
            else
               {
               assignedRegister = machine->reverseSpillState(currentInstruction, virtReg, NULL, excludeGPR0);
               }
            virtReg->setAssignedRegister(assignedRegister);
            assignedRegister->setAssignedRegister(virtReg);
            assignedRegister->setState(TR::RealRegister::Assigned);
            virtReg->block();
            }
         }
      }

   unblockRegisters(numberOfRegisters);
   for (i = 0; i < numberOfRegisters; i++)
      {
      TR::Register *dependentRegister = getRegisterDependency(i)->getRegister();
      if (dependentRegister->getAssignedRegister())
         {
         TR::RealRegister *assignedRegister = dependentRegister->getAssignedRegister()->getRealRegister();

         if (getRegisterDependency(i)->getRealRegister() == TR::RealRegister::NoReg)
            getRegisterDependency(i)->setRealRegister(toRealRegister(assignedRegister)->getRegisterNumber());

         if (dependentRegister->decFutureUseCount() == 0)
            {
            dependentRegister->setAssignedRegister(NULL);
            assignedRegister->setAssignedRegister(NULL);
            assignedRegister->setState(TR::RealRegister::Unlatched); // Was setting to Free
            }
         }
      }
   }
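The two do/while loops above iterate to a fixed point because satisfying one dependency can free the register that another dependency is waiting for. The standalone sketch below (a simplified data model, not the OMR machine API) shows a chain where only one coercion is possible per sweep, so a single pass would leave dependencies unsatisfied:

#include <cstdio>

int main()
   {
   // assignedTo[v]: real register currently holding virtual v.
   // requiredReal[v]: real register the dependency demands for v.
   int assignedTo[3]   = {0, 1, 2};
   int requiredReal[3] = {1, 2, 3};
   bool realBusy[4]    = {true, true, true, false};

   // Free-register pass: keep sweeping until no coercion fires. A single
   // sweep is not enough here -- only v2 can move at first; freeing its
   // old register unblocks v1, which in turn unblocks v0.
   bool changed;
   do
      {
      changed = false;
      for (int v = 0; v < 3; ++v)
         {
         int want = requiredReal[v];
         if (assignedTo[v] != want && !realBusy[want])
            {
            realBusy[assignedTo[v]] = false;
            realBusy[want] = true;
            assignedTo[v] = want;
            changed = true;
            }
         }
      } while (changed);

   for (int v = 0; v < 3; ++v)
      std::printf("virtual %d -> real %d\n", v, assignedTo[v]);
   return 0;
   }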