Example #1
const char *
TR_Debug::getNamex(TR::Snippet *snippet)
   {
   switch (snippet->getKind())
      {
#ifdef J9_PROJECT_SPECIFIC
      case TR::Snippet::IsCall:
         return "Call Snippet";
         break;
      case TR::Snippet::IsVPicData:
         return "VPic Data";
         break;
      case TR::Snippet::IsIPicData:
         return "IPic Data";
         break;
      case TR::Snippet::IsUnresolvedVirtualCall:
         return "Unresolved Virtual Call Snippet";
         break;
      case TR::Snippet::IsWriteBarrier:
      case TR::Snippet::IsWriteBarrierAMD64:
         return "Write Barrier Snippet";
         break;
      case TR::Snippet::IsJNIPause:
         return "JNI Pause Snippet";
         break;
      case TR::Snippet::IsScratchArgHelperCall:
         return "Helper Call Snippet with scratch-reg argument";
         break;
      case TR::Snippet::IsForceRecompilation:
         return "Force Recompilation Snippet";
         break;
      case TR::Snippet::IsRecompilation:
         return "Recompilation Snippet";
         break;
#endif
      case TR::Snippet::IsCheckFailure:
         return "Check Failure Snippet";
         break;
      case TR::Snippet::IsCheckFailureWithResolve:
         return "Check Failure Snippet with Resolve Call";
         break;
      case TR::Snippet::IsBoundCheckWithSpineCheck:
         return "Bound Check with Spine Check Snippet";
         break;
      case TR::Snippet::IsSpineCheck:
         return "Spine Check Snippet";
         break;
      case TR::Snippet::IsConstantData:
         return "Constant Data Snippet";
         break;
      case TR::Snippet::IsData:
         return "Data Snippet";
      case TR::Snippet::IsDivideCheck:
         return "Divide Check Snippet";
         break;

#ifdef J9_PROJECT_SPECIFIC
      case TR::Snippet::IsGuardedDevirtual:
         return "Guarded Devirtual Snippet";
         break;
#endif
      case TR::Snippet::IsHelperCall:
         return "Helper Call Snippet";
         break;
      case TR::Snippet::IsFPConversion:
         return "FP Conversion Snippet";
         break;
      case TR::Snippet::IsFPConvertToInt:
         return "FP Convert To Int Snippet";
         break;
      case TR::Snippet::IsFPConvertToLong:
         return "FP Convert To Long Snippet";
         break;
      case TR::Snippet::IsPassJNINull:
         return "Pass JNI Null Snippet";
         break;
      case TR::Snippet::IsUnresolvedDataIA32:
      case TR::Snippet::IsUnresolvedDataAMD64:
         return "Unresolved Data Snippet";
         break;
      case TR::Snippet::IsRestart:
      default:
         TR_ASSERT(0, "unexpected snippet kind: %d", snippet->getKind());
         return "Unknown snippet kind";
      }
   }
Example #2
void
OMR::Symbol::setMemoryTypeShadowSymbol()
   {
   TR_ASSERT(self()->isShadow(), "assertion failure");
   _flags.set(MemoryTypeShadow);
   }
Example #3
void
OMR::Symbol::setStartOfColdInstructionStream()
   {
   TR_ASSERT(self()->isLabel(), "assertion failure");
   _flags.set(StartOfColdInstructionStream);
   }
Example #4
void
OMR::Symbol::setRecompilationCounter()
   {
   TR_ASSERT(self()->isStatic(), "assertion failure");
   _flags.set(RecompilationCounter);
   }
Example #5
void
OMR::Symbol::setArrayletShadowSymbol()
   {
   TR_ASSERT(self()->isShadow(), "assertion failure");
   _flags.set(ArrayletShadow);
   }
Example #6
void
OMR::Symbol::setAddressIsCPIndexOfStatic(bool b)
   {
   TR_ASSERT(self()->isStatic(), "assertion failure");
   _flags.set(AddressIsCPIndexOfStatic, b);
   }
Example #7
void
OMR::Symbol::setStartPC()
   {
   TR_ASSERT(self()->isStatic(), "assertion failure");
   _flags.set(StartPC);
   }
Example #8
void
OMR::Symbol::setLocalObject()
   {
   TR_ASSERT(self()->isAuto(), "assertion failure");
   _flags.set(IsLocalObject);
   }
Example #9
void
OMR::Symbol::setBehaveLikeNonTemp()
   {
   TR_ASSERT(self()->isAuto(), "assertion failure");
   _flags.set(BehaveLikeNonTemp);
   }
Example #10
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

#if defined(PYTHON) && 0
   // For Python, store the instruction that contains the GC map at this site into
   // the frame object.
   //
   TR::SymbolReference *frameObjectSymRef =
      comp()->getSymRefTab()->findOrCreateAutoSymbol(comp()->getMethodSymbol(), 0, TR::Address, true, false, true);

   TR::Register *frameObjectRegister = cg()->allocateRegister();
   generateRegMemInstruction(
         L8RegMem,
         callNode,
         frameObjectRegister,
         generateX86MemoryReference(frameObjectSymRef, cg()),
         cg());

   TR::RealRegister *espReal = cg()->machine()->getRealRegister(TR::RealRegister::esp);
   TR::Register *gcMapPCRegister = cg()->allocateRegister();

   generateRegMemInstruction(
         LEA8RegMem,
         callNode,
         gcMapPCRegister,
         generateX86MemoryReference(espReal, -8, cg()),
         cg());

   // Use "volatile" registers across the call.  Once proper register map support
   // is implemented, r14 and r15 will no longer be volatile and a different pair
   // should be chosen.
   //
   TR::RegisterDependencyConditions *gcMapDeps = generateRegisterDependencyConditions(0, 2, cg());
   gcMapDeps->addPostCondition(frameObjectRegister, TR::RealRegister::r14, cg());
   gcMapDeps->addPostCondition(gcMapPCRegister, TR::RealRegister::r15, cg());
   gcMapDeps->stopAddingPostConditions();

   generateMemRegInstruction(
         S8MemReg,
         callNode,
         generateX86MemoryReference(frameObjectRegister, fe()->getPythonGCMapPCOffsetInFrame(), cg()),
         gcMapPCRegister,
         gcMapDeps,
         cg());

   cg()->stopUsingRegister(frameObjectRegister);
   cg()->stopUsingRegister(gcMapPCRegister);
#endif

   TR::Instruction *instr;
   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (comp()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptrj_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
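For concreteness, with the Win64 fast-call properties set up in Example #12 below, the counts in the comment above work out to pre = 4 integer + 4 float argument registers = 8, and post = 7 volatile GPRs + 6 volatile XMMs = 13, plus one more register when the call returns a value.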
Example #11
void
OMR::Symbol::setSpillTempAuto()
   {
   TR_ASSERT(self()->isAuto(), "assertion failure");
   _flags.set(SpillTemp);
   }
Example #12
TR::AMD64Win64FastCallLinkage::AMD64Win64FastCallLinkage(TR::CodeGenerator *cg)
   : TR::AMD64SystemLinkage(cg)
   {
   uint8_t r, p;

   // Under the SysV and Win64 ABIs the outgoing argument region must be 16- or
   // 32-byte aligned, so the outgoing argument area is reserved in the prologue,
   // where the alignment can be handled properly; that is why
   // ReservesOutgoingArgsInPrologue is needed.
   //
   _properties._properties =
      EightBytePointers   |
      EightByteParmSlots  |
      CallerCleanup       |
      IntegersInRegisters |
      LongsInRegisters    |
      FloatsInRegisters   |
      LinkageRegistersAssignedByCardinalPosition |
      CallerFrameAllocatesSpaceForLinkageRegs |
      ReservesOutgoingArgsInPrologue
      ;

   // Integer arguments
   //
   p=0;
   _properties._firstIntegerArgumentRegister = p;
   _properties._argumentRegisters[p++] = TR::RealRegister::ecx;
   _properties._argumentRegisters[p++] = TR::RealRegister::edx;
   _properties._argumentRegisters[p++] = TR::RealRegister::r8;
   _properties._argumentRegisters[p++] = TR::RealRegister::r9;
   _properties._numIntegerArgumentRegisters = p;

   // Float arguments
   //
   _properties._firstFloatArgumentRegister = p;
   for(r=0; r<=3; r++)
      _properties._argumentRegisters[p++] = TR::RealRegister::xmmIndex(r);
   _properties._numFloatArgumentRegisters = p - _properties._numIntegerArgumentRegisters;

   // Preserved registers.
   //
   p = 0;
   _properties._preservedRegisters[p++] = TR::RealRegister::edi;
   _properties._preservedRegisters[p++] = TR::RealRegister::esi;
   _properties._preservedRegisters[p++] = TR::RealRegister::ebx;
   _properties._preservedRegisters[p++] = TR::RealRegister::r12;
   _properties._preservedRegisters[p++] = TR::RealRegister::r13;
   _properties._preservedRegisters[p++] = TR::RealRegister::r14;
   _properties._preservedRegisters[p++] = TR::RealRegister::r15;

   _properties._numberOfPreservedGPRegisters = p;

   for (r=6; r<=15; r++)
      _properties._preservedRegisters[p++] = TR::RealRegister::xmmIndex(r);

   _properties._numberOfPreservedXMMRegisters = p - _properties._numberOfPreservedGPRegisters;

   _properties._maxRegistersPreservedInPrologue = p;
   _properties._numPreservedRegisters = p;

   // Volatile registers.
   //
   p = 0;
   _properties._volatileRegisters[p++] = TR::RealRegister::eax;
   _properties._volatileRegisters[p++] = TR::RealRegister::ecx;
   _properties._volatileRegisters[p++] = TR::RealRegister::edx;
   _properties._volatileRegisters[p++] = TR::RealRegister::r8;
   _properties._volatileRegisters[p++] = TR::RealRegister::r9;
   _properties._volatileRegisters[p++] = TR::RealRegister::r10;
   _properties._volatileRegisters[p++] = TR::RealRegister::r11;
   _properties._numberOfVolatileGPRegisters = p;

   for(r=0; r<=5; r++)
      _properties._volatileRegisters[p++] = TR::RealRegister::xmmIndex(r);
   _properties._numberOfVolatileXMMRegisters = p - _properties._numberOfVolatileGPRegisters;
   _properties._numVolatileRegisters = p;

   // Return registers.
   //
   _properties._returnRegisters[0] = TR::RealRegister::eax;
   _properties._returnRegisters[1] = TR::RealRegister::xmm0;
   _properties._returnRegisters[2] = TR::RealRegister::NoReg;

   // Scratch registers.
   //
   _properties._scratchRegisters[0] = TR::RealRegister::r10;
   _properties._scratchRegisters[1] = TR::RealRegister::r11;
   _properties._scratchRegisters[2] = TR::RealRegister::eax;
   _properties._numScratchRegisters = 3;

   _properties._preservedRegisterMapForGC = 0;

   _properties._framePointerRegister = TR::RealRegister::ebp;
   _properties._methodMetaDataRegister = TR::RealRegister::NoReg;
   _properties._offsetToFirstParm = RETURN_ADDRESS_SIZE;
   _properties._offsetToFirstLocal = _properties.getAlwaysDedicateFramePointerRegister() ? -GPR_REG_WIDTH : 0;

   memset(_properties._registerFlags, 0, sizeof(_properties._registerFlags));

   // Integer arguments/return
   //
   _properties._registerFlags[TR::RealRegister::ecx] = IntegerArgument;
   _properties._registerFlags[TR::RealRegister::edx] = IntegerArgument;
   _properties._registerFlags[TR::RealRegister::r8] = IntegerArgument;
   _properties._registerFlags[TR::RealRegister::r9] = IntegerArgument;

   _properties._registerFlags[TR::RealRegister::eax] = IntegerReturn;

   // Float arguments/return
   //
   _properties._registerFlags[TR::RealRegister::xmm0] = FloatArgument | FloatReturn;
   for (r=1; r <= 3; r++)
      _properties._registerFlags[TR::RealRegister::xmmIndex(r)] = FloatArgument;

   // Preserved
   //
   _properties._registerFlags[TR::RealRegister::edi] = Preserved;
   _properties._registerFlags[TR::RealRegister::esi] = Preserved;
   _properties._registerFlags[TR::RealRegister::ebx] = Preserved;
   _properties._registerFlags[TR::RealRegister::ebp] = Preserved;
   _properties._registerFlags[TR::RealRegister::esp] = Preserved;
   for (r=12; r <= 15; r++)
      _properties._registerFlags[TR::RealRegister::rIndex(r)] = Preserved;

   p = 0;

   // Volatiles that aren't linkage regs
   //
   if (OMR::X86::AMD64::Machine::enableNewPickRegister())
      {
      if (OMR::X86::AMD64::Machine::numGPRRegsWithheld(cg) == 0)
         {
         _properties._allocationOrder[p++] = TR::RealRegister::eax;
         _properties._allocationOrder[p++] = TR::RealRegister::r10;
         }
      else
         TR_ASSERT(OMR::X86::AMD64::Machine::numRegsWithheld(cg) == 2, "numRegsWithheld: only 0 and 2 currently supported");
      }
   _properties._allocationOrder[p++] = TR::RealRegister::r11;

   // Linkage regs
   //
   _properties._allocationOrder[p++] = TR::RealRegister::ecx;
   _properties._allocationOrder[p++] = TR::RealRegister::edx;
   _properties._allocationOrder[p++] = TR::RealRegister::r8;
   _properties._allocationOrder[p++] = TR::RealRegister::r9;

   // Preserved regs
   //
   _properties._allocationOrder[p++] = TR::RealRegister::edi;
   _properties._allocationOrder[p++] = TR::RealRegister::esi;
   _properties._allocationOrder[p++] = TR::RealRegister::ebx;
   _properties._allocationOrder[p++] = TR::RealRegister::r12;
   _properties._allocationOrder[p++] = TR::RealRegister::r13;
   _properties._allocationOrder[p++] = TR::RealRegister::r14;
   _properties._allocationOrder[p++] = TR::RealRegister::r15;

   TR_ASSERT(p == machine()->getNumGlobalGPRs(), "assertion failure");

   // Linkage FP regs
   //
   if (OMR::X86::AMD64::Machine::enableNewPickRegister())
      {
      if (OMR::X86::AMD64::Machine::numRegsWithheld(cg) == 0)
         {
         _properties._allocationOrder[p++] = TR::RealRegister::xmm0;
         _properties._allocationOrder[p++] = TR::RealRegister::xmm1;
         }
      else
         TR_ASSERT(OMR::X86::AMD64::Machine::numRegsWithheld(cg) == 2, "numRegsWithheld: only 0 and 2 currently supported");
      }
   _properties._allocationOrder[p++] = TR::RealRegister::xmm2;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm3;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm4;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm5;

   // Other volatile FP regs
   //
   _properties._allocationOrder[p++] = TR::RealRegister::xmm6;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm7;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm8;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm9;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm10;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm11;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm12;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm13;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm14;
   _properties._allocationOrder[p++] = TR::RealRegister::xmm15;

   _properties._OutgoingArgAlignment = AMD64_DEFAULT_STACK_ALIGNMENT;

   TR_ASSERT(p == (machine()->getNumGlobalGPRs() + machine()->_numGlobalFPRs), "assertion failure");
   }
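The 16-byte outgoing-argument alignment mentioned in the constructor's opening comment can be illustrated with a small stand-alone sketch; alignOutgoingArgArea is a hypothetical helper for illustration only, not part of the OMR linkage code:

   #include <cstdint>

   // Round an outgoing-argument area up to the next multiple of the stack
   // alignment (assumed to be a power of two), e.g. 20 bytes -> 32 bytes
   // for a 16-byte alignment.
   static uint32_t alignOutgoingArgArea(uint32_t sizeInBytes, uint32_t alignment = 16)
      {
      return (sizeInBytes + alignment - 1) & ~(alignment - 1);
      }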
Example #13
// Build arguments for system linkage dispatch.
//
int32_t TR::AMD64SystemLinkage::buildArgs(
      TR::Node *callNode,
      TR::RegisterDependencyConditions *deps)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
   TR::RealRegister::RegNum noReg = TR::RealRegister::NoReg;
   TR::RealRegister *espReal = machine()->getRealRegister(TR::RealRegister::esp);
   int32_t firstNodeArgument = callNode->getFirstArgumentIndex();
   int32_t lastNodeArgument = callNode->getNumChildren() - 1;
   int32_t offset = 0;
   int32_t sizeOfOutGoingArgs= 0;
   uint16_t numIntArgs = 0,
            numFloatArgs = 0;
   int32_t first, last, direction;
   int32_t numCopiedRegs = 0;
   TR::Register *copiedRegs[TR::X86LinkageProperties::MaxArgumentRegisters];

   if (getProperties().passArgsRightToLeft())
      {
      first = lastNodeArgument;
      last  = firstNodeArgument - 1;
      direction = -1;
      }
   else
      {
      first = firstNodeArgument;
      last  = lastNodeArgument + 1;
      direction = 1;
      }

   // If the dispatch is indirect we must add the VFT register to the preconditions
   // so that it gets register assigned with the other preconditions to the call.
   //
   if (callNode->getOpCode().isIndirect())
      {
      TR::Node *vftChild = callNode->getFirstChild();
      TR_ASSERT(vftChild->getRegister(), "expecting VFT child to be evaluated");
      TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
      deps->addPreCondition(vftChild->getRegister(), scratchRegIndex, cg());
      }

   int32_t i;
   for (i = first; i != last; i += direction)
      {
      TR::parmLayoutResult layoutResult;
      TR::RealRegister::RegNum rregIndex = noReg;
      TR::Node *child = callNode->getChild(i);

      layoutParm(child, sizeOfOutGoingArgs, numIntArgs, numFloatArgs, layoutResult);

      if (layoutResult.abstract & TR::parmLayoutResult::IN_LINKAGE_REG_PAIR)
         {
         // TODO: the AMD64 SysV ABI might put a struct into a pair of linkage registers.
         TR_ASSERT(false, "linkage_reg_pair is not supported yet.\n");
         }
      else if (layoutResult.abstract & TR::parmLayoutResult::IN_LINKAGE_REG)
         {
         TR_RegisterKinds regKind = layoutResult.regs[0].regKind;
         uint32_t regIndex = layoutResult.regs[0].regIndex;
         TR_ASSERT(regKind == TR_GPR || regKind == TR_FPR, "linkage registers include only TR_GPR and TR_FPR\n");
         rregIndex = (regKind == TR_FPR) ? getProperties().getFloatArgumentRegister(regIndex): getProperties().getIntegerArgumentRegister(regIndex);
         }
      else
         {
         offset = layoutResult.offset;
         }

      TR::Register *vreg;
      vreg = cg()->evaluate(child);

      bool needsStackOffsetUpdate = false;
      if (rregIndex != noReg)
         {
         // For NULL JNI reference parameters, it is possible that the NULL value will be evaluated into
         // a different register than the child.  In that case it is not necessary to copy the temporary scratch
         // register across the call.
         //
         if ((child->getReferenceCount() > 1) &&
             (vreg == child->getRegister()))
            {
            TR::Register *argReg = cg()->allocateRegister();
            if (vreg->containsCollectedReference())
               argReg->setContainsCollectedReference();
            generateRegRegInstruction(TR::Linkage::movOpcodes(RegReg, movType(child->getDataType())), child, argReg, vreg, cg());
            vreg = argReg;
            copiedRegs[numCopiedRegs++] = vreg;
            }

         deps->addPreCondition(vreg, rregIndex, cg());
         }
      else
         {
         // Ideally, we would like to push rather than move
         generateMemRegInstruction(TR::Linkage::movOpcodes(MemReg, fullRegisterMovType(vreg)),
                                   child,
                                   generateX86MemoryReference(espReal, offset, cg()),
                                   vreg,
                                   cg());
         }

      cg()->decReferenceCount(child);
      }

   // Now that we're finished making the preconditions, all the interferences
   // are established and we can kill these regs.
   //
   for (i = 0; i < numCopiedRegs; i++)
      cg()->stopUsingRegister(copiedRegs[i]);

   deps->stopAddingPreConditions();

   return sizeOfOutGoingArgs;
   }
Example #14
TR::Register *
TR::AMD64SystemLinkage::buildVolatileAndReturnDependencies(
      TR::Node *callNode,
      TR::RegisterDependencyConditions *deps)
   {

   if (callNode->getOpCode().isIndirect())
      {
      TR::Node *vftChild = callNode->getFirstChild();
      if (vftChild->getRegister() && (vftChild->getReferenceCount() > 1))
         {
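         // The VFT child survives the call; it is handled in the post-conditions below.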
         }
      else
         {
         // VFT child dies here; decrement it early so it doesn't interfere with dummy regs.
         cg()->recursivelyDecReferenceCount(vftChild);
         }
      }

   TR_ASSERT(deps != NULL, "expected register dependencies");

   // Figure out which is the return register.
   //
   TR::RealRegister::RegNum returnRegIndex;
   TR_RegisterKinds returnKind;

   switch (callNode->getDataType())
      {
      case TR::NoType:
         returnRegIndex = TR::RealRegister::NoReg;
         returnKind = TR_NoRegister;
         break;

      case TR::Int8:
      case TR::Int16:
      case TR::Int32:
      case TR::Int64:
      case TR::Address:
         returnRegIndex = getProperties().getIntegerReturnRegister();
         returnKind = TR_GPR;
         break;

      case TR::Float:
      case TR::Double:
         returnRegIndex = getProperties().getFloatReturnRegister();
         returnKind = TR_FPR;
         break;

      case TR::Aggregate:
      default:
         TR_ASSERT(false, "Unrecognized call node data type: #%d", (int)callNode->getDataType());
         break;
      }

   // Kill all non-preserved int and float regs besides the return register.
   //
   int32_t i;
   TR::RealRegister::RegNum scratchIndex = getProperties().getIntegerScratchRegister(1);
   for (i=0; i<getProperties().getNumVolatileRegisters(); i++)
      {
      TR::RealRegister::RegNum regIndex = getProperties()._volatileRegisters[i];

      if (regIndex != returnRegIndex)
         {
         TR_RegisterKinds rk = (i < getProperties()._numberOfVolatileGPRegisters) ? TR_GPR : TR_FPR;
         TR::Register *dummy = cg()->allocateRegister(rk);
         deps->addPostCondition(dummy, regIndex, cg());

         // Note that we don't setPlaceholderReg here.  If this volatile reg is also volatile
         // in the caller's linkage, then that flag doesn't matter much anyway.  If it's preserved
         // in the caller's linkage, then we don't want to set that flag because we want this
         // use of the register to count as a "real" use, thereby motivating the prologue to
         // preserve the register.

         // A scratch register is necessary to call the native without a trampoline.
         //
         if (callNode->getOpCode().isIndirect() || (regIndex != scratchIndex))
            cg()->stopUsingRegister(dummy);
         }
      }

#if defined (PYTHON) && 0
   // Evict the preserved registers across the call
   //
   for (i=0; i<getProperties().getNumberOfPreservedGPRegisters(); i++)
      {
      TR::RealRegister::RegNum regIndex = getProperties()._preservedRegisters[i];

      TR::Register *dummy = cg()->allocateRegister(TR_GPR);
      deps->addPostCondition(dummy, regIndex, cg());

      // Note that we don't setPlaceholderReg here.  If this volatile reg is also volatile
      // in the caller's linkage, then that flag doesn't matter much anyway.  If it's preserved
      // in the caller's linkage, then we don't want to set that flag because we want this
      // use of the register to count as a "real" use, thereby motivating the prologue to
      // preserve the register.

      // A scratch register is necessary to call the native without a trampoline.
      //
      if (callNode->getOpCode().isIndirect() || (regIndex != scratchIndex))
         cg()->stopUsingRegister(dummy);
      }
#endif

   if (callNode->getOpCode().isIndirect())
      {
      TR::Node *vftChild = callNode->getFirstChild();
      if (vftChild->getRegister() && (vftChild->getReferenceCount() > 1))
         {
         // VFT child survives the call, so we must include it in the postconditions.
         deps->addPostCondition(vftChild->getRegister(), TR::RealRegister::NoReg, cg());
         cg()->recursivelyDecReferenceCount(vftChild);
         }
      }

   // Now that everything is dead, we can allocate the return register without
   // interference
   //
   TR::Register *returnRegister;
   if (returnRegIndex)
      {
      TR_ASSERT(returnKind != TR_NoRegister, "assertion failure");

      if (callNode->getDataType() == TR::Address)
         returnRegister = cg()->allocateCollectedReferenceRegister();
      else
         {
         returnRegister = cg()->allocateRegister(returnKind);
         if (callNode->getDataType() == TR::Float)
            returnRegister->setIsSinglePrecision();
         }

      deps->addPostCondition(returnRegister, returnRegIndex, cg());
      }
   else
      returnRegister = NULL;


   // The register dependency list is intentionally left open and needs to be
   // closed by the caller. A child class may call this method and then add more
   // register dependencies; if the list were closed here, the child class would
   // not be able to add them.

   return returnRegister;
   }
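As the closing comment notes, the dependency list returned here is left open and the caller is responsible for closing it. The caller-side pattern, taken from buildDirectDispatch in Example #10, looks like this:

   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();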
Example #15
void
OMR::Symbol::setReinstatedReceiver()
   {
   TR_ASSERT(self()->isParm(), "assertion failure");
   _flags.set(ReinstatedReceiver);
   }
Example #16
void
OMR::Symbol::setPinningArrayPointer()
   {
   TR_ASSERT(self()->isAuto(), "assertion failure");
   _flags.set(PinningArrayPointer);
   }
Example #17
void
OMR::Symbol::setConstString()
   {
   TR_ASSERT(self()->isStatic(), "assertion failure");
   _flags.set(ConstString);
   }
Example #18
void
OMR::Symbol::setAutoAddressTaken()
   {
   TR_ASSERT(self()->isAuto(), "assertion failure");
   _flags.set(AutoAddressTaken);
   }
Example #19
void
OMR::Symbol::setCompiledMethod()
   {
   TR_ASSERT(self()->isStatic(), "assertion failure");
   _flags.set(CompiledMethod);
   }
Example #20
void
OMR::Symbol::setAutoMarkerSymbol()
   {
   TR_ASSERT(self()->isAuto(), "assertion failure");
   _flags.set(AutoMarkerSymbol);
   }
Example #21
void
OMR::Symbol::setCountForRecompile()
   {
   TR_ASSERT(self()->isStatic(), "assertion failure");
   _flags.set(CountForRecompile);
   }
Example #22
void
OMR::Symbol::setVariableSizeSymbol()
   {
   TR_ASSERT(self()->isAuto(), "assertion failure");
   _flags.set(VariableSizeSymbol);
   }
Example #23
void
OMR::Symbol::setGCRPatchPoint()
   {
   TR_ASSERT(self()->isStatic(), "assertion failure");
   _flags.set(GCRPatchPoint);
   }
Example #24
void
OMR::Symbol::setThisTempForObjectCtor()
   {
   TR_ASSERT(self()->isAuto(), "assertion failure");
   _flags.set(ThisTempForObjectCtor);
   }
Example #25
void
OMR::Symbol::setGlobalFragmentShadowSymbol()
   {
   TR_ASSERT(self()->isShadow(), "assertion failure");
   _flags.set(GlobalFragmentShadow);
   }
Example #26
void
OMR::Symbol::setParmHasToBeOnStack()
   {
   TR_ASSERT(self()->isParm(), "assertion failure");
   _flags.set(ParmHasToBeOnStack);
   }
Example #27
void
OMR::Symbol::setPythonNameShadowSymbol()
   {
   TR_ASSERT(self()->isShadow(), "assertion failure");
   _flags.set(PythonName);
   }
Example #28
void
OMR::Symbol::resetReferencedParameter()
   {
   TR_ASSERT(self()->isParm(), "assertion failure");
   _flags.reset(ReferencedParameter);
   }
Example #29
void
OMR::Symbol::setEndInternalControlFlow()
   {
   TR_ASSERT(self()->isLabel(), "assertion failure");
   _flags.set(EndInternalControlFlow);
   }
Example #30
void
TR_Debug::printx(TR::FILE *pOutFile, TR::Snippet *snippet)
   {
   // TODO:AMD64: Clean up these #ifdefs
   if (pOutFile == NULL)
      return;
   switch (snippet->getKind())
      {
#ifdef J9_PROJECT_SPECIFIC
      case TR::Snippet::IsCall:
         print(pOutFile, (TR::X86CallSnippet *)snippet);
         break;
      case TR::Snippet::IsIPicData:
      case TR::Snippet::IsVPicData:
         print(pOutFile, (TR::X86PicDataSnippet *)snippet);
         break;
      case TR::Snippet::IsUnresolvedVirtualCall:
         print(pOutFile, (TR::X86UnresolvedVirtualCallSnippet *)snippet);
         break;
      case TR::Snippet::IsWriteBarrier:
         print(pOutFile, (TR::IA32WriteBarrierSnippet *)snippet);
         break;
#ifdef TR_TARGET_64BIT
      case TR::Snippet::IsWriteBarrierAMD64:
         print(pOutFile, (TR::AMD64WriteBarrierSnippet *)snippet);
         break;
#endif
      case TR::Snippet::IsJNIPause:
         print(pOutFile, (TR::X86JNIPauseSnippet  *)snippet);
         break;
      case TR::Snippet::IsPassJNINull:
         print(pOutFile, (TR::X86PassJNINullSnippet  *)snippet);
         break;
      case TR::Snippet::IsCheckFailure:
         print(pOutFile, (TR::X86CheckFailureSnippet *)snippet);
         break;
      case TR::Snippet::IsCheckFailureWithResolve:
         print(pOutFile, (TR::X86CheckFailureSnippetWithResolve *)snippet);
         break;
      case TR::Snippet::IsBoundCheckWithSpineCheck:
         print(pOutFile, (TR::X86BoundCheckWithSpineCheckSnippet *)snippet);
         break;
      case TR::Snippet::IsSpineCheck:
         print(pOutFile, (TR::X86SpineCheckSnippet *)snippet);
         break;
      case TR::Snippet::IsScratchArgHelperCall:
         print(pOutFile, (TR::X86ScratchArgHelperCallSnippet *)snippet);
         break;
      case TR::Snippet::IsForceRecompilation:
         print(pOutFile, (TR::X86ForceRecompilationSnippet  *)snippet);
         break;
      case TR::Snippet::IsRecompilation:
         print(pOutFile, (TR::X86RecompilationSnippet *)snippet);
         break;
#endif
      case TR::Snippet::IsConstantData:
         print(pOutFile, (TR::IA32ConstantDataSnippet *)snippet);
         break;
      case TR::Snippet::IsData:
         print(pOutFile, (TR::IA32DataSnippet *)snippet);
         break;
      case TR::Snippet::IsDivideCheck:
         print(pOutFile, (TR::X86DivideCheckSnippet  *)snippet);
         break;

#ifdef J9_PROJECT_SPECIFIC
      case TR::Snippet::IsGuardedDevirtual:
         print(pOutFile, (TR::X86GuardedDevirtualSnippet  *)snippet);
         break;
#endif
      case TR::Snippet::IsHelperCall:
         print(pOutFile, (TR::X86HelperCallSnippet  *)snippet);
         break;
      case TR::Snippet::IsFPConvertToInt:
         print(pOutFile, (TR::X86FPConvertToIntSnippet  *)snippet);
         break;
      case TR::Snippet::IsFPConvertToLong:
         print(pOutFile, (TR::X86FPConvertToLongSnippet  *)snippet);
         break;
      case TR::Snippet::IsUnresolvedDataIA32:
         print(pOutFile, (TR::UnresolvedDataSnippet *)snippet);
         break;
#ifdef TR_TARGET_64BIT
      case TR::Snippet::IsUnresolvedDataAMD64:
         print(pOutFile, (TR::UnresolvedDataSnippet *)snippet);
         break;
#endif
      case TR::Snippet::IsRestart:
      default:
         TR_ASSERT(0, "unexpected snippet kind: %d", snippet->getKind());
      }
   }