// Select the runtime glue helper used to dispatch an interpreted (or
// unresolved/AOT) call target for this snippet.
TR_RuntimeHelper
TR::S390CallSnippet::getInterpretedDispatchHelper(TR::SymbolReference *methodSymRef, TR::DataType type)
   {
   TR::Compilation *comp = cg()->comp();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();

   // An OSR-induction helper is dispatched via its own symbol reference number.
   const bool inducesOSR = methodSymbol->isHelper() && methodSymRef->isOSRInductionHelper();

   if (!methodSymRef->isUnresolved() && !comp->compileRelocatableCode())
      {
      if (inducesOSR)
         return (TR_RuntimeHelper) methodSymRef->getReferenceNumber();
      return getHelper(methodSymbol, type, cg());
      }

   // Unresolved or AOT targets route through the interpreter glue that
   // matches the kind of call.
   TR_ASSERT(!inducesOSR || !comp->compileRelocatableCode(),
             "calling jitInduceOSR is not supported yet under AOT\n");

   if (methodSymbol->isSpecial())
      return TR_S390interpreterUnresolvedSpecialGlue;
   if (methodSymbol->isStatic())
      return TR_S390interpreterUnresolvedStaticGlue;
   return TR_S390interpreterUnresolvedDirectVirtualGlue;
   }
// resolved casts that are not to abstract, interface, or array need a super test bool OMR::TreeEvaluator::instanceOfOrCheckCastNeedSuperTest(TR::Node * node, TR::CodeGenerator *cg) { TR::Node *castClassNode = node->getSecondChild(); TR::MethodSymbol *helperSym = node->getSymbol()->castToMethodSymbol(); TR::SymbolReference *castClassSymRef = castClassNode->getSymbolReference(); if (!TR::TreeEvaluator::isStaticClassSymRef(castClassSymRef)) { // We could theoretically do a super test on something with no sym, but it would require significant // changes to platform code. The benefit is little at this point (shows up from reference arraycopy reductions) if (cg->supportsInliningOfIsInstance() && node->getOpCodeValue() == TR::instanceof && node->getSecondChild()->getOpCodeValue() != TR::loadaddr) return true; else return false; } TR::StaticSymbol *castClassSym = castClassSymRef->getSymbol()->getStaticSymbol(); if (castClassSymRef->isUnresolved()) { return false; } else { TR_OpaqueClassBlock * clazz; // If the class is a regular class (i.e., not an interface nor an array) and // not known to be a final class, an inline superclass test can be generated. // If the helper does not preserve all the registers there will not be // enough registers to do the superclass test inline. // Also, don't generate the superclass test if optimizing for space. // if (castClassSym && (clazz = (TR_OpaqueClassBlock *) castClassSym->getStaticAddress()) && !TR::Compiler->cls.isClassArray(cg->comp(), clazz) && !TR::Compiler->cls.isInterfaceClass(cg->comp(), clazz) && !TR::Compiler->cls.isClassFinal(cg->comp(), clazz) && helperSym->preservesAllRegisters() && !cg->comp()->getOption(TR_OptimizeForSpace)) return true; } return false; }
bool OMR::SymbolReference::isUnresolvedMethodInCP(TR::Compilation *c) { TR_ASSERT(c->compileRelocatableCode() && self()->getSymbol()->isMethod(), "isUnresolvedMethodInCP only callable in AOT compiles on method symbols"); if (!self()->isUnresolved()) return false; if (c->getOption(TR_DisablePeekAOTResolutions)) return true; TR::MethodSymbol *sym = self()->getSymbol()->getMethodSymbol(); if (sym->isStatic()) return self()->getOwningMethod(c)->getUnresolvedStaticMethodInCP(self()->getCPIndex()); else if (sym->isSpecial()) return self()->getOwningMethod(c)->getUnresolvedSpecialMethodInCP(self()->getCPIndex()); else if (sym->isVirtual()) return self()->getOwningMethod(c)->getUnresolvedVirtualMethodInCP(self()->getCPIndex()); else return true; }
// Build a direct call to a system-linkage (native) target on IA32.
// Arguments are pushed on the system stack; volatile/return register
// dependencies hang off a label that follows the call. Returns the virtual
// register holding the result (NULL for a void call).
//
// Fixes: removed unused locals (callOpCodeValue, targetAddressReg,
// targetAddressMem) and made the negation of the unsigned argSize explicit.
TR::Register *TR::IA32SystemLinkage::buildDirectDispatch(TR::Node *callNode, bool spillFPRegs)
   {
   TR::RealRegister *stackPointerReg = machine()->getX86RealRegister(TR::RealRegister::esp);
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = callNode->getSymbol()->castToMethodSymbol();

   if (!methodSymbol->isHelper())
      diagnostic("Building call site for %s\n", methodSymbol->getMethod()->signature(trMemory()));

   // Volatile and return registers are attached to the label emitted after
   // the call, not to the call instruction itself.
   TR::RegisterDependencyConditions *deps;
   deps = generateRegisterDependencyConditions((uint8_t)0, (uint8_t)6, cg());
   TR::Register *returnReg = buildVolatileAndReturnDependencies(callNode, deps);
   deps->stopAddingConditions();

   // Arguments go on the system stack, so no register dependencies are
   // needed while evaluating them.
   TR::RegisterDependencyConditions *dummy = generateRegisterDependencyConditions((uint8_t)0, (uint8_t)0, cg());
   uint32_t argSize = buildArgs(callNode, dummy);

   // Call-out. If the callee cleans up its own arguments the call adjusts
   // the frame pointer by the popped argument bytes.
   int32_t stackAdjustment = cg()->getProperties().getCallerCleanup() ? 0 : -static_cast<int32_t>(argSize);
   TR::X86ImmInstruction* instr = generateImmSymInstruction(CALLImm4, callNode, (uintptr_t)methodSymbol->getMethodAddress(), methodSymRef, cg());
   instr->setAdjustsFramePointerBy(stackAdjustment);

   if (cg()->getProperties().getCallerCleanup() && argSize > 0)
      {
      // Clean up arguments: use the sign-extended byte-immediate ADD form
      // when the adjustment fits in one byte.
      //
      generateRegImmInstruction(
         (argSize <= 127) ? ADD4RegImms : ADD4RegImm4,
         callNode,
         stackPointerReg,
         argSize,
         cg());
      }

   // Label denoting end of dispatch code sequence; dependencies are on
   // this label rather than on the call
   //
   TR::LabelSymbol *endSystemCallSequence = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, endSystemCallSequence, deps, cg());

   // Stop using the killed registers that are not going to persist
   //
   if (deps)
      stopUsingKilledRegisters(deps, returnReg);

   // If the method returns a floating point value that is not used, insert a dummy store to
   // eventually pop the value from the floating point stack.
   //
   if ((callNode->getDataType() == TR::Float || callNode->getDataType() == TR::Double) && callNode->getReferenceCount() == 1)
      {
      generateFPSTiST0RegRegInstruction(FSTRegReg, callNode, returnReg, returnReg, cg());
      }

   if (cg()->enableRegisterAssociations())
      associatePreservedRegisters(deps, returnReg);

   return returnReg;
   }
// Build a direct call to a system-linkage (native) target on AMD64.
// Returns the virtual register holding the call result (NULL for void calls).
// NOTE(review): spillFPRegs is unused on this path — confirm callers do not
// expect FP spilling here.
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   // NOTE(review): scratchReg can remain NULL if the scratch register is not in
   // the post-conditions. The known-address path asserts on it, but the
   // unconditional stopUsingRegister(scratchReg) near the end is reached on both
   // paths — confirm a NULL argument is tolerated there.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i=0; i<post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

#if defined(PYTHON) && 0
   // For Python, store the instruction that contains the GC map at this site into
   // the frame object.
   //
   TR::SymbolReference *frameObjectSymRef =
      comp()->getSymRefTab()->findOrCreateAutoSymbol(comp()->getMethodSymbol(), 0, TR::Address, true, false, true);

   TR::Register *frameObjectRegister = cg()->allocateRegister();
   generateRegMemInstruction(
      L8RegMem,
      callNode,
      frameObjectRegister,
      generateX86MemoryReference(frameObjectSymRef, cg()),
      cg());

   TR::RealRegister *espReal = cg()->machine()->getX86RealRegister(TR::RealRegister::esp);
   TR::Register *gcMapPCRegister = cg()->allocateRegister();

   generateRegMemInstruction(
      LEA8RegMem,
      callNode,
      gcMapPCRegister,
      generateX86MemoryReference(espReal, -8, cg()),
      cg());

   // Use "volatile" registers across the call. Once proper register map support
   // is implemented, r14 and r15 will no longer be volatile and a different pair
   // should be chosen.
   //
   TR::RegisterDependencyConditions *gcMapDeps = generateRegisterDependencyConditions(0, 2, cg());
   gcMapDeps->addPostCondition(frameObjectRegister, TR::RealRegister::r14, cg());
   gcMapDeps->addPostCondition(gcMapPCRegister, TR::RealRegister::r15, cg());
   gcMapDeps->stopAddingPostConditions();

   generateMemRegInstruction(
      S8MemReg,
      callNode,
      generateX86MemoryReference(frameObjectRegister, fe()->getPythonGCMapPCOffsetInFrame(), cg()),
      gcMapPCRegister,
      gcMapDeps,
      cg());

   cg()->stopUsingRegister(frameObjectRegister);
   cg()->stopUsingRegister(gcMapPCRegister);
#endif

   TR::Instruction *instr;
   if (methodSymbol->getMethodAddress())
      {
      // Target address known at compile time: load the 64-bit absolute address
      // into the scratch register and call through it.
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (TR::Options::getCmdLineOptions()->getOption(TR_EmitRelocatableELFFile))
         {
         // Mark the address load so the relocatable-ELF writer can fix it up.
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      // No known address: emit a direct call through the symbol reference.
      // NOTE(review): this path casts with uintptrj_t while the path above uses
      // uintptr_t — presumably the same typedef; confirm and unify.
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptrj_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   // Post-conditions (volatiles and the return register) hang off a label
   // following the call.
   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
// Build a direct system-linkage call on AArch64: reserve the outgoing
// argument area, emit BL to the target, release the argument area, and
// return the linkage-mandated result register (NULL for a void call).
//
// Fix: the BL previously passed "callSymRef ? callSymRef :
// callNode->getSymbolReference()", but callSymRef is initialized from
// exactly that expression above, so the ternary was a no-op; it is removed.
TR::Register *TR::ARM64SystemLinkage::buildDirectDispatch(TR::Node *callNode)
   {
   TR::SymbolReference *callSymRef = callNode->getSymbolReference();
   const TR::ARM64LinkageProperties &pp = getProperties();
   TR::RealRegister *sp = cg()->machine()->getRealRegister(pp.getStackPointerRegister());

   TR::RegisterDependencyConditions *dependencies =
      new (trHeapMemory()) TR::RegisterDependencyConditions(
         pp.getNumberOfDependencyGPRegisters(),
         pp.getNumberOfDependencyGPRegisters(),
         trMemory());

   int32_t totalSize = buildArgs(callNode, dependencies);

   // Reserve stack space for memory-passed arguments. SUB takes only a
   // 12-bit unsigned immediate, so larger frames are unsupported.
   if (totalSize > 0)
      {
      if (constantIsUnsignedImm12(totalSize))
         {
         generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::subimmx, callNode, sp, sp, totalSize);
         }
      else
         {
         TR_ASSERT_FATAL(false, "Too many arguments.");
         }
      }

   TR::MethodSymbol *callSymbol = callSymRef->getSymbol()->castToMethodSymbol();

   generateImmSymInstruction(cg(), TR::InstOpCode::bl, callNode,
      (uintptr_t)callSymbol->getMethodAddress(),
      dependencies, callSymRef, NULL);

   // BL clobbers the link register.
   cg()->machine()->setLinkRegisterKilled(true);

   // Release the outgoing argument area.
   if (totalSize > 0)
      {
      if (constantIsUnsignedImm12(totalSize))
         {
         generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::addimmx, callNode, sp, sp, totalSize);
         }
      else
         {
         TR_ASSERT_FATAL(false, "Too many arguments.");
         }
      }

   // Select the return register mandated by the linkage for the call's type.
   TR::Register *retReg;
   switch (callNode->getOpCodeValue())
      {
      case TR::icall:
      case TR::iucall:
         retReg = dependencies->searchPostConditionRegister(
                     pp.getIntegerReturnRegister());
         break;
      case TR::lcall:
      case TR::lucall:
      case TR::acall:
         retReg = dependencies->searchPostConditionRegister(
                     pp.getLongReturnRegister());
         break;
      case TR::fcall:
      case TR::dcall:
         retReg = dependencies->searchPostConditionRegister(
                     pp.getFloatReturnRegister());
         break;
      case TR::call:
         retReg = NULL;
         break;
      default:
         retReg = NULL;
         TR_ASSERT(false, "Unsupported direct call Opcode.");
      }

   callNode->setRegister(retReg);
   return retReg;
   }
// Return the use-only alias bit vector for this symbol reference, or NULL/0
// when the symbol has no use-only aliases. Classification is by symbol kind.
TR_BitVector *
OMR::SymbolReference::getUseonlyAliasesBV(TR::SymbolReferenceTable * symRefTab)
   {
   int32_t kind = _symbol->getKind();
   switch (kind)
      {
      case TR::Symbol::IsMethod:
         {
         TR::MethodSymbol * methodSymbol = _symbol->castToMethodSymbol();

         // Aliasing for potentialOSRPointHelper
         //
         // A potentialOSRPointHelper call is an exception point that may go to OSR catch block ( see
         // Node API exceptionsRaised), the control flow constraint imposed by the exception edge will
         // apply to all the global optimizations that may move things around. Local optimizations also
         // ask exceptionsRaised to determine if a code motion across certain point is safe. So aliasing
         // is not necessary. However, we'd like to add aliasing here to cause the compiler to be more
         // conservative about reordering this helper with other operations. The aliasing can always be
         // relaxed when necessary.
         //
         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::potentialOSRPointHelperSymbol))
            {
            return &symRefTab->aliasBuilder.defaultMethodUseAliases();
            }

         // Aliasing for osrFearPointHelper
         //
         // Preventing the reordering of fear point helper w.r.t. OSR points and yield/invalidation points is
         // the minimum requirement of aliasing for OSR fear point helper. These reorderings would in almost
         // all cases be naturally disallowed simply due to the fact that the fear point is represented as a
         // call, which even without aliasing could e.g. perform I/O. Thus the following is a highly conservative
         // aliasing and can be relaxed later when necessary
         //
         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::osrFearPointHelperSymbol))
            {
            return &symRefTab->aliasBuilder.defaultMethodUseAliases();
            }

         // Non-helper method calls get the conservative default use aliases.
         if (!methodSymbol->isHelper())
            {
            return &symRefTab->aliasBuilder.defaultMethodUseAliases();
            }

         switch (self()->getReferenceNumber())
            {
            case TR_asyncCheck:
               // helpers that don't throw have no use aliases
               return 0;

            // All of the following helpers can raise exceptions and therefore
            // fall through to the conservative default.
            case TR_instanceOf:
            case TR_checkAssignable:
            case TR_monitorEntry:
            case TR_transactionEntry:
            case TR_reportFinalFieldModified:
            case TR_reportMethodEnter:
            case TR_reportStaticMethodEnter:
            case TR_reportMethodExit:
            case TR_acquireVMAccess:
            case TR_throwCurrentException:
            case TR_releaseVMAccess:
            case TR_stackOverflow:
            case TR_writeBarrierStore:
            case TR_writeBarrierStoreGenerational:
            case TR_writeBarrierStoreGenerationalAndConcurrentMark:
            case TR_writeBarrierBatchStore:
            case TR_typeCheckArrayStore:
            case TR_arrayStoreException:
            case TR_arrayBoundsCheck:
            case TR_checkCast:
            case TR_divCheck:
            case TR_overflowCheck:
            case TR_nullCheck:
            case TR_methodTypeCheck:
            case TR_incompatibleReceiver:
            case TR_IncompatibleClassChangeError:
            case TR_aThrow:
            case TR_aNewArray:
            case TR_monitorExit:
            case TR_transactionExit:
            case TR_newObject:
            case TR_newObjectNoZeroInit:
            case TR_newArray:
            case TR_multiANewArray:
            default:
               return &symRefTab->aliasBuilder.defaultMethodUseAliases();
            }
         }
      case TR::Symbol::IsResolvedMethod:
         {
         TR::ResolvedMethodSymbol * resolvedMethodSymbol = _symbol->castToResolvedMethodSymbol();
         if (!TR::comp()->getOption(TR_EnableHCR))
            {
            switch (resolvedMethodSymbol->getRecognizedMethod())
               {
#ifdef J9_PROJECT_SPECIFIC
               // Recognized side-effect-free math/bit-conversion methods have
               // no use-only aliases (unless HCR may redefine them).
               case TR::java_lang_Double_longBitsToDouble:
               case TR::java_lang_Double_doubleToLongBits:
               case TR::java_lang_Float_intBitsToFloat:
               case TR::java_lang_Float_floatToIntBits:
               case TR::java_lang_Double_doubleToRawLongBits:
               case TR::java_lang_Float_floatToRawIntBits:
               case TR::java_lang_Math_sqrt:
               case TR::java_lang_StrictMath_sqrt:
               case TR::java_lang_Math_sin:
               case TR::java_lang_StrictMath_sin:
               case TR::java_lang_Math_cos:
               case TR::java_lang_StrictMath_cos:
               case TR::java_lang_Math_max_I:
               case TR::java_lang_Math_min_I:
               case TR::java_lang_Math_max_L:
               case TR::java_lang_Math_min_L:
               case TR::java_lang_Math_abs_I:
               case TR::java_lang_Math_abs_L:
               case TR::java_lang_Math_abs_F:
               case TR::java_lang_Math_abs_D:
               case TR::java_lang_Math_pow:
               case TR::java_lang_StrictMath_pow:
               case TR::java_lang_Math_exp:
               case TR::java_lang_StrictMath_exp:
               case TR::java_lang_Math_log:
               case TR::java_lang_StrictMath_log:
               case TR::java_lang_Math_floor:
               case TR::java_lang_Math_ceil:
               case TR::java_lang_Math_copySign_F:
               case TR::java_lang_Math_copySign_D:
               case TR::java_lang_StrictMath_floor:
               case TR::java_lang_StrictMath_ceil:
               case TR::java_lang_StrictMath_copySign_F:
               case TR::java_lang_StrictMath_copySign_D:
                  return NULL;
#endif
               default:
                  break;
               }
            }
         return &symRefTab->aliasBuilder.defaultMethodUseAliases();
         }
      case TR::Symbol::IsAutomatic:
      case TR::Symbol::IsParameter:
         // Locals used in catch blocks are aliased to all methods that may throw.
         if (symRefTab->aliasBuilder.catchLocalUseSymRefs().isSet(self()->getReferenceNumber()))
            return &symRefTab->aliasBuilder.methodsThatMayThrow();

         return 0;
      default:
         //TR_ASSERT(0, "getUseOnlyAliases: unexpected symbol kind ");
         return 0;
      }
   }
TR_BitVector * OMR::SymbolReference::getUseDefAliasesBV(bool isDirectCall, bool includeGCSafePoint) { TR::Compilation *comp = TR::comp(); TR::Region &aliasRegion = comp->aliasRegion(); int32_t bvInitialSize = comp->getSymRefCount(); TR_BitVectorGrowable growability = growable; // allow more than one shadow for an array type. Used by LoopAliasRefiner const bool supportArrayRefinement=true; int32_t kind = _symbol->getKind(); TR::SymbolReferenceTable * symRefTab = comp->getSymRefTab(); // !!! NOTE !!! // THERE IS A COPY OF THIS LOGIC IN sharesSymbol // if (!self()->reallySharesSymbol(comp)) { switch (kind) { case TR::Symbol::IsShadow: case TR::Symbol::IsStatic: { // For unresolved constant dynamic, we need to invoke a Java bootstrap method, // which can have arbitrary side effects, so the aliasing should be conservative here. // isConstObjectRef now returns true for condy, so we add an explicit condition, // more like a short-circuit, to say if we are unresolved and not isConstObjectRef // (this is the same as before), or if we are unresolved and condy // (this is the extra condition added), we would return conservative aliases. 
if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) || _symbol->isVolatile() || self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || _symbol->isUnsafeShadowSymbol() || (_symbol->isArrayShadowSymbol() && comp->getMethodSymbol()->hasVeryRefinedAliasSets())) { // getUseDefAliases might not return NULL } else if (!symRefTab->aliasBuilder.mutableGenericIntShadowHasBeenCreated()) { // getUseDefAliases must return NULL return NULL; } else if (kind == TR::Symbol::IsStatic && !symRefTab->aliasBuilder.litPoolGenericIntShadowHasBeenCreated()) { // getUseDefAliases must return NULL return NULL; } break; } } } // now do stuff for various kinds of symbols // switch (kind) { case TR::Symbol::IsMethod: { TR::MethodSymbol * methodSymbol = _symbol->castToMethodSymbol(); if (!methodSymbol->isHelper()) return symRefTab->aliasBuilder.methodAliases(self()); if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arraySetSymbol) || symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::osrFearPointHelperSymbol) || symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::potentialOSRPointHelperSymbol)) { return &symRefTab->aliasBuilder.defaultMethodDefAliases(); } if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arrayCmpSymbol)) return 0; switch (self()->getReferenceNumber()) { case TR_methodTypeCheck: case TR_nullCheck: return &symRefTab->aliasBuilder.defaultMethodDefAliasesWithoutImmutable(); case TR_arrayBoundsCheck: case TR_checkCast: case TR_divCheck: case TR_typeCheckArrayStore: case TR_arrayStoreException: case TR_incompatibleReceiver: case TR_IncompatibleClassChangeError: case TR_reportFinalFieldModified: case TR_reportMethodEnter: case TR_reportStaticMethodEnter: case TR_reportMethodExit: case TR_acquireVMAccess: case TR_instanceOf: case TR_checkAssignable: case TR_throwCurrentException: case TR_releaseVMAccess: case TR_stackOverflow: case TR_writeBarrierStore: case TR_writeBarrierBatchStore: case 
TR_jitProfileAddress: case TR_jitProfileWarmCompilePICAddress: case TR_jitProfileValue: case TR_jitProfileLongValue: case TR_jitProfileBigDecimalValue: case TR_jitProfileParseBuffer: return 0; case TR_asyncCheck: case TR_writeBarrierClassStoreRealTimeGC: case TR_writeBarrierStoreRealTimeGC: case TR_aNewArray: case TR_newObject: case TR_newObjectNoZeroInit: case TR_newArray: case TR_multiANewArray: if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint) return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers(); else return 0; case TR_aThrow: return 0; // The monitor exit symbol needs to be aliased with all fields in the // current class to ensure that all references to fields are evaluated // before the monitor exit case TR_monitorExit: case TR_monitorEntry: case TR_transactionExit: case TR_transactionEntry: default: // The following is the place to check for // a use of killsAllMethodSymbolRef... However, // it looks like the default action is sufficient. //if (symRefTab->findKillsAllMethodSymbolRef() == self()) // { // } return &symRefTab->aliasBuilder.defaultMethodDefAliases(); } } case TR::Symbol::IsResolvedMethod: { TR::ResolvedMethodSymbol * resolvedMethodSymbol = _symbol->castToResolvedMethodSymbol(); if (!comp->getOption(TR_EnableHCR)) { switch (resolvedMethodSymbol->getRecognizedMethod()) { #ifdef J9_PROJECT_SPECIFIC case TR::java_lang_System_arraycopy: { TR_BitVector * aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs(); if (comp->generateArraylets()) *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs(); return aliases; } if (resolvedMethodSymbol->isPureFunction()) return NULL; case TR::java_lang_Double_longBitsToDouble: case TR::java_lang_Double_doubleToLongBits: case TR::java_lang_Float_intBitsToFloat: case TR::java_lang_Float_floatToIntBits: case TR::java_lang_Double_doubleToRawLongBits: case TR::java_lang_Float_floatToRawIntBits: case 
TR::java_lang_Math_sqrt: case TR::java_lang_StrictMath_sqrt: case TR::java_lang_Math_sin: case TR::java_lang_StrictMath_sin: case TR::java_lang_Math_cos: case TR::java_lang_StrictMath_cos: case TR::java_lang_Math_max_I: case TR::java_lang_Math_min_I: case TR::java_lang_Math_max_L: case TR::java_lang_Math_min_L: case TR::java_lang_Math_abs_I: case TR::java_lang_Math_abs_L: case TR::java_lang_Math_abs_F: case TR::java_lang_Math_abs_D: case TR::java_lang_Math_pow: case TR::java_lang_StrictMath_pow: case TR::java_lang_Math_exp: case TR::java_lang_StrictMath_exp: case TR::java_lang_Math_log: case TR::java_lang_StrictMath_log: case TR::java_lang_Math_floor: case TR::java_lang_Math_ceil: case TR::java_lang_Math_copySign_F: case TR::java_lang_Math_copySign_D: case TR::java_lang_StrictMath_floor: case TR::java_lang_StrictMath_ceil: case TR::java_lang_StrictMath_copySign_F: case TR::java_lang_StrictMath_copySign_D: case TR::com_ibm_Compiler_Internal__TR_Prefetch: case TR::java_nio_Bits_keepAlive: if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint) return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers(); else return 0; // no aliasing on DFP dummy stubs case TR::java_math_BigDecimal_DFPPerformHysteresis: case TR::java_math_BigDecimal_DFPUseDFP: case TR::java_math_BigDecimal_DFPHWAvailable: case TR::java_math_BigDecimal_DFPCompareTo: case TR::java_math_BigDecimal_DFPUnscaledValue: case TR::com_ibm_dataaccess_DecimalData_DFPFacilityAvailable: case TR::com_ibm_dataaccess_DecimalData_DFPUseDFP: case TR::com_ibm_dataaccess_DecimalData_DFPConvertPackedToDFP: case TR::com_ibm_dataaccess_DecimalData_DFPConvertDFPToPacked: case TR::com_ibm_dataaccess_DecimalData_createZeroBigDecimal: case TR::com_ibm_dataaccess_DecimalData_getlaside: case TR::com_ibm_dataaccess_DecimalData_setlaside: case TR::com_ibm_dataaccess_DecimalData_getflags: case TR::com_ibm_dataaccess_DecimalData_setflags: if (!( #ifdef TR_TARGET_S390 TR::Compiler->target.cpu.getS390SupportsDFP() || 
#endif TR::Compiler->target.cpu.supportsDecimalFloatingPoint()) || comp->getOption(TR_DisableDFP)) return NULL; #endif //J9_PROJECT_SPECIFIC default: break; } } #ifdef J9_PROJECT_SPECIFIC TR_ResolvedMethod * method = resolvedMethodSymbol->getResolvedMethod(); TR_PersistentMethodInfo * methodInfo = TR_PersistentMethodInfo::get(method); if (methodInfo && (methodInfo->hasRefinedAliasSets() || comp->getMethodHotness() >= veryHot || resolvedMethodSymbol->hasVeryRefinedAliasSets()) && (method->isStatic() || method->isFinal() || isDirectCall)) { TR_BitVector * aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint) *aliases |= symRefTab->aliasBuilder.gcSafePointSymRefNumbers(); if (methodInfo->doesntKillAnything() && !comp->getOption(TR_DisableRefinedAliases)) return aliases; if ((resolvedMethodSymbol->hasVeryRefinedAliasSets() || comp->getMethodHotness() >= hot) && !debug("disableVeryRefinedCallAliasSets")) { TR_BitVector * exactAliases = 0; if (resolvedMethodSymbol->hasVeryRefinedAliasSets()) exactAliases = symRefTab->aliasBuilder.getVeryRefinedCallAliasSets(resolvedMethodSymbol); else { resolvedMethodSymbol->setHasVeryRefinedAliasSets(true); List<void> methodsPeeked(comp->trMemory()); exactAliases = addVeryRefinedCallAliasSets(resolvedMethodSymbol, aliases, &methodsPeeked); symRefTab->aliasBuilder.setVeryRefinedCallAliasSets(resolvedMethodSymbol, exactAliases); } if (exactAliases) { return exactAliases; } } // From here on, we're just checking refined alias info. // If refined aliases are disabled, return the conservative answer // we would have returned had we never attempted to use refined // aliases at all. 
// if (comp->getOption(TR_DisableRefinedAliases)) return symRefTab->aliasBuilder.methodAliases(self()); if (!methodInfo->doesntKillAddressArrayShadows()) { symRefTab->aliasBuilder.addAddressArrayShadows(aliases); if (comp->generateArraylets()) aliases->set(symRefTab->getArrayletShadowIndex(TR::Address)); } if (!methodInfo->doesntKillIntArrayShadows()) { symRefTab->aliasBuilder.addIntArrayShadows(aliases); if (comp->generateArraylets()) { aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32)); } } if (!methodInfo->doesntKillNonIntPrimitiveArrayShadows()) { symRefTab->aliasBuilder.addNonIntPrimitiveArrayShadows(aliases); if (comp->generateArraylets()) { aliases->set(symRefTab->getArrayletShadowIndex(TR::Int8)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Int16)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Int64)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Float)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Double)); } } if (!methodInfo->doesntKillAddressFields()) *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs(); if (!methodInfo->doesntKillIntFields()) *aliases |= symRefTab->aliasBuilder.intShadowSymRefs(); if (!methodInfo->doesntKillNonIntPrimitiveFields()) *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs(); if (!methodInfo->doesntKillAddressStatics()) *aliases |= symRefTab->aliasBuilder.addressStaticSymRefs(); if (!methodInfo->doesntKillIntStatics()) *aliases |= symRefTab->aliasBuilder.intStaticSymRefs(); if (!methodInfo->doesntKillNonIntPrimitiveStatics()) *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveStaticSymRefs(); TR_BitVector *methodAliases = symRefTab->aliasBuilder.methodAliases(self()); *aliases &= *methodAliases; return aliases; } #endif return symRefTab->aliasBuilder.methodAliases(self()); } case TR::Symbol::IsShadow: { if ((self()->isUnresolved() && !_symbol->isConstObjectRef()) || _symbol->isVolatile() || 
self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || (_symbol->isUnsafeShadowSymbol() && !self()->reallySharesSymbol())) { if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber())) { TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); *aliases |= comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable(); *aliases -= symRefTab->aliasBuilder.cpSymRefs(); return aliases; } else return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable(); } TR_BitVector *aliases = NULL; if (_symbol == symRefTab->findGenericIntShadowSymbol()) { aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs(); if (comp->generateArraylets()) *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers(); #ifdef J9_PROJECT_SPECIFIC *aliases |= symRefTab->aliasBuilder.unresolvedShadowSymRefs(); #endif if (symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing()) { *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.intShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs(); } aliases->set(self()->getReferenceNumber()); return aliases; } if (self()->reallySharesSymbol(comp)) { aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); self()->setSharedShadowAliases(aliases, symRefTab); } if (symRefTab->findGenericIntShadowSymbol()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); self()->setLiteralPoolAliases(aliases, symRefTab); if 
(symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing() || self()->isUnresolved()) { *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs(); } } if (_symbol->isArrayShadowSymbol() && symRefTab->findGenericIntShadowSymbol()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs(); if (supportArrayRefinement && self()->getIndependentSymRefs()) *aliases -= *self()->getIndependentSymRefs(); } #ifdef J9_PROJECT_SPECIFIC // make TR::PackedDecimal aliased with TR::Int8(byte) if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::PackedDecimal) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); aliases->set(symRefTab->getArrayShadowIndex(TR::Int8)); } //the other way around. 
if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::Int8) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); aliases->set(symRefTab->getArrayShadowIndex(TR::PackedDecimal)); } #endif // alias vector arrays shadows with corresponding scalar array shadows if (_symbol->isArrayShadowSymbol() && _symbol->getDataType().isVector()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().vectorToScalar())); } // the other way around if (_symbol->isArrayShadowSymbol() && !_symbol->getDataType().isVector()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().scalarToVector())); } if (_symbol->isArrayShadowSymbol() && !symRefTab->aliasBuilder.immutableArrayElementSymRefs().isEmpty()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); TR::DataType type = _symbol->getDataType(); TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs()); int32_t symRefNum; while (bvi.hasMoreElements()) { symRefNum = bvi.getNextElement(); if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type) aliases->set(symRefNum); } } if (_symbol->isArrayShadowSymbol() && supportArrayRefinement && comp->getMethodSymbol()->hasVeryRefinedAliasSets()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); TR::DataType type = _symbol->getDataType(); TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs()); int32_t symRefNum; while (bvi.hasMoreElements()) { symRefNum = bvi.getNextElement(); if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type) aliases->set(symRefNum); } if (self()->getIndependentSymRefs()) *aliases -= *self()->getIndependentSymRefs(); return aliases; } if 
(aliases) aliases->set(self()->getReferenceNumber()); if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber())) *aliases -= symRefTab->aliasBuilder.cpSymRefs(); else if (symRefTab->aliasBuilder.cpSymRefs().get(self()->getReferenceNumber())) *aliases -= symRefTab->aliasBuilder.unsafeArrayElementSymRefs(); return aliases; } case TR::Symbol::IsStatic: { // For unresolved constant dynamic, we need to invoke a Java bootstrap method, // which can have arbitrary side effects, so the aliasing should be conservative here. // isConstObjectRef now returns true for condy, so we add an explicit condition, // more like a short-circuit, to say if we are unresolved and not isConstObjectRef // (this is the same as before), or if we are unresolved and condy // (this is the extra condition added), we would return conservative aliases. if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) || self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || _symbol->isVolatile()) { return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliases(); } TR_BitVector *aliases = NULL; if (self()->reallySharesSymbol(comp)) { aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); self()->setSharedStaticAliases(aliases, symRefTab); } if (symRefTab->findGenericIntShadowSymbol()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); self()->setLiteralPoolAliases(aliases, symRefTab); } if (aliases) aliases->set(self()->getReferenceNumber()); return aliases; } case TR::Symbol::IsMethodMetaData: { TR_BitVector *aliases = NULL; return aliases; } default: //TR_ASSERT(0, "getUseDefAliasing called for non method"); if (comp->generateArraylets() && comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers().get(self()->getReferenceNumber()) && includeGCSafePoint) return &comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers(); else return 0; } }
void TR_Debug::print(TR::FILE *pOutFile, TR::S390CallSnippet * snippet) { uint8_t * bufferPos = snippet->getSnippetLabel()->getCodeLocation(); TR::Node * callNode = snippet->getNode(); TR::SymbolReference * methodSymRef = snippet->getRealMethodSymbolReference(); if(!methodSymRef) methodSymRef = callNode->getSymbolReference(); TR::MethodSymbol * methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol(); TR::SymbolReference * glueRef; int8_t padbytes = snippet->getPadBytes(); printSnippetLabel(pOutFile, snippet->getSnippetLabel(), bufferPos, methodSymRef->isUnresolved() ? "Unresolved Call Snippet" : "Call Snippet"); bufferPos = printS390ArgumentsFlush(pOutFile, callNode, bufferPos, snippet->getSizeOfArguments()); if (methodSymRef->isUnresolved() || _comp->compileRelocatableCode()) { if (methodSymbol->isSpecial()) { glueRef = _cg->getSymRef(TR_S390interpreterUnresolvedSpecialGlue); } else if (methodSymbol->isStatic()) { glueRef = _cg->getSymRef(TR_S390interpreterUnresolvedStaticGlue); } else { glueRef = _cg->getSymRef(TR_S390interpreterUnresolvedDirectVirtualGlue); } } else { bool synchronised = methodSymbol->isSynchronised(); if ((methodSymbol->isVMInternalNative() || methodSymbol->isJITInternalNative())) { glueRef = _cg->getSymRef(TR_S390nativeStaticHelper); } else { switch (callNode->getDataType()) { case TR::NoType: if (synchronised) { glueRef = _cg->getSymRef(TR_S390interpreterSyncVoidStaticGlue); } else { glueRef = _cg->getSymRef(TR_S390interpreterVoidStaticGlue); } break; case TR::Int8: case TR::Int16: case TR::Int32: if (synchronised) { glueRef = _cg->getSymRef(TR_S390interpreterSyncIntStaticGlue); } else { glueRef = _cg->getSymRef(TR_S390interpreterIntStaticGlue); } break; case TR::Address: if (TR::Compiler->target.is64Bit()) { if (synchronised) { glueRef = _cg->getSymRef(TR_S390interpreterSyncLongStaticGlue); } else { glueRef = _cg->getSymRef(TR_S390interpreterLongStaticGlue); } } else { if (synchronised) { glueRef = 
_cg->getSymRef(TR_S390interpreterSyncIntStaticGlue); } else { glueRef = _cg->getSymRef(TR_S390interpreterIntStaticGlue); } } break; case TR::Int64: if (synchronised) { glueRef = _cg->getSymRef(TR_S390interpreterSyncLongStaticGlue); } else { glueRef = _cg->getSymRef(TR_S390interpreterLongStaticGlue); } break; case TR::Float: if (synchronised) { glueRef = _cg->getSymRef(TR_S390interpreterSyncFloatStaticGlue); } else { glueRef = _cg->getSymRef(TR_S390interpreterFloatStaticGlue); } break; case TR::Double: if (synchronised) { glueRef = _cg->getSymRef(TR_S390interpreterSyncDoubleStaticGlue); } else { glueRef = _cg->getSymRef(TR_S390interpreterDoubleStaticGlue); } break; default: TR_ASSERT(0, "Bad return data type for a call node. DataType was %s\n", getName(callNode->getDataType())); } } } bufferPos = printRuntimeInstrumentationOnOffInstruction(pOutFile, bufferPos, false); // RIOFF if (snippet->getKind() == TR::Snippet::IsUnresolvedCall) { int lengthOfLoad = (TR::Compiler->target.is64Bit())?6:4; printPrefix(pOutFile, NULL, bufferPos, 6); trfprintf(pOutFile, "LARL \tGPR14, *+%d <%p>\t# Start of Data Const.", 8 + lengthOfLoad + padbytes, bufferPos + 8 + lengthOfLoad + padbytes); bufferPos += 6; if (TR::Compiler->target.is64Bit()) { printPrefix(pOutFile, NULL, bufferPos, 6); trfprintf(pOutFile, "LG \tGPR_EP, 0(,GPR14)"); bufferPos += 6; } else { printPrefix(pOutFile, NULL, bufferPos, 4); trfprintf(pOutFile, "L \tGPR_EP, 0(,GPR14)"); bufferPos += 4; } printPrefix(pOutFile, NULL, bufferPos, 2); trfprintf(pOutFile, "BCR \tGPR_EP"); bufferPos += 2; } else { printPrefix(pOutFile, NULL, bufferPos, 6); trfprintf(pOutFile, "BRASL \tGPR14, <%p>\t# Branch to Helper Method %s", snippet->getSnippetDestAddr(), snippet->usedTrampoline()?"- Trampoline Used.":""); bufferPos += 6; } if (padbytes == 2) { printPrefix(pOutFile, NULL, bufferPos, 2); trfprintf(pOutFile, "DC \t0x0000 \t\t\t# 2-bytes padding for alignment"); bufferPos += 2; } else if (padbytes == 4) { printPrefix(pOutFile, NULL, 
bufferPos, 4) ; trfprintf(pOutFile, "DC \t0x00000000 \t\t# 4-bytes padding for alignment"); bufferPos += 4; } else if (padbytes == 6) { printPrefix(pOutFile, NULL, bufferPos, 6) ; trfprintf(pOutFile, "DC \t0x000000000000 \t\t# 6-bytes padding for alignment"); bufferPos += 6; } printPrefix(pOutFile, NULL, bufferPos, sizeof(intptrj_t)); trfprintf(pOutFile, "DC \t%p \t\t# Method Address", glueRef->getMethodAddress()); bufferPos += sizeof(intptrj_t); printPrefix(pOutFile, NULL, bufferPos, sizeof(intptrj_t)); trfprintf(pOutFile, "DC \t%p \t\t# Call Site RA", snippet->getCallRA()); bufferPos += sizeof(intptrj_t); if (methodSymRef->isUnresolved()) { printPrefix(pOutFile, NULL, bufferPos, 0); } else { printPrefix(pOutFile, NULL, bufferPos, sizeof(intptrj_t)); } trfprintf(pOutFile, "DC \t%p \t\t# Method Pointer", methodSymRef->isUnresolved() ? 0 : methodSymbol->getMethodAddress()); }
/**
 * Generate a direct call to a method using the AMD64 system (native) linkage.
 *
 * Evaluates outgoing arguments, builds the pre/post register dependencies,
 * emits either an indirect CALL through a scratch register (when the target
 * address is known) or a CALLImm4 with a symbol-based immediate (when it is
 * not), and anchors the post-dependencies on a trailing label.
 *
 * @param callNode    the call node to dispatch
 * @param spillFPRegs unused here; kept for interface compatibility
 * @return the register holding the call's return value (NULL for void calls,
 *         per buildVolatileAndReturnDependencies)
 *
 * Fixes: the post-condition scan now uses an unsigned index to match the
 * unsigned `post` count (was a signed/unsigned comparison), and
 * stopUsingRegister is only called when a scratch register was actually
 * found (on the CALLImm4 path scratchReg may legitimately be NULL).
 */
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre  = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   // (unsigned index: post is uint32_t, avoids a signed/unsigned comparison)
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (uint32_t i = 0; i < post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

   TR::Instruction *instr;
   if (methodSymbol->getMethodAddress())
      {
      // Known absolute target: materialize the 64-bit address in the scratch
      // register and call through it.
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (comp()->getOption(TR_EmitRelocatableELFFile))
         {
         // Mark the address load for relocation in relocatable ELF output.
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      // No address known yet: emit a direct call with a symbol immediate
      // (address is 0 here; presumably patched/bound later — the symref
      // carries the target identity).
      instr = generateImmSymInstruction(CALLImm4, callNode, (uintptrj_t)methodSymbol->getMethodAddress(), methodSymRef, preDeps, cg());
      }

   // A call means this method is no longer a leaf.
   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   // scratchReg may be NULL on the CALLImm4 path; only release it if found.
   if (scratchReg)
      cg()->stopUsingRegister(scratchReg);

   // Anchor the post-conditions on a label immediately after the call.
   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }