/**
 * Walk a subtree and record every direct variable load it contains.
 *
 * Each node is visited at most once: nodes already present in the checklist
 * are skipped, so commoned (multiply-referenced) nodes are processed a single
 * time. For every direct variable load encountered, the symbol reference
 * number is recorded in the supplied bit vector.
 *
 * @param node        Root of the subtree to examine.
 * @param loadSymRefs Bit vector receiving the symbol reference numbers of the
 *                    direct loads that were found.
 * @param checklist   Tracks nodes already visited during this traversal.
 */
static void collectDirectLoads(TR::Node *node, TR_BitVector &loadSymRefs, TR::NodeChecklist &checklist)
   {
   if (!checklist.contains(node))
      {
      checklist.add(node);

      if (node->getOpCode().isLoadVarDirect())
         {
         // Record this direct load's symbol reference number.
         TR::SymbolReference *symRef = node->getSymbolReference();
         loadSymRefs.set(symRef->getReferenceNumber());
         }

      // Recurse into every child; the checklist prevents revisiting shared nodes.
      int32_t numChildren = node->getNumChildren();
      for (int32_t childIndex = 0; childIndex < numChildren; ++childIndex)
         collectDirectLoads(node->getChild(childIndex), loadSymRefs, checklist);
      }
   }
/**
 * Decide whether a guard can be moved up so that it sits after \p destination.
 *
 * The trees between the insertion point (the exit of \p destination, or the
 * start of the method when \p destination is NULL) and the guard itself are
 * scanned, and the guard kind determines what is allowed to intervene:
 *   - HCR guards: no tree that can GC and return may intervene.
 *   - OSR guards: no potential OSR point may intervene.
 *   - other (inline) guards: only BBStart/BBEnd markers, privatized inliner
 *     argument stores belonging to this guard, stores holding monitored
 *     objects, and nopable inline guards branching to the same destination
 *     may intervene. The symbol reference numbers of the priv arg stores
 *     encountered are collected into \p privArgSymRefs.
 *
 * @param destination    Block after which the guard would be placed; NULL means
 *                       the scan starts at the method's first tree.
 * @param guardCandidate TreeTop of the guard being considered for motion.
 * @param branchDest     Taken-branch destination the guard must agree with.
 * @param privArgSymRefs [out] Cleared, then filled with the symrefs of the
 *                       privatized inliner argument stores seen during the scan.
 * @return true if the guard (and its priv args) can be safely moved.
 */
static bool safeToMoveGuard(TR::Block *destination, TR::TreeTop *guardCandidate,
   TR::TreeTop *branchDest, TR_BitVector &privArgSymRefs)
   {
   // Env var escape hatch: disables moving guards across privatized inliner args.
   static char *disablePrivArgMovement = feGetEnv("TR_DisableRuntimeGuardPrivArgMovement");
   TR::TreeTop *start = destination ? destination->getExit() : TR::comp()->getStartTree();
   if (guardCandidate->getNode()->isHCRGuard())
      {
      for (TR::TreeTop *tt = start; tt && tt != guardCandidate; tt = tt->getNextTreeTop())
         {
         // A GC between the new location and the old one could invalidate the HCR assumption.
         if (tt->getNode()->canGCandReturn())
            return false;
         }
      }
   else if (guardCandidate->getNode()->isOSRGuard())
      {
      for (TR::TreeTop *tt = start; tt && tt != guardCandidate; tt = tt->getNextTreeTop())
         {
         // OSR guards must not move across any potential OSR transition point.
         if (TR::comp()->isPotentialOSRPoint(tt->getNode(), NULL, true))
            return false;
         }
      }
   else
      {
      // NOTE: TR_BitVector::empty() clears the vector (it is not a query).
      privArgSymRefs.empty();
      for (TR::TreeTop *tt = start; tt && tt != guardCandidate; tt = tt->getNextTreeTop())
         {
         // It's safe to move the guard if there are only priv arg stores and live monitor stores
         // ahead of the guard
         if (tt->getNode()->getOpCodeValue() != TR::BBStart
             && tt->getNode()->getOpCodeValue() != TR::BBEnd
             && !tt->getNode()->chkIsPrivatizedInlinerArg()
             && !(tt->getNode()->getOpCode().hasSymbolReference() && tt->getNode()->getSymbol()->holdsMonitoredObject())
             && !tt->getNode()->isNopableInlineGuard())
            return false;

         if (tt->getNode()->chkIsPrivatizedInlinerArg()
             && (disablePrivArgMovement ||
                 // If the priv arg is not for this guard
                 (guardCandidate->getNode()->getInlinedSiteIndex() > -1 &&
                  // if priv arg store does not have the same inlined site index as the guard's caller, that means it is not a priv arg for this guard,
                  // then we cannot move the guard and its priv args up across other calls' priv args
                  tt->getNode()->getInlinedSiteIndex() != TR::comp()->getInlinedCallSite(guardCandidate->getNode()->getInlinedSiteIndex())._byteCodeInfo.getCallerIndex())))
            return false;

         // This priv arg belongs to the guard being moved: remember its symref.
         if (tt->getNode()->chkIsPrivatizedInlinerArg())
            privArgSymRefs.set(tt->getNode()->getSymbolReference()->getReferenceNumber());

         // Another nopable guard in between is only tolerable if it branches
         // to the same destination as the guard we want to move.
         if (tt->getNode()->isNopableInlineGuard()
             && tt->getNode()->getBranchDestination() != branchDest)
            return false;
         }
      }
   return true;
   }
TR_BitVector * OMR::SymbolReference::getUseDefAliasesBV(bool isDirectCall, bool includeGCSafePoint) { TR::Compilation *comp = TR::comp(); TR::Region &aliasRegion = comp->aliasRegion(); int32_t bvInitialSize = comp->getSymRefCount(); TR_BitVectorGrowable growability = growable; // allow more than one shadow for an array type. Used by LoopAliasRefiner const bool supportArrayRefinement=true; int32_t kind = _symbol->getKind(); TR::SymbolReferenceTable * symRefTab = comp->getSymRefTab(); // !!! NOTE !!! // THERE IS A COPY OF THIS LOGIC IN sharesSymbol // if (!self()->reallySharesSymbol(comp)) { switch (kind) { case TR::Symbol::IsShadow: case TR::Symbol::IsStatic: { // For unresolved constant dynamic, we need to invoke a Java bootstrap method, // which can have arbitrary side effects, so the aliasing should be conservative here. // isConstObjectRef now returns true for condy, so we add an explicit condition, // more like a short-circuit, to say if we are unresolved and not isConstObjectRef // (this is the same as before), or if we are unresolved and condy // (this is the extra condition added), we would return conservative aliases. 
if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) || _symbol->isVolatile() || self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || _symbol->isUnsafeShadowSymbol() || (_symbol->isArrayShadowSymbol() && comp->getMethodSymbol()->hasVeryRefinedAliasSets())) { // getUseDefAliases might not return NULL } else if (!symRefTab->aliasBuilder.mutableGenericIntShadowHasBeenCreated()) { // getUseDefAliases must return NULL return NULL; } else if (kind == TR::Symbol::IsStatic && !symRefTab->aliasBuilder.litPoolGenericIntShadowHasBeenCreated()) { // getUseDefAliases must return NULL return NULL; } break; } } } // now do stuff for various kinds of symbols // switch (kind) { case TR::Symbol::IsMethod: { TR::MethodSymbol * methodSymbol = _symbol->castToMethodSymbol(); if (!methodSymbol->isHelper()) return symRefTab->aliasBuilder.methodAliases(self()); if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arraySetSymbol) || symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::osrFearPointHelperSymbol) || symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::potentialOSRPointHelperSymbol)) { return &symRefTab->aliasBuilder.defaultMethodDefAliases(); } if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arrayCmpSymbol)) return 0; switch (self()->getReferenceNumber()) { case TR_methodTypeCheck: case TR_nullCheck: return &symRefTab->aliasBuilder.defaultMethodDefAliasesWithoutImmutable(); case TR_arrayBoundsCheck: case TR_checkCast: case TR_divCheck: case TR_typeCheckArrayStore: case TR_arrayStoreException: case TR_incompatibleReceiver: case TR_IncompatibleClassChangeError: case TR_reportFinalFieldModified: case TR_reportMethodEnter: case TR_reportStaticMethodEnter: case TR_reportMethodExit: case TR_acquireVMAccess: case TR_instanceOf: case TR_checkAssignable: case TR_throwCurrentException: case TR_releaseVMAccess: case TR_stackOverflow: case TR_writeBarrierStore: case TR_writeBarrierBatchStore: case 
TR_jitProfileAddress: case TR_jitProfileWarmCompilePICAddress: case TR_jitProfileValue: case TR_jitProfileLongValue: case TR_jitProfileBigDecimalValue: case TR_jitProfileParseBuffer: return 0; case TR_asyncCheck: case TR_writeBarrierClassStoreRealTimeGC: case TR_writeBarrierStoreRealTimeGC: case TR_aNewArray: case TR_newObject: case TR_newObjectNoZeroInit: case TR_newArray: case TR_multiANewArray: if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint) return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers(); else return 0; case TR_aThrow: return 0; // The monitor exit symbol needs to be aliased with all fields in the // current class to ensure that all references to fields are evaluated // before the monitor exit case TR_monitorExit: case TR_monitorEntry: case TR_transactionExit: case TR_transactionEntry: default: // The following is the place to check for // a use of killsAllMethodSymbolRef... However, // it looks like the default action is sufficient. //if (symRefTab->findKillsAllMethodSymbolRef() == self()) // { // } return &symRefTab->aliasBuilder.defaultMethodDefAliases(); } } case TR::Symbol::IsResolvedMethod: { TR::ResolvedMethodSymbol * resolvedMethodSymbol = _symbol->castToResolvedMethodSymbol(); if (!comp->getOption(TR_EnableHCR)) { switch (resolvedMethodSymbol->getRecognizedMethod()) { #ifdef J9_PROJECT_SPECIFIC case TR::java_lang_System_arraycopy: { TR_BitVector * aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs(); if (comp->generateArraylets()) *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs(); return aliases; } if (resolvedMethodSymbol->isPureFunction()) return NULL; case TR::java_lang_Double_longBitsToDouble: case TR::java_lang_Double_doubleToLongBits: case TR::java_lang_Float_intBitsToFloat: case TR::java_lang_Float_floatToIntBits: case TR::java_lang_Double_doubleToRawLongBits: case TR::java_lang_Float_floatToRawIntBits: case 
TR::java_lang_Math_sqrt: case TR::java_lang_StrictMath_sqrt: case TR::java_lang_Math_sin: case TR::java_lang_StrictMath_sin: case TR::java_lang_Math_cos: case TR::java_lang_StrictMath_cos: case TR::java_lang_Math_max_I: case TR::java_lang_Math_min_I: case TR::java_lang_Math_max_L: case TR::java_lang_Math_min_L: case TR::java_lang_Math_abs_I: case TR::java_lang_Math_abs_L: case TR::java_lang_Math_abs_F: case TR::java_lang_Math_abs_D: case TR::java_lang_Math_pow: case TR::java_lang_StrictMath_pow: case TR::java_lang_Math_exp: case TR::java_lang_StrictMath_exp: case TR::java_lang_Math_log: case TR::java_lang_StrictMath_log: case TR::java_lang_Math_floor: case TR::java_lang_Math_ceil: case TR::java_lang_Math_copySign_F: case TR::java_lang_Math_copySign_D: case TR::java_lang_StrictMath_floor: case TR::java_lang_StrictMath_ceil: case TR::java_lang_StrictMath_copySign_F: case TR::java_lang_StrictMath_copySign_D: case TR::com_ibm_Compiler_Internal__TR_Prefetch: case TR::java_nio_Bits_keepAlive: if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint) return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers(); else return 0; // no aliasing on DFP dummy stubs case TR::java_math_BigDecimal_DFPPerformHysteresis: case TR::java_math_BigDecimal_DFPUseDFP: case TR::java_math_BigDecimal_DFPHWAvailable: case TR::java_math_BigDecimal_DFPCompareTo: case TR::java_math_BigDecimal_DFPUnscaledValue: case TR::com_ibm_dataaccess_DecimalData_DFPFacilityAvailable: case TR::com_ibm_dataaccess_DecimalData_DFPUseDFP: case TR::com_ibm_dataaccess_DecimalData_DFPConvertPackedToDFP: case TR::com_ibm_dataaccess_DecimalData_DFPConvertDFPToPacked: case TR::com_ibm_dataaccess_DecimalData_createZeroBigDecimal: case TR::com_ibm_dataaccess_DecimalData_getlaside: case TR::com_ibm_dataaccess_DecimalData_setlaside: case TR::com_ibm_dataaccess_DecimalData_getflags: case TR::com_ibm_dataaccess_DecimalData_setflags: if (!( #ifdef TR_TARGET_S390 TR::Compiler->target.cpu.getS390SupportsDFP() || 
#endif TR::Compiler->target.cpu.supportsDecimalFloatingPoint()) || comp->getOption(TR_DisableDFP)) return NULL; #endif //J9_PROJECT_SPECIFIC default: break; } } #ifdef J9_PROJECT_SPECIFIC TR_ResolvedMethod * method = resolvedMethodSymbol->getResolvedMethod(); TR_PersistentMethodInfo * methodInfo = TR_PersistentMethodInfo::get(method); if (methodInfo && (methodInfo->hasRefinedAliasSets() || comp->getMethodHotness() >= veryHot || resolvedMethodSymbol->hasVeryRefinedAliasSets()) && (method->isStatic() || method->isFinal() || isDirectCall)) { TR_BitVector * aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint) *aliases |= symRefTab->aliasBuilder.gcSafePointSymRefNumbers(); if (methodInfo->doesntKillAnything() && !comp->getOption(TR_DisableRefinedAliases)) return aliases; if ((resolvedMethodSymbol->hasVeryRefinedAliasSets() || comp->getMethodHotness() >= hot) && !debug("disableVeryRefinedCallAliasSets")) { TR_BitVector * exactAliases = 0; if (resolvedMethodSymbol->hasVeryRefinedAliasSets()) exactAliases = symRefTab->aliasBuilder.getVeryRefinedCallAliasSets(resolvedMethodSymbol); else { resolvedMethodSymbol->setHasVeryRefinedAliasSets(true); List<void> methodsPeeked(comp->trMemory()); exactAliases = addVeryRefinedCallAliasSets(resolvedMethodSymbol, aliases, &methodsPeeked); symRefTab->aliasBuilder.setVeryRefinedCallAliasSets(resolvedMethodSymbol, exactAliases); } if (exactAliases) { return exactAliases; } } // From here on, we're just checking refined alias info. // If refined aliases are disabled, return the conservative answer // we would have returned had we never attempted to use refined // aliases at all. 
// if (comp->getOption(TR_DisableRefinedAliases)) return symRefTab->aliasBuilder.methodAliases(self()); if (!methodInfo->doesntKillAddressArrayShadows()) { symRefTab->aliasBuilder.addAddressArrayShadows(aliases); if (comp->generateArraylets()) aliases->set(symRefTab->getArrayletShadowIndex(TR::Address)); } if (!methodInfo->doesntKillIntArrayShadows()) { symRefTab->aliasBuilder.addIntArrayShadows(aliases); if (comp->generateArraylets()) { aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32)); } } if (!methodInfo->doesntKillNonIntPrimitiveArrayShadows()) { symRefTab->aliasBuilder.addNonIntPrimitiveArrayShadows(aliases); if (comp->generateArraylets()) { aliases->set(symRefTab->getArrayletShadowIndex(TR::Int8)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Int16)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Int64)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Float)); aliases->set(symRefTab->getArrayletShadowIndex(TR::Double)); } } if (!methodInfo->doesntKillAddressFields()) *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs(); if (!methodInfo->doesntKillIntFields()) *aliases |= symRefTab->aliasBuilder.intShadowSymRefs(); if (!methodInfo->doesntKillNonIntPrimitiveFields()) *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs(); if (!methodInfo->doesntKillAddressStatics()) *aliases |= symRefTab->aliasBuilder.addressStaticSymRefs(); if (!methodInfo->doesntKillIntStatics()) *aliases |= symRefTab->aliasBuilder.intStaticSymRefs(); if (!methodInfo->doesntKillNonIntPrimitiveStatics()) *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveStaticSymRefs(); TR_BitVector *methodAliases = symRefTab->aliasBuilder.methodAliases(self()); *aliases &= *methodAliases; return aliases; } #endif return symRefTab->aliasBuilder.methodAliases(self()); } case TR::Symbol::IsShadow: { if ((self()->isUnresolved() && !_symbol->isConstObjectRef()) || _symbol->isVolatile() || 
self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || (_symbol->isUnsafeShadowSymbol() && !self()->reallySharesSymbol())) { if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber())) { TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); *aliases |= comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable(); *aliases -= symRefTab->aliasBuilder.cpSymRefs(); return aliases; } else return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable(); } TR_BitVector *aliases = NULL; if (_symbol == symRefTab->findGenericIntShadowSymbol()) { aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs(); if (comp->generateArraylets()) *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers(); #ifdef J9_PROJECT_SPECIFIC *aliases |= symRefTab->aliasBuilder.unresolvedShadowSymRefs(); #endif if (symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing()) { *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.intShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs(); } aliases->set(self()->getReferenceNumber()); return aliases; } if (self()->reallySharesSymbol(comp)) { aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); self()->setSharedShadowAliases(aliases, symRefTab); } if (symRefTab->findGenericIntShadowSymbol()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); self()->setLiteralPoolAliases(aliases, symRefTab); if 
(symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing() || self()->isUnresolved()) { *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs(); } } if (_symbol->isArrayShadowSymbol() && symRefTab->findGenericIntShadowSymbol()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs(); *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs(); if (supportArrayRefinement && self()->getIndependentSymRefs()) *aliases -= *self()->getIndependentSymRefs(); } #ifdef J9_PROJECT_SPECIFIC // make TR::PackedDecimal aliased with TR::Int8(byte) if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::PackedDecimal) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); aliases->set(symRefTab->getArrayShadowIndex(TR::Int8)); } //the other way around. 
if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::Int8) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); aliases->set(symRefTab->getArrayShadowIndex(TR::PackedDecimal)); } #endif // alias vector arrays shadows with corresponding scalar array shadows if (_symbol->isArrayShadowSymbol() && _symbol->getDataType().isVector()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().vectorToScalar())); } // the other way around if (_symbol->isArrayShadowSymbol() && !_symbol->getDataType().isVector()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().scalarToVector())); } if (_symbol->isArrayShadowSymbol() && !symRefTab->aliasBuilder.immutableArrayElementSymRefs().isEmpty()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); TR::DataType type = _symbol->getDataType(); TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs()); int32_t symRefNum; while (bvi.hasMoreElements()) { symRefNum = bvi.getNextElement(); if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type) aliases->set(symRefNum); } } if (_symbol->isArrayShadowSymbol() && supportArrayRefinement && comp->getMethodSymbol()->hasVeryRefinedAliasSets()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); TR::DataType type = _symbol->getDataType(); TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs()); int32_t symRefNum; while (bvi.hasMoreElements()) { symRefNum = bvi.getNextElement(); if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type) aliases->set(symRefNum); } if (self()->getIndependentSymRefs()) *aliases -= *self()->getIndependentSymRefs(); return aliases; } if 
(aliases) aliases->set(self()->getReferenceNumber()); if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber())) *aliases -= symRefTab->aliasBuilder.cpSymRefs(); else if (symRefTab->aliasBuilder.cpSymRefs().get(self()->getReferenceNumber())) *aliases -= symRefTab->aliasBuilder.unsafeArrayElementSymRefs(); return aliases; } case TR::Symbol::IsStatic: { // For unresolved constant dynamic, we need to invoke a Java bootstrap method, // which can have arbitrary side effects, so the aliasing should be conservative here. // isConstObjectRef now returns true for condy, so we add an explicit condition, // more like a short-circuit, to say if we are unresolved and not isConstObjectRef // (this is the same as before), or if we are unresolved and condy // (this is the extra condition added), we would return conservative aliases. if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) || self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || _symbol->isVolatile()) { return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliases(); } TR_BitVector *aliases = NULL; if (self()->reallySharesSymbol(comp)) { aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); self()->setSharedStaticAliases(aliases, symRefTab); } if (symRefTab->findGenericIntShadowSymbol()) { if (!aliases) aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability); self()->setLiteralPoolAliases(aliases, symRefTab); } if (aliases) aliases->set(self()->getReferenceNumber()); return aliases; } case TR::Symbol::IsMethodMetaData: { TR_BitVector *aliases = NULL; return aliases; } default: //TR_ASSERT(0, "getUseDefAliasing called for non method"); if (comp->generateArraylets() && comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers().get(self()->getReferenceNumber()) && includeGCSafePoint) return &comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers(); else return 0; } }
/**
 * Emit instructions that place incoming arguments where the method body
 * expects them: saved to their stack slots and/or moved/loaded into their
 * globally-allocated registers.
 *
 * @param cursor   Instruction after which new instructions are appended.
 * @param fsd      Full Speed Debug mode flag.
 * @param saveOnly When true (with fsd), only save arguments to the stack;
 *                 no register shuffling is performed.
 * @param parmList Parameters of the method being compiled.
 * @return the last instruction emitted (the new cursor).
 *
 * Register-to-register moves whose target register is still occupied are
 * deferred into the busyMoves table (source, target, kind) and resolved
 * afterwards, iterating until no further move can be freed.
 *
 * NOTE(review): `all_saved` is initialized false and never set true in this
 * function, so every `if (!all_saved)` guard currently always fires — confirm
 * whether it is vestigial. `bodySymbol` appears unused here. `busyMoves` is a
 * fixed 64-entry table with no bounds check on busyIndex — presumably the
 * number of deferred moves is bounded by the argument registers; verify.
 */
TR::Instruction *OMR::Power::Linkage::saveArguments(TR::Instruction *cursor, bool fsd, bool saveOnly,
                                                    List<TR::ParameterSymbol> &parmList)
   {
   // Convenience shorthands for real-register lookup and RegNum casts.
   #define REAL_REGISTER(ri) machine->getRealRegister(ri)
   #define REGNUM(ri) ((TR::RealRegister::RegNum)(ri))
   const TR::PPCLinkageProperties& properties = self()->getProperties();
   TR::Machine *machine = self()->machine();
   TR::RealRegister *stackPtr = self()->cg()->getStackPointerRegister();
   TR::ResolvedMethodSymbol *bodySymbol = self()->comp()->getJittedMethodSymbol();
   ListIterator<TR::ParameterSymbol> paramIterator(&parmList);
   TR::ParameterSymbol *paramCursor;
   TR::Node *firstNode = self()->comp()->getStartTree()->getNode();
   TR_BitVector freeScratchable;
   // busyMoves[0][i] = source (reg num or stack offset), busyMoves[1][i] = target reg,
   // busyMoves[2][i] = kind (0 reg move, 1 lwz, 2 ld, 3 lfs, 4 lfd).
   int32_t busyMoves[3][64];
   int32_t busyIndex = 0, i1;
   bool all_saved = false;

   // the freeScratchable structure will not be used when saveOnly == true
   // no additional conditions were added with the intention of keeping the code easier to read
   // and not full of if conditions
   freeScratchable.init(TR::RealRegister::LastFPR + 1, self()->trMemory());

   // first, consider all argument registers free
   for (i1=TR::RealRegister::FirstGPR; i1<=TR::RealRegister::LastFPR; i1++)
      {
      if (!properties.getReserved(REGNUM(i1)))
         {
         freeScratchable.set(i1);
         }
      }
   // second, go through all parameters and reset registers that are actually used
   for (paramCursor=paramIterator.getFirst(); paramCursor!=NULL; paramCursor=paramIterator.getNext())
      {
      int32_t lri = paramCursor->getLinkageRegisterIndex();
      TR::DataType type = paramCursor->getType();

      if (lri >= 0)
         {
         TR::RealRegister::RegNum regNum;
         // 32-bit Int64 args occupy two consecutive GPRs when both fit.
         bool twoRegs = (TR::Compiler->target.is32Bit() && type.isInt64() && lri < properties.getNumIntArgRegs()-1);

         if (!type.isFloatingPoint())
            {
            regNum = properties.getIntegerArgumentRegister(lri);
            if (paramCursor->isReferencedParameter()) freeScratchable.reset(regNum);
            if (twoRegs)
               if (paramCursor->isReferencedParameter()) freeScratchable.reset(regNum+1);
            }
         else
            {
            regNum = properties.getFloatArgumentRegister(lri);
            if (paramCursor->isReferencedParameter()) freeScratchable.reset(regNum);
            if (twoRegs)
               if (paramCursor->isReferencedParameter()) freeScratchable.reset(regNum+1);
            }
         }
      }

   // Main pass: for each parameter, save it to the stack and/or get it into
   // its allocated global register.
   for (paramCursor=paramIterator.getFirst(); paramCursor!=NULL; paramCursor=paramIterator.getNext())
      {
      int32_t lri = paramCursor->getLinkageRegisterIndex();     // linkage (incoming) register index; <0 means on stack
      int32_t ai  = paramCursor->getAllocatedIndex();           // allocated global register; <0 means none
      int32_t offset = self()->calculateParameterRegisterOffset(paramCursor->getParameterOffset(), *paramCursor);
      TR::DataType type = paramCursor->getType();
      int32_t dtype = type.getDataType();

      // TODO: Is there an accurate assume to insert here ?
      if (lri >= 0)     // parameter arrives in a register
         {
         if (!paramCursor->isReferencedParameter() && !paramCursor->isParmHasToBeOnStack()) continue;

         TR::RealRegister::RegNum regNum;
         bool twoRegs = (TR::Compiler->target.is32Bit() && type.isInt64() && lri < properties.getNumIntArgRegs()-1);

         if (type.isFloatingPoint())
            regNum = properties.getFloatArgumentRegister(lri);
         else
            regNum = properties.getIntegerArgumentRegister(lri);

         // Do not save arguments to the stack if in Full Speed Debug and saveOnly is not set.
         // If not in Full Speed Debug, the arguments will be saved.
         if (((ai<0 || self()->hasToBeOnStack(paramCursor)) && !fsd) || (fsd && saveOnly))
            {
            switch (dtype)
               {
               case TR::Int8:
               case TR::Int16:
               case TR::Int32:
                  {
                  TR::InstOpCode::Mnemonic op = TR::InstOpCode::stw;
                  if (!all_saved) cursor = generateMemSrc1Instruction(self()->cg(), op, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()),
                           REAL_REGISTER(regNum), cursor);
                  }
                  break;
               case TR::Address:
                  if (!all_saved) cursor = generateMemSrc1Instruction(self()->cg(),TR::InstOpCode::Op_st, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, TR::Compiler->om.sizeofReferenceAddress(), self()->cg()),
                           REAL_REGISTER(regNum), cursor);
                  break;
               case TR::Int64:
                  if (!all_saved) cursor = generateMemSrc1Instruction(self()->cg(),TR::InstOpCode::Op_st, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, TR::Compiler->om.sizeofReferenceAddress(), self()->cg()),
                           REAL_REGISTER(regNum), cursor);
                  if (twoRegs)
                     {
                     // Low half of the 64-bit value lives in the next GPR.
                     if (!all_saved) cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::stw, firstNode,
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset+4, 4, self()->cg()),
                              REAL_REGISTER(REGNUM(regNum+1)), cursor);
                     if (ai<0)
                        freeScratchable.set(regNum+1);
                     }
                  break;
               case TR::Float:
                  cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::stfs, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()),
                           REAL_REGISTER(regNum), cursor);
                  break;
               case TR::Double:
                  cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::stfd, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 8, self()->cg()),
                           REAL_REGISTER(regNum), cursor);
                  break;
               default:
                  TR_ASSERT(false, "assertion failure");
                  break;
               }
            // The argument register is free again once saved (no global reg wants it).
            if (ai<0)
               freeScratchable.set(regNum);
            }

         // Global register is allocated to this argument.
         // Don't process if in Full Speed Debug and saveOnly is set
         if (ai>=0 && (!fsd || !saveOnly))
            {
            if (regNum != ai)      // Equal assignment: do nothing
               {
               if (freeScratchable.isSet(ai))
                  {
                  cursor = generateTrg1Src1Instruction(self()->cg(), (type.isFloatingPoint()) ? TR::InstOpCode::fmr:TR::InstOpCode::mr,
                              firstNode, REAL_REGISTER(REGNUM(ai)), REAL_REGISTER(regNum), cursor);
                  freeScratchable.reset(ai);
                  freeScratchable.set(regNum);
                  }
               else   // The status of target global register is unclear (i.e. it is a arg reg)
                  {
                  // Defer: register-to-register move (kind 0).
                  busyMoves[0][busyIndex] = regNum;
                  busyMoves[1][busyIndex] = ai;
                  busyMoves[2][busyIndex] = 0;
                  busyIndex++;
                  }
               }

            // On 32-bit, the low half of an Int64 has its own allocated register.
            if (TR::Compiler->target.is32Bit() && type.isInt64())
               {
               int32_t aiLow = paramCursor->getAllocatedLow();

               if (!twoRegs)    // Low part needs to come from memory
                  {
                  offset += 4;   // We are dealing with the low part

                  if (freeScratchable.isSet(aiLow))
                     {
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lwz, firstNode, REAL_REGISTER(REGNUM(aiLow)),
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()), cursor);
                     freeScratchable.reset(aiLow);
                     }
                  else
                     {
                     // Defer: 32-bit load from stack (kind 1).
                     busyMoves[0][busyIndex] = offset;
                     busyMoves[1][busyIndex] = aiLow;
                     busyMoves[2][busyIndex] = 1;
                     busyIndex++;
                     }
                  }
               else if (regNum+1 != aiLow)   // Low part needs to be moved
                  {
                  if (freeScratchable.isSet(aiLow))
                     {
                     cursor = generateTrg1Src1Instruction(self()->cg(), TR::InstOpCode::mr, firstNode,
                                 REAL_REGISTER(REGNUM(aiLow)), REAL_REGISTER(REGNUM(regNum+1)), cursor);
                     freeScratchable.reset(aiLow);
                     freeScratchable.set(regNum+1);
                     }
                  else
                     {
                     // Defer: register-to-register move of the low half (kind 0).
                     busyMoves[0][busyIndex] = regNum+1;
                     busyMoves[1][busyIndex] = aiLow;
                     busyMoves[2][busyIndex] = 0;
                     busyIndex++;
                     }
                  }
               }
            }
         }
      // Don't process if in Full Speed Debug and saveOnly is set
      else if (ai >= 0 && (!fsd || !saveOnly))     // lri<0: arg needs to come from memory
         {
         switch (dtype)
            {
            case TR::Int8:
            case TR::Int16:
            case TR::Int32:
               if (freeScratchable.isSet(ai))
                  {
                  cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lwz, firstNode, REAL_REGISTER(REGNUM(ai)),
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()), cursor);
                  freeScratchable.reset(ai);
                  }
               else
                  {
                  busyMoves[0][busyIndex] = offset;
                  busyMoves[1][busyIndex] = ai;
                  busyMoves[2][busyIndex] = 1;
                  busyIndex++;
                  }
               break;
            case TR::Address:
               if (freeScratchable.isSet(ai))
                  {
                  cursor = generateTrg1MemInstruction(self()->cg(),TR::InstOpCode::Op_load, firstNode, REAL_REGISTER(REGNUM(ai)),
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, TR::Compiler->om.sizeofReferenceAddress(), self()->cg()), cursor);
                  freeScratchable.reset(ai);
                  }
               else
                  {
                  busyMoves[0][busyIndex] = offset;
                  busyMoves[1][busyIndex] = ai;
                  // Address width depends on the target: 8-byte load on 64-bit.
                  if (TR::Compiler->target.is64Bit())
                     busyMoves[2][busyIndex] = 2;
                  else
                     busyMoves[2][busyIndex] = 1;
                  busyIndex++;
                  }
               break;
            case TR::Int64:
               if (TR::Compiler->target.is64Bit())
                  {
                  if (freeScratchable.isSet(ai))
                     {
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::ld, firstNode, REAL_REGISTER(REGNUM(ai)),
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 8, self()->cg()), cursor);
                     freeScratchable.reset(ai);
                     }
                  else
                     {
                     busyMoves[0][busyIndex] = offset;
                     busyMoves[1][busyIndex] = ai;
                     busyMoves[2][busyIndex] = 2;
                     busyIndex++;
                     }
                  }
               else // 32-bit
                  {
                  // High half into the allocated register...
                  if (freeScratchable.isSet(ai))
                     {
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lwz, firstNode, REAL_REGISTER(REGNUM(ai)),
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()), cursor);
                     freeScratchable.reset(ai);
                     }
                  else
                     {
                     busyMoves[0][busyIndex] = offset;
                     busyMoves[1][busyIndex] = ai;
                     busyMoves[2][busyIndex] = 1;
                     busyIndex++;
                     }
                  // ...then the low half into its own allocated register.
                  ai = paramCursor->getAllocatedLow();
                  if (freeScratchable.isSet(ai))
                     {
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lwz, firstNode, REAL_REGISTER(REGNUM(ai)),
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset+4, 4, self()->cg()), cursor);
                     freeScratchable.reset(ai);
                     }
                  else
                     {
                     busyMoves[0][busyIndex] = offset+4;
                     busyMoves[1][busyIndex] = ai;
                     busyMoves[2][busyIndex] = 1;
                     busyIndex++;
                     }
                  }
               break;
            case TR::Float:
               if (freeScratchable.isSet(ai))
                  {
                  cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lfs, firstNode, REAL_REGISTER(REGNUM(ai)),
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()), cursor);
                  freeScratchable.reset(ai);
                  }
               else
                  {
                  // Defer: single-precision float load (kind 3).
                  busyMoves[0][busyIndex] = offset;
                  busyMoves[1][busyIndex] = ai;
                  busyMoves[2][busyIndex] = 3;
                  busyIndex++;
                  }
               break;
            case TR::Double:
               if (freeScratchable.isSet(ai))
                  {
                  cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lfd, firstNode, REAL_REGISTER(REGNUM(ai)),
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 8, self()->cg()), cursor);
                  freeScratchable.reset(ai);
                  }
               else
                  {
                  // Defer: double-precision float load (kind 4).
                  busyMoves[0][busyIndex] = offset;
                  busyMoves[1][busyIndex] = ai;
                  busyMoves[2][busyIndex] = 4;
                  busyIndex++;
                  }
               break;
            default:
               break;
            }
         }
      }

   // Resolve the deferred moves: repeatedly emit any move whose target register
   // has become free, until no further progress is possible.
   if (!fsd || !saveOnly)
      {
      bool freeMore = true;
      int32_t numMoves = busyIndex;

      while (freeMore && numMoves>0)
         {
         freeMore = false;
         for (i1=0; i1<busyIndex; i1++)
            {
            int32_t source = busyMoves[0][i1];
            int32_t target = busyMoves[1][i1];
            // target<0 marks an entry already processed.
            if (!(target<0) && freeScratchable.isSet(target))
               {
               switch(busyMoves[2][i1])
                  {
                  case 0:
                     // GPR vs FPR is inferred from the source register number.
                     cursor = generateTrg1Src1Instruction(self()->cg(), (source<=TR::RealRegister::LastGPR)?TR::InstOpCode::mr:TR::InstOpCode::fmr,
                                 firstNode, REAL_REGISTER(REGNUM(target)), REAL_REGISTER(REGNUM(source)), cursor);
                     freeScratchable.set(source);
                     break;
                  case 1:
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lwz, firstNode, REAL_REGISTER(REGNUM(target)),
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, source, 4, self()->cg()), cursor);
                     break;
                  case 2:
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::ld, firstNode, REAL_REGISTER(REGNUM(target)),
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, source, 8, self()->cg()), cursor);
                     break;
                  case 3:
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lfs, firstNode, REAL_REGISTER(REGNUM(target)),
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, source, 4, self()->cg()), cursor);
                     break;
                  case 4:
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lfd, firstNode, REAL_REGISTER(REGNUM(target)),
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, source, 8, self()->cg()), cursor);
                     break;
                  }
               freeScratchable.reset(target);
               freeMore = true;
               busyMoves[0][i1] = busyMoves[1][i1] = -1;
               numMoves--;
               }
            }
         }
      TR_ASSERT(numMoves<=0, "Circular argument register dependency can and should be avoided.");
      }

   return(cursor);
   }
/**
 * Perform the setup-for-instruction-selection phase.
 *
 * On Z targets with read barriers for field loads (guarded storage), walks all
 * trees and anchors collected-reference loads (aloadi, and static collected
 * aload) under their own treetops to enforce evaluation order, except for
 * aloadi nodes whose child is the reference of a NULLCHK (those must remain
 * un-anchored so the null check still guards the load).
 *
 * Afterwards: rebuilds structure if the code generator requests it, reports
 * the phase, optionally runs the register pressure simulation for tracing,
 * and finally calls setUpForInstructionSelection() on the code generator.
 *
 * @param cg    The code generator being set up.
 * @param phase The current code generation phase (used for reporting/timing).
 */
void OMR::CodeGenPhase::performSetupForInstructionSelectionPhase(TR::CodeGenerator * cg, TR::CodeGenPhase * phase)
   {
   TR::Compilation *comp = cg->comp();

   if (TR::Compiler->target.cpu.isZ() && TR::Compiler->om.shouldGenerateReadBarriersForFieldLoads())
      {
      // TODO (GuardedStorage): We need to come up with a better solution than anchoring aloadi's
      // to enforce certain evaluation order
      traceMsg(comp, "GuardedStorage: in performSetupForInstructionSelectionPhase\n");

      auto mapAllocator = getTypedAllocator<std::pair<TR::TreeTop*, TR::TreeTop*> >(comp->allocator());

      // Maps a treetop to the most recent anchor inserted before it, so that
      // successive anchors for the same treetop keep their preorder ordering
      // (each new anchor is inserted before the previous one's insertion point).
      std::map<TR::TreeTop*, TR::TreeTop*, std::less<TR::TreeTop*>, TR::typed_allocator<std::pair<TR::TreeTop* const, TR::TreeTop*>, TR::Allocator> > currentTreeTopToappendTreeTop(std::less<TR::TreeTop*> (), mapAllocator);

      // Global indices of aloadi nodes that must NOT be anchored because a
      // NULLCHK guards their child.
      TR_BitVector *unAnchorableAloadiNodes = comp->getBitVectorPool().get();

      for (TR::PreorderNodeIterator iter(comp->getStartTree(), comp); iter != NULL; ++iter)
         {
         TR::Node *node = iter.currentNode();

         traceMsg(comp, "GuardedStorage: Examining node = %p\n", node);

         // isNullCheck handles both TR::NULLCHK and TR::ResolveAndNULLCHK
         // both of which do not operate on their child but their
         // grandchild (or greatgrandchild).
         if (node->getOpCode().isNullCheck())
            {
            // An aloadi cannot be anchored if there is a Null Check on
            // its child. There are two situations where this occurs.
            // The first is when doing an aloadi off some node that is
            // being NULLCHK'd (see Ex1). The second is when doing an
            // icalli in which case the aloadi loads the VFT of an
            // object that must be NULLCHK'd (see Ex2).
            //
            // Ex1:
            //   n1n NULLCHK on n3n
            //   n2n   aloadi f      <-- First Child And Parent of Null Chk'd Node
            //   n3n     aload O
            //
            // Ex2:
            //   n1n NULLCHK on n4n
            //   n2n   icall foo     <-- First Child
            //   n3n     aloadi <vft> <-- Parent of Null Chk'd Node
            //   n4n       aload O
            //   n4n     ==> aload O
            TR::Node *nodeBeingNullChkd = node->getNullCheckReference();
            if (nodeBeingNullChkd)
               {
               TR::Node *firstChild = node->getFirstChild();
               TR::Node *parentOfNullChkdNode = NULL;

               if (firstChild->getOpCode().isCall() &&
                   firstChild->getOpCode().isIndirect())
                  {
                  // Ex2: the VFT aloadi is the call's first child.
                  parentOfNullChkdNode = firstChild->getFirstChild();
                  }
               else
                  {
                  // Ex1: the aloadi is the NULLCHK's first child itself.
                  parentOfNullChkdNode = firstChild;
                  }

               if (parentOfNullChkdNode &&
                   parentOfNullChkdNode->getOpCodeValue() == TR::aloadi &&
                   parentOfNullChkdNode->getNumChildren() > 0 &&
                   parentOfNullChkdNode->getFirstChild() == nodeBeingNullChkd)
                  {
                  unAnchorableAloadiNodes->set(parentOfNullChkdNode->getGlobalIndex());

                  // Report the node actually marked un-anchorable; in the
                  // indirect-call case (Ex2) this differs from firstChild.
                  traceMsg(comp, "GuardedStorage: Cannot anchor %p\n", parentOfNullChkdNode);
                  }
               }
            }
         else
            {
            bool shouldAnchorNode = false;

            if (node->getOpCodeValue() == TR::aloadi &&
                !unAnchorableAloadiNodes->isSet(node->getGlobalIndex()))
               {
               shouldAnchorNode = true;
               }
            else if (node->getOpCodeValue() == TR::aload &&
                     node->getSymbol()->isStatic() &&
                     node->getSymbol()->isCollectedReference())
               {
               shouldAnchorNode = true;
               }

            if (shouldAnchorNode)
               {
               TR::TreeTop* anchorTreeTop = TR::TreeTop::create(comp, TR::Node::create(TR::treetop, 1, node));
               TR::TreeTop* appendTreeTop = iter.currentTree();

               // If we already anchored something before this treetop, insert
               // before that anchor instead to preserve preorder ordering.
               if (currentTreeTopToappendTreeTop.count(appendTreeTop) > 0)
                  {
                  appendTreeTop = currentTreeTopToappendTreeTop[appendTreeTop];
                  }

               // Anchor the aload/aloadi before the current treetop
               appendTreeTop->insertBefore(anchorTreeTop);
               currentTreeTopToappendTreeTop[iter.currentTree()] = anchorTreeTop;

               traceMsg(comp, "GuardedStorage: Anchored %p to treetop = %p\n", node, anchorTreeTop);
               }
            }
         }

      comp->getBitVectorPool().release(unAnchorableAloadiNodes);
      }

   // Rebuild structure from the CFG if the code generator wants it and
   // structure information is currently present.
   if (cg->shouldBuildStructure() &&
       (comp->getFlowGraph()->getStructure() != NULL))
      {
      TR_Structure *rootStructure = TR_RegionAnalysis::getRegions(comp);
      comp->getFlowGraph()->setStructure(rootStructure);
      }

   phase->reportPhase(SetupForInstructionSelectionPhase);

   // Dump preIR
   if (comp->getOption(TR_TraceRegisterPressureDetails) && !comp->getOption(TR_DisableRegisterPressureSimulation))
      {
      traceMsg(comp, " { Post optimization register pressure simulation\n");
      TR_BitVector emptyBitVector;
      vcount_t vc = comp->incVisitCount();
      cg->initializeRegisterPressureSimulator();
      for (TR::Block *block = comp->getStartBlock(); block; block = block->getNextExtendedBlock())
         {
         TR_LinkHead<TR_RegisterCandidate> emptyCandidateList;
         TR::CodeGenerator::TR_RegisterPressureState state(NULL, 0, emptyBitVector, emptyBitVector, &emptyCandidateList, cg->getNumberOfGlobalGPRs(), cg->getNumberOfGlobalFPRs(), cg->getNumberOfGlobalVRFs(), vc);
         TR::CodeGenerator::TR_RegisterPressureSummary summary(state._gprPressure, state._fprPressure, state._vrfPressure);
         cg->simulateBlockEvaluation(block, &state, &summary);
         }
      traceMsg(comp, " }\n");
      }

   // Scoped profiling/timing for the remainder of the phase.
   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->setUpForInstructionSelection();
   }