static bool safeToMoveGuard(TR::Block *destination, TR::TreeTop *guardCandidate,
   TR::TreeTop *branchDest, TR_BitVector &privArgSymRefs)
   {
   static char *disablePrivArgMovement = feGetEnv("TR_DisableRuntimeGuardPrivArgMovement");
   TR::TreeTop *start = destination ? destination->getExit() : TR::comp()->getStartTree();
   if (guardCandidate->getNode()->isHCRGuard())
      {
      for (TR::TreeTop *tt = start; tt && tt != guardCandidate; tt = tt->getNextTreeTop())
         {
         if (tt->getNode()->canGCandReturn())
            return false;
         }
      }
   else if (guardCandidate->getNode()->isOSRGuard())
      {
      for (TR::TreeTop *tt = start; tt && tt != guardCandidate; tt = tt->getNextTreeTop())
         {
         if (TR::comp()->isPotentialOSRPoint(tt->getNode(), NULL, true))
            return false;
         }
      }
   else
      {
      privArgSymRefs.empty();
      for (TR::TreeTop *tt = start; tt && tt != guardCandidate; tt = tt->getNextTreeTop())
         {
         // It's safe to move the guard only if everything ahead of it is a priv
         // arg store, a live monitor store, or another nopable inline guard
         if (tt->getNode()->getOpCodeValue() != TR::BBStart
             && tt->getNode()->getOpCodeValue() != TR::BBEnd
             && !tt->getNode()->chkIsPrivatizedInlinerArg()
             && !(tt->getNode()->getOpCode().hasSymbolReference() && tt->getNode()->getSymbol()->holdsMonitoredObject())
             && !tt->getNode()->isNopableInlineGuard())
            return false;

         if (tt->getNode()->chkIsPrivatizedInlinerArg()
             && (disablePrivArgMovement
                 // If the priv arg store does not have the same inlined site index as the
                 // guard's caller, it is not a priv arg for this guard, so the guard and
                 // its priv args cannot be moved up across another call's priv args
                 || (guardCandidate->getNode()->getInlinedSiteIndex() > -1
                     && tt->getNode()->getInlinedSiteIndex() != TR::comp()->getInlinedCallSite(guardCandidate->getNode()->getInlinedSiteIndex())._byteCodeInfo.getCallerIndex())))
            return false;

         if (tt->getNode()->chkIsPrivatizedInlinerArg())
            privArgSymRefs.set(tt->getNode()->getSymbolReference()->getReferenceNumber());

         if (tt->getNode()->isNopableInlineGuard()
             && tt->getNode()->getBranchDestination() != branchDest)
            return false;
         }
      }
   return true;
   }
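// Illustrative only: a minimal sketch of how a caller might consult
// safeToMoveGuard before hoisting a guard into an earlier block. The real
// pass (not shown in this excerpt) also rewires trees and CFG edges;
// canHoistGuard and its parameter names are hypothetical.
static bool canHoistGuard(TR::Block *newHome, TR::TreeTop *guardTree)
   {
   // Assumes the TR_BitVector(initBits, TR_Memory*, TR_AllocationKind) constructor
   TR_BitVector privArgSymRefs(0, TR::comp()->trMemory(), stackAlloc);
   TR::TreeTop *takenPath = guardTree->getNode()->getBranchDestination();
   // On a true return, privArgSymRefs holds the priv arg stores that must
   // accompany the guard when it moves
   return safeToMoveGuard(newHome, guardTree, takenPath, privArgSymRefs);
   }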
/**
 * Search for direct loads in the taken side of a guard
 *
 * @param firstBlock    The guard's branch destination
 * @param coldPathLoads BitVector of symbol reference numbers for any direct
 *                      loads seen until the merge back to mainline
 */
static void collectColdPathLoads(TR::Block *firstBlock, TR_BitVector &coldPathLoads)
   {
   TR_Stack<TR::Block*> blocksToCheck(TR::comp()->trMemory(), 8, false, stackAlloc);
   blocksToCheck.push(firstBlock);
   TR::NodeChecklist checklist(TR::comp());

   coldPathLoads.empty();
   while (!blocksToCheck.isEmpty())
      {
      TR::Block *block = blocksToCheck.pop();

      for (TR::TreeTop *tt = block->getFirstRealTreeTop(); tt->getNode()->getOpCodeValue() != TR::BBEnd; tt = tt->getNextTreeTop())
         collectDirectLoads(tt->getNode(), coldPathLoads, checklist);

      // Search for any successors that have not merged with the mainline
      for (auto itr = block->getSuccessors().begin(), end = block->getSuccessors().end(); itr != end; ++itr)
         {
         TR::Block *dest = (*itr)->getTo()->asBlock();
         if (dest != TR::comp()->getFlowGraph()->getEnd() && dest->getPredecessors().size() == 1)
            blocksToCheck.push(dest);
         }
      }
   }
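// collectDirectLoads is called above but is not part of this excerpt. A
// minimal sketch of the shape it is assumed to have: walk each subtree once
// (guarded by the checklist) and record the symbol reference number of every
// direct variable load encountered.
static void collectDirectLoads(TR::Node *node, TR_BitVector &loadSymRefs, TR::NodeChecklist &checklist)
   {
   if (checklist.contains(node))
      return;
   checklist.add(node);

   for (int32_t i = 0; i < node->getNumChildren(); ++i)
      collectDirectLoads(node->getChild(i), loadSymRefs, checklist);

   if (node->getOpCode().isLoadVarDirect())
      loadSymRefs.set(node->getSymbolReference()->getReferenceNumber());
   }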
void TR_ReachingDefinitions::initializeGenAndKillSetInfoForNode(TR::Node *node, TR_BitVector &defsKilled, bool seenException, int32_t blockNum, TR::Node *parent)
   {
   // Update gen and kill info for nodes in this subtree
   //
   int32_t i;

   if (node->getVisitCount() == comp()->getVisitCount())
      return;
   node->setVisitCount(comp()->getVisitCount());

   // Process the children first
   //
   for (i = node->getNumChildren()-1; i >= 0; --i)
      {
      initializeGenAndKillSetInfoForNode(node->getChild(i), defsKilled, seenException, blockNum, node);
      }

   bool irrelevantStore = false;
   scount_t nodeIndex = node->getLocalIndex();
   if (nodeIndex <= 0)
      {
      if (node->getOpCode().isStore() &&
          node->getSymbol()->isAutoOrParm() &&
          node->storedValueIsIrrelevant())
         {
         irrelevantStore = true;
         }
      else
         return;
      }

   bool foundDefsToKill = false;
   int32_t numDefNodes = 0;
   defsKilled.empty();

   TR::ILOpCode &opCode = node->getOpCode();
   TR::SymbolReference *symRef;
   TR::Symbol *sym;
   uint16_t symIndex;
   uint32_t num_aliases;

   if (_useDefInfo->_useDefForRegs &&
       (opCode.isLoadReg() || opCode.isStoreReg()))
      {
      sym = NULL;
      symRef = NULL;
      symIndex = _useDefInfo->getNumSymbols() + node->getGlobalRegisterNumber();
      num_aliases = 1;
      }
   else
      {
      symRef = node->getSymbolReference();
      sym = symRef->getSymbol();
      symIndex = symRef->getSymbol()->getLocalIndex();
      num_aliases = _useDefInfo->getNumAliases(symRef, _aux);
      }

   if (symIndex == NULL_USEDEF_SYMBOL_INDEX ||
       node->getOpCode().isCall() ||
       node->getOpCode().isFence() ||
       (parent && parent->getOpCode().isResolveCheck() && num_aliases > 1))
      {
      // A call or unresolved reference is a definition of all
      // symbols it is aliased with
      //
      numDefNodes = num_aliases;

      // For all symbols that are a mustdef of a call, kill defs of those symbols
      if (node->getOpCode().isCall())
         foundDefsToKill = false;
      }
   else if (irrelevantStore || _useDefInfo->isExpandedDefIndex(nodeIndex))
      {
      // A def-only node defines all symbols it is aliased with;
      // a use-def node (load) defines only the symbol itself
      //
      if (!irrelevantStore)
         {
         numDefNodes = num_aliases;
         numDefNodes = _useDefInfo->isExpandedUseDefIndex(nodeIndex) ? 1 : numDefNodes;

         if (!_useDefInfo->getDefsForSymbolIsZero(symIndex, _aux) &&
             (!sym ||
              (!sym->isShadow() && !sym->isMethod())))
            {
            foundDefsToKill = true;
            // defsKilled ORed with defsForSymbol(symIndex)
            _useDefInfo->getDefsForSymbol(defsKilled, symIndex, _aux);
            }
         if (node->getOpCode().isStoreIndirect())
            {
            int32_t memSymIndex = _useDefInfo->getMemorySymbolIndex(node);
            if (memSymIndex != -1 &&
                !_useDefInfo->getDefsForSymbolIsZero(memSymIndex, _aux))
               {
               foundDefsToKill = true;
               // defsKilled ORed with defsForSymbol(memSymIndex)
               _useDefInfo->getDefsForSymbol(defsKilled, memSymIndex, _aux);
               }
            }
         }
      else if (!_useDefInfo->getDefsForSymbolIsZero(symIndex, _aux))
         {
         numDefNodes = 1;
         foundDefsToKill = true;
         // defsKilled ORed with defsForSymbol(symIndex)
         _useDefInfo->getDefsForSymbol(defsKilled, symIndex, _aux);
         }
      }
   else
      {
      numDefNodes = 0;
      }

   if (foundDefsToKill)
      {
      if (_regularKillSetInfo[blockNum] == NULL)
         allocateContainer(&_regularKillSetInfo[blockNum]);
      *_regularKillSetInfo[blockNum] |= defsKilled;
      if (!seenException)
         {
         if (_exceptionKillSetInfo[blockNum] == NULL)
            allocateContainer(&_exceptionKillSetInfo[blockNum]);
         *_exceptionKillSetInfo[blockNum] |= defsKilled;
         }
      }

   if (_regularGenSetInfo[blockNum] == NULL)
      allocateContainer(&_regularGenSetInfo[blockNum]);
   else if (foundDefsToKill)
      *_regularGenSetInfo[blockNum] -= defsKilled;

   if (_exceptionGenSetInfo[blockNum] == NULL)
      allocateContainer(&_exceptionGenSetInfo[blockNum]);
   else if (foundDefsToKill && !seenException)
      *_exceptionGenSetInfo[blockNum] -= defsKilled;

   if (!irrelevantStore)
      {
      for (i = 0; i < numDefNodes; ++i)
         {
         _regularGenSetInfo[blockNum]->set(nodeIndex+i);
         _exceptionGenSetInfo[blockNum]->set(nodeIndex+i);
         }
      }
   else // Fake up the method entry def as the def index to "gen", so the use is not left without any def at all
      {
      _regularGenSetInfo[blockNum]->set(sym->getLocalIndex());
      _exceptionGenSetInfo[blockNum]->set(sym->getLocalIndex());
      }
   }
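// For context, a sketch of the standard forward dataflow that gen/kill sets
// like the ones built above feed (illustrative, generic bit-set code; this is
// not the solver used in this codebase):
//
//    IN(B)  = union of OUT(P) over predecessors P of B
//    OUT(B) = GEN(B) | (IN(B) - KILL(B))
//
// iterated to a fixed point.
#include <stdint.h>
#include <vector>

struct BlockSets
   {
   std::vector<uint64_t> gen, kill, in, out; // bit-packed def indices, equal widths
   };

static void solveReachingDefs(std::vector<BlockSets> &blocks,
                              const std::vector<std::vector<int32_t> > &preds)
   {
   bool changed = true;
   while (changed)
      {
      changed = false;
      for (size_t b = 0; b < blocks.size(); ++b)
         {
         BlockSets &B = blocks[b];
         // IN(B) = union of OUT(P) over all predecessors P
         for (size_t w = 0; w < B.in.size(); ++w)
            {
            B.in[w] = 0;
            for (size_t p = 0; p < preds[b].size(); ++p)
               B.in[w] |= blocks[preds[b][p]].out[w];
            }
         // OUT(B) = GEN(B) | (IN(B) & ~KILL(B)); re-iterate if anything changed
         for (size_t w = 0; w < B.out.size(); ++w)
            {
            uint64_t next = B.gen[w] | (B.in[w] & ~B.kill[w]);
            if (next != B.out[w])
               {
               B.out[w] = next;
               changed = true;
               }
            }
         }
      }
   }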