// For each block in the CFG build the gen and kill set for this analysis.
// Go in treetop order, which guarantees that we see the correct (i.e. first)
// evaluation point for each node.
//
void TR_ReachingDefinitions::initializeGenAndKillSetInfo()
   {
   TR::Block *block;
   int32_t blockNum = 0;
   bool seenException = false;
   // Scratch bit-vector reused across trees; stack-region allocated so it
   // dies with this call.
   TR_BitVector defsKilled(getNumberOfBits(), trMemory()->currentStackRegion());

   comp()->incVisitCount();
   for (TR::TreeTop *treeTop = comp()->getStartTree(); treeTop; treeTop = treeTop->getNextTreeTop())
      {
      TR::Node *node = treeTop->getNode();

      if (node->getOpCodeValue() == TR::BBStart)
         {
         // Entering a new block: reset the per-block state.
         block = node->getBlock();
         blockNum = block->getNumber();
         seenException = false;
         if (traceRD())
            traceMsg(comp(), "\nNow generating gen and kill information for block_%d\n", blockNum);
         continue;
         }

#if DEBUG
      // At block end, dump the sets accumulated for this block (trace builds).
      if (node->getOpCodeValue() == TR::BBEnd && traceRD())
         {
         traceMsg(comp(), " Block %d:\n", blockNum);
         traceMsg(comp(), " Gen set ");
         if (_regularGenSetInfo[blockNum])
            _regularGenSetInfo[blockNum]->print(comp());
         else
            traceMsg(comp(), "{}");
         traceMsg(comp(), "\n Kill set ");
         if (_regularKillSetInfo[blockNum])
            _regularKillSetInfo[blockNum]->print(comp());
         else
            traceMsg(comp(), "{}");
         traceMsg(comp(), "\n Exception Gen set ");
         if (_exceptionGenSetInfo[blockNum])
            _exceptionGenSetInfo[blockNum]->print(comp());
         else
            traceMsg(comp(), "{}");
         traceMsg(comp(), "\n Exception Kill set ");
         if (_exceptionKillSetInfo[blockNum])
            _exceptionKillSetInfo[blockNum]->print(comp());
         else
            traceMsg(comp(), "{}");
         continue;
         }
#endif

      // Accumulate this tree's gen/kill contributions into the block's sets.
      initializeGenAndKillSetInfoForNode(node, defsKilled, seenException, blockNum, NULL);
      // Once a tree with checks has been seen, later defs in the block may
      // reach the exception edge.
      if (!seenException && treeHasChecks(treeTop))
         seenException = true;
      }
   }
// Simplify a complete expression tree. // Returns the next treetop to be processed. // TR::TreeTop * OMR::Simplifier::simplify(TR::TreeTop * treeTop, TR::Block * block) { TR::Node * node = treeTop->getNode(); if (node->getVisitCount() == comp()->getVisitCount()) return treeTop->getNextTreeTop(); // Note that this call to simplify may cause the treetops before or after // this treetop to be removed, so we can't hold the previous or next // treetop locally across this call. // _curTree = treeTop; node = simplify(node, block); treeTop->setNode(node); // Grab the next treetop AFTER simplification of the current treetop, since // the next may be affected by simplification. // TR::TreeTop * next = _curTree->getNextTreeTop(); // If the node is null, this treetop can be removed // if (node == NULL && (!block->getPredecessors().empty() || !block->getExceptionPredecessors().empty())) TR::TransformUtil::removeTree(comp(), treeTop); return next; }
// Decide whether this tree is a candidate for live-range reduction analysis.
// Control-flow trees, monitor/throw trees, register stores, and trees we have
// already moved are never candidates; otherwise a tree qualifies only if it
// evaluates at least one node for the first time.
bool TR_LocalLiveRangeReduction::isNeedToBeInvestigated(TR_TreeRefInfo *treeRefInfo)
   {
   TR::Node *node = treeRefInfo->getTreeTop()->getNode();
   TR::ILOpCode &opCode = node->getOpCode();

   // Branches, returns, gotos, multi-target jumps and block boundaries stay put.
   if (opCode.isBranch() || opCode.isReturn() || opCode.isGoto() ||
       opCode.isJumpWithMultipleTargets() ||
       opCode.getOpCodeValue() == TR::BBStart ||
       opCode.getOpCodeValue() == TR::BBEnd)
      return false;

   // Look through a treetop / resolve / null-check wrapper at the payload node.
   if (opCode.getOpCodeValue() == TR::treetop || opCode.isResolveOrNullCheck())
      node = node->getFirstChild();

   // Monitor enters/exits and throws pin the tree in place.
   if (nodeMaybeMonitor(node) || node->getOpCodeValue() == TR::athrow)
      return false;

   /* Need to add support for this: stop before loadReg to same register */
   if (node->getOpCode().isStoreReg())
      return false;

   // Trees already moved by this pass are not revisited.
   if (_movedTreesList.find(treeRefInfo))
      return false;

   // Only trees that first-reference at least one node are interesting.
   return treeRefInfo->getFirstRefNodesList()->getSize() != 0;
   }
// Replace the value flowing out through register `reg` in the outgoing
// GlRegDeps with `newValueNode`: reuse it directly when it is already a
// load of the same register, otherwise wrap it in a PassThrough tagged
// with the register number.
void TR::RegDepCopyRemoval::updateSingleRegDep(TR_GlobalRegisterNumber reg, TR::Node *newValueNode)
   {
   RegDepInfo &dep = getRegDepInfo(reg);
   // Incoming deps on BBStart must never be rewritten here.
   TR_ASSERT(_treetop->getNode()->getOpCodeValue() != TR::BBStart, "attempted to change %s in incoming GlRegDeps on BBStart n%un\n", registerName(reg), _treetop->getNode()->getGlobalIndex());

   TR::Node *prevChild = _regDeps->getChild(dep.childIndex);
   TR_ASSERT(prevChild == dep.node, "childIndex and node inconsistent in RegDepInfo for %s\n", registerName(reg));
   TR_ASSERT(prevChild->getGlobalRegisterNumber() == reg, "childIndex and reg inconsistent in RegDepInfo for %s\n", registerName(reg));

   if (newValueNode->getOpCode().isLoadReg() &&
       newValueNode->getGlobalRegisterNumber() == reg)
      {
      // The new value is already a load of this very register; use it as-is.
      _regDeps->setAndIncChild(dep.childIndex, newValueNode);
      }
   else
      {
      // Otherwise wrap the value in a PassThrough carrying the register number.
      TR::Node *newOutgoingPassThroughNode = TR::Node::create(TR::PassThrough, 1, newValueNode);
      newOutgoingPassThroughNode->setGlobalRegisterNumber(reg);
      _regDeps->setAndIncChild(dep.childIndex, newOutgoingPassThroughNode);
      }

   // Release the old child only after the replacement is attached, so any
   // subtree shared between old and new values is not freed prematurely.
   prevChild->recursivelyDecReferenceCount();
   rememberNodeChoice(reg, newValueNode);
   }
// Verify that every node referenced in [firstTreeTop, exitTreeTop] has all of
// its references accounted for within that range.
void TR::ValidateNodeRefCountWithinBlock::validate(TR::TreeTop *firstTreeTop, TR::TreeTop *exitTreeTop)
   {
   TR::TreeTop *stopTree = exitTreeTop->getNextTreeTop();

   // Pass 1: seed each node's localIndex with its reference count; the pass-1
   // walk decrements it once per reference encountered in the block.
   _nodeChecklist.empty();
   for (TR::TreeTop *tt = firstTreeTop; tt != stopTree; tt = tt->getNextTreeTop())
      {
      TR::Node *rootNode = tt->getNode();
      rootNode->setLocalIndex(rootNode->getReferenceCount());
      validateRefCountPass1(rootNode);
      }

   // Pass 2: walk the same range again and check every localIndex returned
   // to 0, i.e. no references leaked outside the block.
   // NOTE: walking the trees backwards causes huge stack usage in
   // validateRefCountPass2, hence the forward iteration.
   _nodeChecklist.empty();
   for (TR::TreeTop *tt = firstTreeTop; tt != stopTree; tt = tt->getNextTreeTop())
      validateRefCountPass2(tt->getNode());
   }
// Convert a long to float/double via SSE, folding a one-use memory operand
// into the conversion instruction when possible.
static TR::Register *l2fd(TR::Node *node, TR::RealRegister *target, TR_X86OpCodes opRegMem8, TR_X86OpCodes opRegReg8, TR::CodeGenerator *cg)
   {
   TR::Node *child = node->getFirstChild();

   TR_ASSERT(cg->useSSEForSinglePrecision(), "assertion failure");

   bool foldableLoad = child->getRegister() == NULL &&
                       child->getReferenceCount() == 1 &&
                       child->getOpCode().isLoadVar();
   if (foldableLoad)
      {
      // Single-use unevaluated load: use the reg-mem form directly.
      TR::MemoryReference *memRef = generateX86MemoryReference(child, cg);
      generateRegMemInstruction(opRegMem8, node, target, memRef, cg);
      memRef->decNodeReferenceCounts(cg);
      }
   else
      {
      // Otherwise evaluate the child and use the reg-reg form.
      TR::Register *intReg = cg->evaluate(child);
      generateRegRegInstruction(opRegReg8, node, target, intReg, cg);
      cg->decReferenceCount(child);
      }

   node->setRegister(target);
   return target;
   }
// Simplify a sub-tree. // Returns the replaced root of the sub-tree, which may be null if the sub-tree // has been removed. // TR::Node * OMR::Simplifier::simplify(TR::Node * node, TR::Block * block) { // Set the visit count for this node to prevent recursion into it // vcount_t visitCount = comp()->getVisitCount(); node->setVisitCount(visitCount); if (node->nodeRequiresConditionCodes()) { // On Java, nodes that require condition codes must not be simplified. dftSimplifier(node, block, (TR::Simplifier *) this); return node; } // Simplify this node. // Note that the processing routine for the node is responsible for // simplifying its children. // TR::Node * newNode = simplifierOpts[node->getOpCodeValue()](node, block, (TR::Simplifier *) this); if ((node != newNode) || (newNode && ((newNode->getOpCodeValue() != node->getOpCodeValue()) || (newNode->getNumChildren() != node->getNumChildren())))) requestOpt(OMR::localCSE, true, block); return newNode; }
// Build the instruction sequence for a computed indirect call under the
// AMD64 system linkage: evaluate the VFT (target address), marshal the
// outgoing arguments, emit the indirect CALL, and attach the volatile /
// return-register post-conditions on a label after the call.
TR::Register *
TR::AMD64SystemLinkage::buildIndirectDispatch(TR::Node *callNode)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR_ASSERT(methodSymRef->getSymbol()->castToMethodSymbol()->isComputed(), "system linkage only supports computed indirect call for now %p\n", callNode);

   // Evaluate VFT
   //
   TR::Register *vftRegister;
   TR::Node *vftNode = callNode->getFirstChild();
   if (vftNode->getRegister())
      {
      vftRegister = vftNode->getRegister();
      }
   else
      {
      vftRegister = cg()->evaluate(vftNode);
      }

   // Allocate adequate register dependencies.
   //
   // pre = number of argument registers + 1 for VFT register
   // post = number of volatile + VMThread + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters() + 1;
   uint32_t post = getProperties().getNumVolatileRegisters() + 1 + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   // Only one post-condition (the VFT register) is attached to the call
   // itself; the volatile/return conditions go on the label below.
   TR::RegisterDependencyConditions *callDeps = generateRegisterDependencyConditions(pre, 1, cg());

   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   callDeps->addPostCondition(vftRegister, scratchRegIndex, cg());
   callDeps->stopAddingPostConditions();

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   // NOTE(review): memoryArgSize is not consumed below — presumably the
   // system linkage needs no post-call stack adjustment here; confirm.
   int32_t memoryArgSize = buildArgs(callNode, callDeps);

   // Dispatch
   //
   generateRegInstruction(CALLReg, callNode, vftRegister, callDeps, cg());
   cg()->resetIsLeafMethod();

   // Build label post-conditions
   //
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());
   TR::Register *returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
/**
 * A runtime guard block may have monitor stores and privarg stores along with the guard
 * it self. This method will rearrange these stores and split the block, managing any
 * uncommoning necessary for eventual block order.
 *
 * The provided block will become the privarg block, containing any privarg stores and additonal
 * temps for uncommoning. It must be evaluated first. The returned block will contain monitor
 * stores and the guard. If no split is required, the provided block will be returned.
 *
 * @param comp Compilation object
 * @param block Block to manipulate
 * @param cfg Current CFG
 * @return The block containing the guard.
 */
static TR::Block* splitRuntimeGuardBlock(TR::Compilation *comp, TR::Block* block, TR::CFG *cfg)
   {
   TR::NodeChecklist checklist(comp);
   TR::TreeTop *start = block->getFirstRealTreeTop();
   TR::TreeTop *guard = block->getLastRealTreeTop();
   TR::TreeTop *firstPrivArg = NULL;
   TR::TreeTop *firstMonitor = NULL;

   // Manage the unexpected case that monitors and priv args are reversed
   bool privThenMonitor = false;

   TR_ASSERT(isMergeableGuard(guard->getNode()), "last node must be guard %p", guard->getNode());

   // Search for privarg and monitor stores
   // Only commoned nodes under the guard are required to be anchored, due to the guard being
   // evaluated before the monitor stores later on
   bool anchoredTemps = false;
   for (TR::TreeTop *tt = start; tt && tt->getNode()->getOpCodeValue() != TR::BBEnd; tt = tt->getNextTreeTop())
      {
      TR::Node * node = tt->getNode();

      if (node->getOpCode().hasSymbolReference() && node->getSymbol()->holdsMonitoredObject())
         firstMonitor = firstMonitor == NULL ? tt : firstMonitor;
      else if (node->chkIsPrivatizedInlinerArg())
         {
         if (firstPrivArg == NULL)
            {
            firstPrivArg = tt;
            privThenMonitor = (firstMonitor == NULL);
            }
         }
      else if (isMergeableGuard(node))
         anchoredTemps |= anchorCommonNodes(comp, node, start, checklist);
      else
         TR_ASSERT(0, "Node other than monitor or privarg store %p before runtime guard", node);
      }

   // If there are monitors then privargs, they must be swapped around, such that all privargs are
   // evaluated first.  The splice below moves the whole monitor run to sit
   // between the last privarg and the guard.
   if (firstPrivArg && firstMonitor && !privThenMonitor)
      {
      TR::TreeTop *monitorEnd = firstPrivArg->getPrevTreeTop();
      firstMonitor->getPrevTreeTop()->join(firstPrivArg);
      guard->getPrevTreeTop()->join(firstMonitor);
      monitorEnd->join(guard);
      }

   // If there were temps created or privargs in the block, perform a split
   TR::TreeTop *split = NULL;
   if (firstPrivArg)
      split = firstMonitor ? firstMonitor : guard;
   else if (anchoredTemps)
      split = start;

   if (split)
      return block->split(split, cfg, true /* fixupCommoning */, false /* copyExceptionSuccessors */);
   return block;
   }
// Walk the subtree rooted at `node` once (guarded by visit counts) and record
// every symbol reference that appears in it into `symbolReferencesInNode`.
// Optionally reports, through the out-parameters: the number of sub-nodes
// about to die (`numDeadSubNodes`), internal pointers, arraylet shadow
// accesses, and loads that must not be moved under a branch.
// Always returns true.
bool collectSymbolReferencesInNode(TR::Node *node,
      TR::SparseBitVector &symbolReferencesInNode,
      int32_t *numDeadSubNodes, vcount_t visitCount, TR::Compilation *comp,
      bool *seenInternalPointer, bool *seenArraylet, bool *cantMoveUnderBranch)
   {
   // The visit count in the node must be maintained by this method.
   //
   vcount_t oldVisitCount = node->getVisitCount();
   if (oldVisitCount == visitCount || oldVisitCount == comp->getVisitCount())
      return true;
   node->setVisitCount(comp->getVisitCount());

   //diagnostic("Walking node %p, height=%d, oldVisitCount=%d, visitCount=%d, compVisitCount=%d\n", node, *height, oldVisitCount, visitCount,comp->getVisitCount());

   // For all other subtrees collect all symbols that could be killed between
   // here and the next reference.
   //
   for (int32_t i = node->getNumChildren()-1; i >= 0; i--)
      {
      TR::Node *child = node->getChild(i);
      // A multiply-referenced child on its last future use is about to die.
      if (child->getFutureUseCount() == 1 &&
          child->getReferenceCount() > 1 &&
          !child->getOpCode().isLoadConst())
         *numDeadSubNodes = (*numDeadSubNodes) + 1;

      collectSymbolReferencesInNode(child, symbolReferencesInNode, numDeadSubNodes, visitCount, comp,
            seenInternalPointer, seenArraylet, cantMoveUnderBranch);
      }

   // detect if this is a direct load that shouldn't be moved under a branch (because an update was moved past
   // this load by treeSimplification)
   if (cantMoveUnderBranch &&
       (node->getOpCode().isLoadVarDirect() || node->getOpCode().isLoadReg()) &&
       node->isDontMoveUnderBranch())
      *cantMoveUnderBranch = true;

   if (seenInternalPointer && node->isInternalPointer() && node->getReferenceCount() > 1)
      *seenInternalPointer = true;

   if (seenArraylet)
      {
      if (node->getOpCode().hasSymbolReference() &&
          node->getSymbolReference()->getSymbol()->isArrayletShadowSymbol() &&
          node->getReferenceCount() > 1)
         {
         *seenArraylet = true;
         }
      }

   // Add this node's symbol reference to the set
   if (node->getOpCode().hasSymbolReference())
      {
      symbolReferencesInNode[node->getSymbolReference()->getReferenceNumber()]=true;
      }

   return true;
   }
/**
 * Sums the encoded length of the store/load instructions needed to marshal
 * each register-resident argument of \p callNode for this linkage.
 * Arguments that spill to memory (beyond the register counts) add nothing.
 *
 * @param callNode the call whose outgoing arguments are being set up
 * @param cg the code generator
 * @return the total instruction length in bytes for setting up arguments
 */
int32_t
TR::S390CallSnippet::instructionCountForArguments(TR::Node * callNode, TR::CodeGenerator * cg)
   {
   int32_t intArgNum = 0, floatArgNum = 0, count = 0;
   TR::Linkage* linkage = cg->getLinkage(callNode->getSymbol()->castToMethodSymbol()->getLinkageConvention());
   int32_t argStart = callNode->getFirstArgumentIndex();

   for (int32_t i = argStart; i < callNode->getNumChildren(); i++)
      {
      TR::Node * child = callNode->getChild(i);
      switch (child->getDataType())
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
            // Small ints take one 32-bit store when a register is available.
            if (intArgNum < linkage->getNumIntegerArgumentRegisters())
               {
               count += TR::InstOpCode::getInstructionLength(TR::InstOpCode::ST);
               }
            intArgNum++;
            break;
         case TR::Address:
            if (intArgNum < linkage->getNumIntegerArgumentRegisters())
               {
               count += TR::InstOpCode::getInstructionLength(TR::InstOpCode::getLoadOpCode());
               }
            intArgNum++;
            break;
         case TR::Int64:
            if (intArgNum < linkage->getNumIntegerArgumentRegisters())
               {
               count += TR::InstOpCode::getInstructionLength(TR::InstOpCode::getLoadOpCode());
               // On 32-bit a long occupies a register pair; account for the
               // second half when another integer argument register remains.
               if ((TR::Compiler->target.is32Bit()) && intArgNum < linkage->getNumIntegerArgumentRegisters() - 1)
                  {
                  count += TR::InstOpCode::getInstructionLength(TR::InstOpCode::getLoadOpCode());
                  }
               }
            intArgNum += TR::Compiler->target.is64Bit() ? 1 : 2;
            break;
         case TR::Float:
            if (floatArgNum < linkage->getNumFloatArgumentRegisters())
               {
               count += TR::InstOpCode::getInstructionLength(TR::InstOpCode::LE);
               }
            floatArgNum++;
            break;
         case TR::Double:
            if (floatArgNum < linkage->getNumFloatArgumentRegisters())
               {
               count += TR::InstOpCode::getInstructionLength(TR::InstOpCode::LD);
               }
            floatArgNum++;
            break;
         }
      }
   return count;
   }
// Remove the dependency for `registerNum` from the GlRegDeps child of
// `parent` (e.g. a branch or block-end node).  If the GlRegDeps node ends
// up with no children it is removed from the parent entirely.
static void removeGlRegDep(TR::Node * parent, TR_GlobalRegisterNumber registerNum, TR::Block *containingBlock, TR::Optimization *opt)
   {
   if (parent->getNumChildren() == 0)
      return;

   TR_ASSERT(parent->getNumChildren() > 0, "expected TR::GlRegDeps %p", parent);
   TR::Node * predGlRegDeps = parent->getLastChild();

   if (predGlRegDeps->getOpCodeValue() != TR::GlRegDeps) // could be already removed
      return;

   TR_ASSERT(predGlRegDeps->getOpCodeValue() == TR::GlRegDeps, "expected TR::GlRegDeps");

   for (int32_t i = predGlRegDeps->getNumChildren() - 1; i >= 0; --i)
      if (predGlRegDeps->getChild(i)->getGlobalRegisterNumber() == registerNum)
         {
         dumpOptDetails(opt->comp(), "%sRemove GlRegDep : %p\n", opt->optDetailString(), predGlRegDeps->getChild(i));
         TR::Node *removedChild = predGlRegDeps->removeChild(i);
         if (removedChild->getReferenceCount() <= 1)
            {
            // The only remaining parent is the RegStore. Another pass of
            // deadTrees may be able to eliminate that.
            //
            opt->requestOpt(OMR::deadTreesElimination, true, containingBlock);
            }
         break;
         }

   if (predGlRegDeps->getNumChildren() == 0)
      parent->removeLastChild();
   }
bool TR_LocalLiveRangeReduction::isWorthMoving(TR_TreeRefInfo *tree) { bool usesRegisterPairsForLongs = cg()->usesRegisterPairsForLongs(); int32_t numFirstRefNodesFloat=0; int32_t numFirstRefNodesInt=0; int32_t numLastRefNodesFloat=0; int32_t numLastRefNodesInt=0; TR::Node *node; //check first references ListIterator<TR::Node> listIt(tree->getFirstRefNodesList()); for ( node = listIt.getFirst(); node != NULL; node = listIt.getNext()) { TR::ILOpCode &opCode = node->getOpCode(); if (opCode.isFloatingPoint()) numFirstRefNodesFloat++; else { //all integers, signed and unsined if (opCode.isLong()&& usesRegisterPairsForLongs) numFirstRefNodesInt+=2; else numFirstRefNodesInt++; } } //check last references listIt.set(tree->getLastRefNodesList()); for ( node = listIt.getFirst(); node != NULL; node = listIt.getNext()) { TR::ILOpCode &opCode = node->getOpCode(); if (opCode.isFloatingPoint()) numLastRefNodesFloat++; else { //all integers, signed and unsined if (opCode.isLong()&& usesRegisterPairsForLongs) numLastRefNodesInt+=2; else numLastRefNodesInt++; } } if (((numLastRefNodesInt < numFirstRefNodesInt) && (numLastRefNodesFloat <= numFirstRefNodesFloat)) || ((numLastRefNodesFloat < numFirstRefNodesFloat) && (numLastRefNodesInt <= numFirstRefNodesInt))) return true; return false; }
// A naive no-aliasing-needed check to see if a treetop has any chance of killing anything // Used by no and low opt CodeGenPrep phase passes bool OMR::TreeTop::isPossibleDef() { TR::Node *defNode = self()->getNode()->getOpCodeValue() == TR::treetop ? self()->getNode()->getFirstChild() : self()->getNode(); if (defNode->getOpCode().isLikeDef()) { return true; } else { return false; } }
// Check a validation condition; on failure, record the invalid state, print a
// diagnostic describing the offending node plus the caller-supplied message,
// and abort via FAIL().
void TR::ILValidator::validityRule(Location &location, bool condition, const char *formatStr, ...)
   {
   if (condition)
      return;

   _isValidSoFar = false;

   TR::Node *node = location.currentNode();
   printDiagnostic("*** VALIDATION ERROR ***\nNode: %s n%dn\nMethod: %s\n", node->getOpCode().getName(), node->getGlobalIndex(), comp()->signature());

   va_list args;
   va_start(args, formatStr);
   vprintDiagnostic(formatStr, args);
   va_end(args);

   printDiagnostic("\n");
   FAIL();
   }
// Update liveness bookkeeping for one node occurrence: on its first
// occurrence the node joins the live set (unless it is an unreferenced
// treetop), and each occurrence consumes one future reference until the
// node goes dead.  Flags over-referenced and already-dead nodes.
void TR::ILValidator::updateNodeState(Location &newLocation)
   {
   TR::Node *node = newLocation.currentNode();
   NodeState &state = _nodeStates[node];
   if (node->getReferenceCount() == state._futureReferenceCount)
      {
      // First occurrence -- do some bookkeeping
      //
      if (node->getReferenceCount() == 0)
         {
         validityRule(newLocation, node->getOpCode().isTreeTop(), "Only nodes with isTreeTop opcodes can have refcount == 0");
         }
      else
         {
         _liveNodes.add(node);
         }
      }

   if (_liveNodes.contains(node))
      {
      // Consume one reference; the node dies when none remain.
      validityRule(newLocation, state._futureReferenceCount >= 1, "Node already has reference count 0");
      if (--state._futureReferenceCount == 0)
         {
         _liveNodes.remove(node);
         }
      }
   else
      {
      validityRule(newLocation, node->getOpCode().isTreeTop(), "Node has already gone dead");
      }

   if (isLoggingEnabled())
      {
      static const char *traceLiveNodesDuringValidation = feGetEnv("TR_traceLiveNodesDuringValidation");
      if (traceLiveNodesDuringValidation && !_liveNodes.isEmpty())
         {
         traceMsg(comp(), " -- Live nodes: {");
         char *separator = "";
         for (LiveNodeWindow::Iterator lnwi(_liveNodes); lnwi.currentNode(); ++lnwi)
            {
            traceMsg(comp(), "%sn%dn", separator, lnwi.currentNode()->getGlobalIndex());
            separator = ", ";
            }
         traceMsg(comp(), "}\n");
         }
      }
   }
// Drop candidate treetops whose root expression (or any subexpression) was
// not marked supported in _supportedExpressions (keyed by node global index).
void TR_ExpressionsSimplification::removeUnsupportedCandidates()
   {
   ListIterator<TR::TreeTop> candidateTTs(_candidateTTs);
   for (TR::TreeTop *candidateTT = candidateTTs.getFirst(); candidateTT; candidateTT = candidateTTs.getNext())
      {
      TR::Node *candidate = candidateTT->getNode();
      if (!_supportedExpressions->get(candidate->getGlobalIndex()))
         {
         if (trace())
            traceMsg(comp(), "Removing candidate %p which is unsupported or has unsupported subexpressions\n", candidate);
         // NOTE(review): removes the current element from the list being
         // iterated; assumes ListIterator tolerates that — confirm.
         _candidateTTs->remove(candidateTT);
         }
      }
   }
// Walk all node occurrences of the method in postorder, tracking node
// liveness, and verify that no node is live across the end of an extended
// basic block.
void TR::ValidateLivenessBoundaries::validate(TR::ResolvedMethodSymbol *methodSymbol)
   {
   /**
    * These must be initialized at the start of every validate call,
    * since the same Rule object can be used multiple times to validate
    * the IL at different stages of the compilation.
    */
   TR::NodeSideTable<TR::NodeState> nodeStates(comp()->trMemory());
   /**
    * Similar to NodeChecklist, but more compact. Rather than track
    * node global indexes, which can be sparse, this tracks local
    * indexes, which are relatively dense.  Furthermore, the _basis field
    * allows us not to waste space on nodes we saw in prior blocks.
    * As the name suggests, used to keep track of live Nodes.
    */
   TR::LiveNodeWindow liveNodes(nodeStates, comp()->trMemory());

   TR::TreeTop *start = methodSymbol->getFirstTreeTop();
   TR::TreeTop *stop = methodSymbol->getLastTreeTop();
   for (TR::PostorderNodeOccurrenceIterator iter(start, comp(), "VALIDATE_LIVENESS_BOUNDARIES"); iter != stop; ++iter)
      {
      TR::Node *node = iter.currentNode();
      updateNodeState(node, nodeStates, liveNodes);
      if (node->getOpCodeValue() == TR::BBEnd)
         {
         /* Determine whether this is the end of an extended block */
         bool isEndOfExtendedBlock = false;
         TR::TreeTop *nextTree = iter.currentTree()->getNextTreeTop();
         if (nextTree)
            {
            TR::checkILCondition(node, nextTree->getNode()->getOpCodeValue() == TR::BBStart, comp(), "Expected BBStart after BBEnd");
            isEndOfExtendedBlock = ! nextTree->getNode()->getBlock()->isExtensionOfPreviousBlock();
            }
         else
            {
            // Last tree in the method always terminates an extended block.
            isEndOfExtendedBlock = true;
            }

         if (isEndOfExtendedBlock)
            {
            /* Ensure there are no nodes live across the end of a block */
            validateEndOfExtendedBlockBoundary(node, liveNodes);
            }
         }
      }
   }
//returns true if there is first reference of a call or check bool TR_LocalLiveRangeReduction::containsCallOrCheck(TR_TreeRefInfo *treeRefInfo, TR::Node *node) { if ((node->getOpCode().isCall() && (node->getReferenceCount()==1 || treeRefInfo->getFirstRefNodesList()->find(node))) || node->getOpCode().isCheck()) { return true; } for (int32_t i = 0; i < node->getNumChildren(); i++) { TR::Node *child = node->getChild(i); if (child->getReferenceCount()==1 || treeRefInfo->getFirstRefNodesList()->find(child)) return containsCallOrCheck(treeRefInfo, child); } return false; }
// Register patch metadata for the immediate encoded at `cursor`: when this
// instruction was recorded as a static PIC site the immediate is a class
// pointer, and when recorded as a static method PIC site it is a method
// pointer whose declaring class drives the patching.
void TR::PPCTrg1ImmInstruction::addMetaDataForCodeAddress(uint8_t *cursor)
   {
   TR::Compilation *comp = cg()->comp();

   bool isClassPICSite =
      std::find(comp->getStaticPICSites()->begin(), comp->getStaticPICSites()->end(), this) != comp->getStaticPICSites()->end();
   if (isClassPICSite)
      {
      TR::Node *node = getNode();
      // The immediate holds the class pointer (64- or 32-bit literal).
      void *clazz = (void *)(TR::Compiler->target.is64Bit() ? node->getLongInt() : node->getInt());
      cg()->jitAddPicToPatchOnClassUnload(clazz, (void *)cursor);
      }

   bool isMethodPICSite =
      std::find(comp->getStaticMethodPICSites()->begin(), comp->getStaticMethodPICSites()->end(), this) != comp->getStaticMethodPICSites()->end();
   if (isMethodPICSite)
      {
      TR::Node *node = getNode();
      // Resolve the method literal to its declaring class and patch on that
      // class's unload.
      TR_OpaqueMethodBlock *method =
         (TR_OpaqueMethodBlock *)(TR::Compiler->target.is64Bit() ? node->getLongInt() : node->getInt());
      cg()->jitAddPicToPatchOnClassUnload(
         (void *)(cg()->fe()->createResolvedMethod(cg()->trMemory(), method, comp->getCurrentMethod())->classOfMethod()),
         (void *)cursor);
      }
   }
// Emit a signed integer divide (32- or 64-bit) of the two children and
// return the freshly allocated result register.
static TR::Register *idivHelper(TR::Node *node, bool is64bit, TR::CodeGenerator *cg)
   {
   // TODO: Add checks for special cases
   TR::Node *dividendNode = node->getFirstChild();
   TR::Node *divisorNode = node->getSecondChild();

   TR::Register *dividendReg = cg->evaluate(dividendNode);
   TR::Register *divisorReg = cg->evaluate(divisorNode);
   TR::Register *resultReg = cg->allocateRegister();

   generateTrg1Src2Instruction(cg,
      is64bit ? TR::InstOpCode::sdivx : TR::InstOpCode::sdivw,
      node, resultReg, dividendReg, divisorReg);

   dividendNode->decReferenceCount();
   divisorNode->decReferenceCount();
   node->setRegister(resultReg);
   return resultReg;
   }
// resolved casts that are not to abstract, interface, or array need a super test bool OMR::TreeEvaluator::instanceOfOrCheckCastNeedSuperTest(TR::Node * node, TR::CodeGenerator *cg) { TR::Node *castClassNode = node->getSecondChild(); TR::MethodSymbol *helperSym = node->getSymbol()->castToMethodSymbol(); TR::SymbolReference *castClassSymRef = castClassNode->getSymbolReference(); if (!TR::TreeEvaluator::isStaticClassSymRef(castClassSymRef)) { // We could theoretically do a super test on something with no sym, but it would require significant // changes to platform code. The benefit is little at this point (shows up from reference arraycopy reductions) if (cg->supportsInliningOfIsInstance() && node->getOpCodeValue() == TR::instanceof && node->getSecondChild()->getOpCodeValue() != TR::loadaddr) return true; else return false; } TR::StaticSymbol *castClassSym = castClassSymRef->getSymbol()->getStaticSymbol(); if (castClassSymRef->isUnresolved()) { return false; } else { TR_OpaqueClassBlock * clazz; // If the class is a regular class (i.e., not an interface nor an array) and // not known to be a final class, an inline superclass test can be generated. // If the helper does not preserve all the registers there will not be // enough registers to do the superclass test inline. // Also, don't generate the superclass test if optimizing for space. // if (castClassSym && (clazz = (TR_OpaqueClassBlock *) castClassSym->getStaticAddress()) && !TR::Compiler->cls.isClassArray(cg->comp(), clazz) && !TR::Compiler->cls.isInterfaceClass(cg->comp(), clazz) && !TR::Compiler->cls.isClassFinal(cg->comp(), clazz) && helperSym->preservesAllRegisters() && !cg->comp()->getOption(TR_OptimizeForSpace)) return true; } return false; }
// Scan treetops in [tt, exitTree) looking for an acall to
// StringBuffer.toString() / StringBuilder.toString() (selected by
// useStringBuffer).  When such a call is found and its receiver is
// `newBuffer`, the treetop is recorded in *toStringTree.  Returns the
// treetop where scanning stopped: the matching toString() tree, or
// exitTree if none was found.
TR::TreeTop *searchForToStringCall(TR::ValuePropagation *vp,TR::TreeTop *tt, TR::TreeTop *exitTree,
                                   TR::Node *newBuffer, vcount_t visitCount, TR::TreeTop **toStringTree, bool useStringBuffer)
   {
   for (;tt != exitTree; tt = tt->getNextRealTreeTop())
      {
      TR::Node *node = tt->getNode();
      if (node->getNumChildren() == 1 &&
          node->getFirstChild()->getOpCodeValue() == TR::acall)
         {
         if (checkMethodSignature(vp,node->getFirstChild()->getSymbolReference(),
               (useStringBuffer ?
                "java/lang/StringBuffer.toString()Ljava/lang/String;" :
                "java/lang/StringBuilder.toString()Ljava/lang/String;")))
            {
            TR::Node *call = node->getFirstChild();
            // Record the tree only when the receiver is the buffer we track;
            // NOTE(review): scanning still stops at any toString() call even
            // when the receiver differs — presumably intentional, confirm.
            if (call->getFirstChild() == newBuffer)
               *toStringTree = tt;
            return tt;
            }
         }
      }
   return tt;
   }
// Emit a 32-bit add or subtract.  A constant second operand that fits an
// unsigned 12-bit immediate uses the immediate form; otherwise the constant
// is materialized into a scratch register (or the operand is evaluated).
static TR::Register *addOrSubInteger(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *lhsNode = node->getFirstChild();
   TR::Register *lhsReg = cg->evaluate(lhsNode);
   TR::Node *rhsNode = node->getSecondChild();
   TR::Register *resultReg = cg->allocateRegister();
   bool isAdd = node->getOpCode().isAdd();

   if (rhsNode->getOpCode().isLoadConst() && rhsNode->getRegister() == NULL)
      {
      int32_t value = rhsNode->getInt();
      if (constantIsUnsignedImm12(value))
         {
         // Constant fits the 12-bit unsigned immediate field.
         generateTrg1Src1ImmInstruction(cg,
            isAdd ? TR::InstOpCode::addimmw : TR::InstOpCode::subimmw,
            node, resultReg, lhsReg, value);
         }
      else
         {
         // Materialize the constant into a scratch register first.
         TR::Register *scratchReg = cg->allocateRegister();
         loadConstant32(cg, node, value, scratchReg);
         generateTrg1Src2Instruction(cg,
            isAdd ? TR::InstOpCode::addw : TR::InstOpCode::subw,
            node, resultReg, lhsReg, scratchReg);
         cg->stopUsingRegister(scratchReg);
         }
      }
   else
      {
      TR::Register *rhsReg = cg->evaluate(rhsNode);
      generateTrg1Src2Instruction(cg,
         isAdd ? TR::InstOpCode::addw : TR::InstOpCode::subw,
         node, resultReg, lhsReg, rhsReg);
      }

   node->setRegister(resultReg);
   lhsNode->decReferenceCount();
   rhsNode->decReferenceCount();
   return resultReg;
   }
// Evaluate lmulh: the high 64 bits of a 128-bit signed multiply.
// lmulh is generated for constant ldiv and the second child is the magic
// number; such a constant is usually a large odd value with little
// optimization opportunity, so it is simply materialized.
TR::Register *
OMR::ARM64::TreeEvaluator::lmulhEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *lhsNode = node->getFirstChild();
   TR::Register *lhsReg = cg->evaluate(lhsNode);
   TR::Node *rhsNode = node->getSecondChild();
   TR::Register *resultReg = cg->allocateRegister();
   TR::Register *rhsReg = NULL;
   TR::Register *constReg = NULL;

   if (rhsNode->getOpCode().isLoadConst() && rhsNode->getRegister() == NULL)
      {
      // Materialize the 64-bit constant into a scratch register.
      constReg = cg->allocateRegister();
      loadConstant64(cg, node, rhsNode->getLongInt(), constReg);
      rhsReg = constReg;
      }
   else
      {
      rhsReg = cg->evaluate(rhsNode);
      }

   generateTrg1Src2Instruction(cg, TR::InstOpCode::smulh, node, resultReg, lhsReg, rhsReg);

   if (constReg)
      cg->stopUsingRegister(constReg);

   lhsNode->decReferenceCount();
   rhsNode->decReferenceCount();
   node->setRegister(resultReg);
   return resultReg;
   }
// Evaluate imul (32-bit multiply).  A positive constant operand is first
// offered to convertMultiplyToShift; otherwise a constant-multiply helper
// or a plain register-register multiply is emitted.
TR::Register *
OMR::ARM64::TreeEvaluator::imulEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *lhsNode = node->getFirstChild();
   TR::Register *lhsReg = cg->evaluate(lhsNode);
   TR::Node *rhsNode = node->getSecondChild();
   TR::Register *resultReg;

   if (rhsNode->getOpCode().isLoadConst() && rhsNode->getRegister() == NULL)
      {
      int32_t value = rhsNode->getInt();
      if (value > 0 && cg->convertMultiplyToShift(node))
         {
         // The multiply has been rewritten as a shift; evaluate the new tree
         // and return its result directly.
         return cg->evaluate(node);
         }
      resultReg = cg->allocateRegister();
      mulConstant32(node, resultReg, lhsReg, value, cg);
      }
   else
      {
      TR::Register *rhsReg = cg->evaluate(rhsNode);
      resultReg = cg->allocateRegister();
      generateMulInstruction(cg, node, resultReg, lhsReg, rhsReg);
      }

   lhsNode->decReferenceCount();
   rhsNode->decReferenceCount();
   node->setRegister(resultReg);
   return resultReg;
   }
// unresolved casts or casts to things other than abstract or interface benefit from // an equality test bool OMR::TreeEvaluator::instanceOfOrCheckCastNeedEqualityTest(TR::Node * node, TR::CodeGenerator *cg) { TR::Node *castClassNode = node->getSecondChild(); TR::SymbolReference *castClassSymRef = castClassNode->getSymbolReference(); if (!TR::TreeEvaluator::isStaticClassSymRef(castClassSymRef)) { return true; } TR::StaticSymbol *castClassSym = castClassSymRef->getSymbol()->getStaticSymbol(); if (castClassSymRef->isUnresolved()) { return false; } else { TR_OpaqueClassBlock * clazz; if (castClassSym && (clazz = (TR_OpaqueClassBlock *) castClassSym->getStaticAddress()) && !TR::Compiler->cls.isInterfaceClass(cg->comp(), clazz) && ( !TR::Compiler->cls.isAbstractClass(cg->comp(), clazz) // here be dragons // int.class, char.class, etc are final & abstract // usually instanceOf calls on these classes are ripped out by the optimizer // but in some cases they can persist to codegen which without the following // case causes assertions because we opt out of calling the helper and doing // all inline tests. Really we could just jmp to the failed side, but to reduce // service risk we are going to do an equality test that we know will fail // NOTE final abstract is not enough - all array classes are final abstract // to prevent them being used with new and being extended... || (TR::Compiler->cls.isAbstractClass(cg->comp(), clazz) && TR::Compiler->cls.isClassFinal(cg->comp(), clazz) && TR::Compiler->cls.isPrimitiveClass(cg->comp(), clazz))) ) return true; } return false; }
// Return the high global register number for a node that needs a 32-bit
// register pair (a 64-bit integer on a 32-bit target), or -1 when no pair
// is required.
TR_GlobalRegisterNumber
OMR::TreeEvaluator::getHighGlobalRegisterNumberIfAny(TR::Node *node, TR::CodeGenerator *cg)
   {
   // No need for register pairs in 64-bit mode.
   if (TR::Compiler->target.is64Bit())
      return -1;

   // If the node itself doesn't have a type (e.g. passthrough) we assume it
   // has a child with a type.  The initial node is kept because it carries
   // the register information.
   TR::Node *rootNode = node;
   TR::Node *typedNode = node;
   while (typedNode->getType() == TR::NoType)
      {
      typedNode = typedNode->getFirstChild();
      TR_ASSERT(typedNode, "The node should always be valid while looking for a Child with a type");
      }
   TR_ASSERT(typedNode->getType() != TR::NoType, "Expecting node %p, to have a specific type here", typedNode);

   // Only a 64-bit integer needs the pair's high register.
   if (typedNode->getType().isInt64())
      return rootNode->getHighGlobalRegisterNumber();
   return -1;
   }
// For every treetop in the NULL-terminated `list`, detach the root node by
// hoisting each of its children into its own treetop: children 1..n-1 get
// fresh treetops inserted after the original, and child 0 replaces the
// original node in place.
void OMR::TreeTop::removeDeadTrees(TR::Compilation * comp, TR::TreeTop* list[])
   {
   for (int i = 0; list[i] != NULL; ++i)
      {
      TR::TreeTop *deadTree = list[i];
      int numChildren = deadTree->getNode()->getNumChildren();

      // Children 1..n-1 each get a new treetop right after this one.
      for (int child = numChildren - 1; child > 0; --child)
         {
         TR::Node *childNode = deadTree->getNode()->getChild(child);
         deadTree->insertAfter(TR::TreeTop::create(comp, TR::Node::create(TR::treetop, 1, childNode)));
         childNode->decReferenceCount();
         }

      // Child 0 takes over the original treetop.
      if (numChildren != 0)
         {
         TR::Node *childNode = deadTree->getNode()->getChild(0);
         deadTree->setNode(TR::Node::create(TR::treetop, 1, childNode));
         childNode->decReferenceCount();
         }
      }
   }
// Range form of removeDeadTrees: for every treetop in [first, last), detach
// the root node by hoisting children 1..n-1 into fresh treetops inserted
// after it and letting child 0 replace the original node in place.
void OMR::TreeTop::removeDeadTrees(TR::Compilation * comp, TR::TreeTop* first, TR::TreeTop* last)
   {
   for (TR::TreeTop* cur = first; cur != last; cur = cur->getNextTreeTop())
      {
      int numChildren = cur->getNode()->getNumChildren();

      // Children 1..n-1 each get a new treetop right after this one.
      for (int child = numChildren - 1; child > 0; --child)
         {
         TR::Node * hoisted = cur->getNode()->getChild(child);
         cur->insertAfter(TR::TreeTop::create(comp, TR::Node::create(TR::treetop, 1, hoisted)));
         hoisted->decReferenceCount();
         }

      // Child 0 takes over the original treetop.
      if (numChildren != 0)
         {
         TR::Node * hoisted = cur->getNode()->getChild(0);
         cur->setNode(TR::Node::create(TR::treetop, 1, hoisted));
         hoisted->decReferenceCount();
         }
      }
   }