void
OMR::SymbolReference::setLiteralPoolAliases(TR_BitVector * aliases, TR::SymbolReferenceTable * symRefTab)
   {
   // If no generic int shadow symbol has been created yet there can be no
   // literal-pool shadow symrefs to alias with, so there is nothing to do.
   if (!symRefTab->findGenericIntShadowSymbol())
      return;

   // Walk every generic int shadow symref and alias the ones that refer to
   // the literal pool (either its address or data loaded from it).
   TR_SymRefIterator it(symRefTab->aliasBuilder.genericIntShadowSymRefs(), symRefTab);
   for (TR::SymbolReference *candidate = it.getNext(); candidate; candidate = it.getNext())
      {
      if (candidate->isLiteralPoolAddress() || candidate->isFromLiteralPool())
         aliases->set(candidate->getReferenceNumber());
      }

   // A symref always aliases itself, and every alias set includes the
   // "unsafe" symrefs.
   aliases->set(self()->getReferenceNumber());
   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
void
OMR::SymbolReference::setSharedStaticAliases(TR_BitVector * aliases, TR::SymbolReferenceTable * symRefTab)
   {
   if (!self()->reallySharesSymbol())
      {
      // Not a shared symbol: the symref only aliases itself.
      aliases->set(self()->getReferenceNumber());
      }
   else
      {
      // Choose the static symref list that matches this symbol's data type,
      // then alias every symref in that list which shares the same symbol.
      TR::DataType type = self()->getSymbol()->getType();
      TR_SymRefIterator it(type.isAddress() ? symRefTab->aliasBuilder.addressStaticSymRefs() :
                           (type.isInt32() ? symRefTab->aliasBuilder.intStaticSymRefs() :
                                             symRefTab->aliasBuilder.nonIntPrimitiveStaticSymRefs()),
                           symRefTab);
      for (TR::SymbolReference *candidate = it.getNext(); candidate; candidate = it.getNext())
         {
         if (candidate->getSymbol() == self()->getSymbol())
            aliases->set(candidate->getReferenceNumber());
         }
      }

   // Every alias set includes the "unsafe" symrefs.
   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
// Build the alias set for a shadow symref that may share its symbol with
// other shadow symrefs. Aliases every shadow symref (of the matching type
// class) that shares this symbol, merges in any explicitly registered shared
// alias bit vector, and always merges in the "unsafe" symrefs.
void
OMR::SymbolReference::setSharedShadowAliases(TR_BitVector * aliases, TR::SymbolReferenceTable * symRefTab)
   {
   if (self()->reallySharesSymbol() && !_symbol->isUnsafeShadowSymbol())
      {
      // Choose the shadow symref list that matches this symbol's data type,
      // then alias every symref in that list which shares the same symbol.
      TR::DataType type = self()->getSymbol()->getType();
      TR_SymRefIterator i(type.isAddress() ? symRefTab->aliasBuilder.addressShadowSymRefs() :
                          (type.isInt32() ? symRefTab->aliasBuilder.intShadowSymRefs() :
                                            symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs()),
                          symRefTab);
      TR::SymbolReference * symRef;
      while ((symRef = i.getNext()))
         if (symRef->getSymbol() == self()->getSymbol())
            aliases->set(symRef->getReferenceNumber());

      // Include the symref's own shared alias bit vector, if one was
      // registered. Hoisted into a local so the lookup is done only once.
      TR_BitVector *sharedAliases = symRefTab->getSharedAliases(self());
      if (sharedAliases != NULL)
         *aliases |= *sharedAliases;
      }
   else
      {
      // Not shared (or deliberately unsafe): the symref only aliases itself.
      aliases->set(self()->getReferenceNumber());
      }

   // Every alias set includes the "unsafe" symrefs.
   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
// Returns true if there is any constraint to the move bool TR_LocalLiveRangeReduction::isAnySymInDefinedOrUsedBy(TR_TreeRefInfo *currentTreeRefInfo, TR::Node *currentNode, TR_TreeRefInfo *movingTreeRefInfo ) { TR::Node *movingNode = movingTreeRefInfo->getTreeTop()->getNode(); // ignore anchors // if (movingNode->getOpCode().isAnchor()) movingNode = movingNode->getFirstChild(); TR::ILOpCode &opCode = currentNode->getOpCode(); ////if ((opCode.getOpCodeValue() == TR::monent) || (opCode.getOpCodeValue() == TR::monexit)) if (nodeMaybeMonitor(currentNode)) { if (trace()) traceMsg(comp(),"cannot move %p beyond monitor %p\n",movingNode,currentNode); return true; } // Don't move gc points or things across gc points // if (movingNode->canGCandReturn() || currentNode->canGCandReturn()) { if (trace()) traceMsg(comp(), "cannot move gc points %p past %p\n", movingNode, currentNode); return true; } // Don't move checks or calls at all // if (containsCallOrCheck(movingTreeRefInfo,movingNode)) { if (trace()) traceMsg(comp(),"cannot move check or call %s\n", getDebug()->getName(movingNode)); return true; } // Don't move object header store past a GC point // if ((currentNode->getOpCode().isWrtBar() || currentNode->canCauseGC()) && mayBeObjectHeaderStore(movingNode, fe())) { if (trace()) traceMsg(comp(),"cannot move possible object header store %s past GC point %s\n", getDebug()->getName(movingNode), getDebug()->getName(currentNode)); return true; } if (TR::Compiler->target.cpu.isPower() && opCode.getOpCodeValue() == TR::allocationFence) { // Can't move allocations past flushes if (movingNode->getOpCodeValue() == TR::treetop && movingNode->getFirstChild()->getOpCode().isNew() && (currentNode->getAllocation() == NULL || currentNode->getAllocation() == movingNode->getFirstChild())) { if (trace()) { traceMsg(comp(),"cannot move %p beyond flush %p - ", movingNode, currentNode); if (currentNode->getAllocation() == NULL) traceMsg(comp(),"(flush with null allocation)\n"); else 
traceMsg(comp(),"(flush for allocation %p)\n", currentNode->getAllocation()); } return true; } // Can't move certain stores past flushes // Exclude all indirect stores, they may be for stack allocs, in which case the flush is needed at least as a scheduling barrier // Direct stores to autos and parms are the only safe candidates if (movingNode->getOpCode().isStoreIndirect() || (movingNode->getOpCode().isStoreDirect() && !movingNode->getSymbol()->isParm() && !movingNode->getSymbol()->isAuto())) { if (trace()) traceMsg(comp(),"cannot move %p beyond flush %p - (flush for possible stack alloc)", movingNode, currentNode); return true; } } for (int32_t i = 0; i < currentNode->getNumChildren(); i++) { TR::Node *child = currentNode->getChild(i); //Any node that has side effects (like call and newarrya) cannot be evaluated in the middle of the tree. if (movingTreeRefInfo->getFirstRefNodesList()->find(child)) { //for calls and unresolve symbol that are not under check if (child->exceptionsRaised() || (child->getOpCode().hasSymbolReference() && child->getSymbolReference()->isUnresolved())) { if (trace()) traceMsg(comp(),"cannot move %p beyond %p - cannot change evaluation point of %p\n ",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),child); return true; } else if(movingNode->getOpCode().isStore()) { TR::SymbolReference *stSymRef = movingNode->getSymbolReference(); int32_t stSymRefNum = stSymRef->getReferenceNumber(); //TR::SymbolReference *stSymRef = movingNode->getSymbolReference(); int32_t numHelperSymbols = comp()->getSymRefTab()->getNumHelperSymbols(); if ((comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::vftSymbol))|| (comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::contiguousArraySizeSymbol))|| (comp()->getSymRefTab()->isNonHelper(stSymRefNum, TR::SymbolReferenceTable::discontiguousArraySizeSymbol))|| (stSymRef == comp()->getSymRefTab()->findHeaderFlagsSymbolRef())|| (stSymRef->getSymbol() == 
comp()->getSymRefTab()->findGenericIntShadowSymbol())) return true; } else if (movingNode->getOpCode().isResolveOrNullCheck()) { if (trace()) traceMsg(comp(),"cannot move %p beyond %p - node %p under ResolveOrNullCheck",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),currentNode); return true; } else if (TR::Compiler->target.is64Bit() && movingNode->getOpCode().isBndCheck() && ((opCode.getOpCodeValue() == TR::i2l) || (opCode.getOpCodeValue() == TR::iu2l)) && !child->isNonNegative()) { if (trace()) traceMsg(comp(),"cannot move %p beyond %p - changing the eval point of %p will casue extra cg instruction ",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),currentNode); return true; } } //don't recurse over nodes each are not the first reference if (child->getReferenceCount()==1 || currentTreeRefInfo->getFirstRefNodesList()->find(child)) { if (isAnySymInDefinedOrUsedBy(currentTreeRefInfo, child, movingTreeRefInfo )) return true; } } return false; }
void TR_LocalLiveRangeReduction::populatePotentialDeps(TR_TreeRefInfo *treeRefInfo,TR::Node *node) { TR::ILOpCode &opCode = node->getOpCode(); if (node->getOpCode().hasSymbolReference()) { TR::SymbolReference *symRef = node->getSymbolReference(); int32_t symRefNum = symRef->getReferenceNumber(); //set defSym - all symbols that might be written if (opCode.isCall() || opCode.isResolveCheck()|| opCode.isStore() || node->mightHaveVolatileSymbolReference()) { bool isCallDirect = false; if (node->getOpCode().isCallDirect()) isCallDirect = true; if (!symRef->getUseDefAliases(isCallDirect).isZero(comp())) { TR::SparseBitVector useDefAliases(comp()->allocator()); symRef->getUseDefAliases(isCallDirect).getAliases(useDefAliases); TR::SparseBitVector::Cursor aliasCursor(useDefAliases); for (aliasCursor.SetToFirstOne(); aliasCursor.Valid(); aliasCursor.SetToNextOne()) { int32_t nextAlias = aliasCursor; treeRefInfo->getDefSym()->set(nextAlias); } } if (opCode.isStore()) treeRefInfo->getDefSym()->set(symRefNum); } //set useSym - all symbols that are used if (opCode.canRaiseException()) { TR::SparseBitVector useAliases(comp()->allocator()); symRef->getUseonlyAliases().getAliases(useAliases); { TR::SparseBitVector::Cursor aliasesCursor(useAliases); for (aliasesCursor.SetToFirstOne(); aliasesCursor.Valid(); aliasesCursor.SetToNextOne()) { int32_t nextAlias = aliasesCursor; treeRefInfo->getUseSym()->set(nextAlias); } } } if (opCode.isLoadVar() || (opCode.getOpCodeValue() == TR::loadaddr)) { treeRefInfo->getUseSym()->set(symRefNum); } } for (int32_t i = 0; i < node->getNumChildren(); i++) { TR::Node *child = node->getChild(i); //don't recurse over references (nodes which are not the first reference) // if (child->getReferenceCount()==1 || treeRefInfo->getFirstRefNodesList()->find(child)) populatePotentialDeps(treeRefInfo,child ); } return; }
// Emit the binary body of a S390 helper-call snippet: optionally flush
// arguments for OSR induction, load the VM thread, turn RI off, branch to the
// helper (BRASL when the helper always excepts, otherwise LARL+BRCL so the
// helper can return to mainline), routing through a trampoline if the helper
// is out of range. Returns the buffer cursor after the emitted code.
uint8_t *
TR::S390HelperCallSnippet::emitSnippetBody()
   {
   uint8_t * cursor = cg()->getBinaryBufferCursor();
   getSnippetLabel()->setCodeLocation(cursor);

   TR::Node * callNode = getNode();
   TR::SymbolReference * helperSymRef = getHelperSymRef();
   bool jitInduceOSR = helperSymRef == cg()->symRefTab()->element(TR_induceOSRAtCurrentPC);
   if (jitInduceOSR)
      {
      // Flush in-register arguments back to the stack for interpreter
      cursor = TR::S390CallSnippet::S390flushArgumentsToStack(cursor, callNode, getSizeOfArguments(), cg());
      }

   // NOTE(review): presumably converts the entry-point register enum value to
   // a 0-based machine register number — confirm against getEntryPointRegister().
   uint32_t rEP = (uint32_t) cg()->getEntryPointRegister() - 1;

   //load vm thread into gpr13
   cursor = generateLoadVMThreadInstruction(cg(), cursor);

   // Generate RIOFF if RI is supported.
   cursor = generateRuntimeInstrumentationOnOffInstruction(cg(), cursor, TR::InstOpCode::RIOFF);

   if (alwaysExcept())  // Methods that require R14 to point to snippet:
      {
      // For trace method entry/exit, we need to set up R14 to point to the
      // beginning of the data segment. We will use BRASL to automatically
      // set R14 correctly. For methods that lead to exceptions, and never
      // return to the main code, we set up R14, so that if GC occurs, the
      // stackwalker will see R14 is pointing to this snippet, and pick up
      // the correct stack map.
      *(int16_t *) cursor = 0xC0E5;                      // BRASL R14, <Helper Addr>
      cursor += sizeof(int16_t);
      }
   else  // Otherwise:
      {
      // We're not sure if the helper will return. So, we need to provide
      // the return addr of the main line code, so that when helper call
      // completes, it can jump back properly.

      // Load Return Address into R14.
      intptrj_t returnAddr = (intptrj_t)getReStartLabel()->getCodeLocation();

      // LARL R14, <Return Addr>: opcode halfword, then the signed
      // halfword-scaled offset from the start of the LARL (cursor - 2).
      *(int16_t *) cursor = 0xC0E0;
      cursor += sizeof(int16_t);
      *(int32_t *) cursor = (int32_t)((returnAddr - (intptrj_t)(cursor - 2)) / 2);
      cursor += sizeof(int32_t);
      *(int16_t *) cursor = 0xC0F4;                      // BRCL <Helper Addr>
      cursor += sizeof(int16_t);
      }

   // Calculate the relative offset to get to helper method.
   // If MCC is not supported, everything should be reachable.
   // If MCC is supported, we will look up the appropriate trampoline, if
   // necessary.
   intptrj_t destAddr = (intptrj_t)(helperSymRef->getSymbol()->castToMethodSymbol()->getMethodAddress());

#if defined(TR_TARGET_64BIT)
#if defined(J9ZOS390)
   if (cg()->comp()->getOption(TR_EnableRMODE64))
#endif
      {
      if (NEEDS_TRAMPOLINE(destAddr, cursor, cg()))
         {
         // Helper is out of direct-branch range: branch via a trampoline.
         destAddr = cg()->fe()->indexedTrampolineLookup(helperSymRef->getReferenceNumber(), (void *)cursor);
         this->setUsedTrampoline(true);

         // We clobber rEP if we take a trampoline. Update our register map if necessary.
         if (gcMap().getStackMap() != NULL)
            {
            gcMap().getStackMap()->maskRegisters(~(0x1 << (rEP)));
            }
         }
      }
#endif

   TR_ASSERT(CHECK_32BIT_TRAMPOLINE_RANGE(destAddr, cursor), "Helper Call is not reachable.");
   this->setSnippetDestAddr(destAddr);

   // Patch the branch target: signed halfword-scaled offset from the start of
   // the branch instruction (cursor - 2), and record an AOT relocation for it.
   *(int32_t *) cursor = (int32_t)((destAddr - (intptrj_t)(cursor - 2)) / 2);
   AOTcgDiag1(cg()->comp(), "add TR_HelperAddress cursor=%x\n", cursor);
   cg()->addProjectSpecializedRelocation(cursor, (uint8_t*) helperSymRef, NULL, TR_HelperAddress,
                                         __FILE__, __LINE__, getNode());
   cursor += sizeof(int32_t);

   gcMap().registerStackMap(cursor, cg());

   return cursor;
   }