RematSafetyInformation(TR::Compilation *comp) :
   dependentSymRefs(getTypedAllocator<TR::SparseBitVector>(comp->allocator())),
   argumentTreeTops(getTypedAllocator<TR::TreeTop*>(comp->allocator())),
   rematTreeTops(getTypedAllocator<TR::TreeTop*>(comp->allocator())),
   comp(comp)
   {
   }
void
OMR::IlValue::storeToAuto()
   {
   if (_symRefThatCanBeUsedInOtherBlocks == NULL)
      {
      TR::Compilation *comp = TR::comp();

      // first use from another block, need to create symref and insert store tree where node was computed
      TR::SymbolReference *symRef = comp->getSymRefTab()->createTemporary(_methodBuilder->methodSymbol(), _nodeThatComputesValue->getDataType());
      symRef->getSymbol()->setNotCollected();
      char *name = (char *) comp->trMemory()->allocateHeapMemory((2+10+1) * sizeof(char)); // 2 ("_T") + max 10 digits + trailing zero
      sprintf(name, "_T%u", symRef->getCPIndex());
      symRef->getSymbol()->getAutoSymbol()->setName(name);
      _methodBuilder->defineSymbol(name, symRef);

      // create store and its treetop
      TR::Node *storeNode = TR::Node::createStore(symRef, _nodeThatComputesValue);
      TR::TreeTop *prevTreeTop = _treeTopThatAnchorsValue->getPrevTreeTop();
      TR::TreeTop *newTree = TR::TreeTop::create(comp, storeNode);
      newTree->insertNewTreeTop(prevTreeTop, _treeTopThatAnchorsValue);

      _treeTopThatAnchorsValue->unlink(true);
      _treeTopThatAnchorsValue = newTree;
      _symRefThatCanBeUsedInOtherBlocks = symRef;
      }
   }
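// Illustrative sketch (hypothetical client code, not part of this file): with the
// JitBuilder API, a value computed in the entry block and first consumed inside an
// IfThen builder is a use "from another block", which is what drives storeToAuto()
// above to spill the value to a named temporary ("_T<cpIndex>"). The class below
// follows the JitBuilder sample conventions and is an assumption-labelled example.
class CrossBlockUseMethod : public TR::MethodBuilder
   {
   public:
   CrossBlockUseMethod(TR::TypeDictionary *types) : TR::MethodBuilder(types)
      {
      DefineLine(LINETOSTR(__LINE__));
      DefineFile(__FILE__);
      DefineName("crossBlockUse");
      DefineParameter("a", Int32);
      DefineParameter("b", Int32);
      DefineReturnType(Int32);
      }

   virtual bool buildIL()
      {
      TR::IlValue *sum = Add(Load("a"), Load("b"));          // computed in the entry block
      TR::IlBuilder *thenPath = NULL;
      IfThen(&thenPath, GreaterThan(sum, ConstInt32(0)));
      thenPath->Return(sum);                                 // first use from a different block
      Return(ConstInt32(0));
      return true;
      }
   };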
void
TR::ExternalRelocation::apply(TR::CodeGenerator *codeGen)
   {
   TR::Compilation *comp = codeGen->comp();
   AOTcgDiag1(comp, "TR::ExternalRelocation::apply updateLocation=" POINTER_PRINTF_FORMAT " \n", getUpdateLocation());
   uint8_t *relocatableMethodCodeStart = (uint8_t *)comp->getRelocatableMethodCodeStart();
   getRelocationRecord()->addRelocationEntry((uint32_t)(getUpdateLocation() - relocatableMethodCodeStart));
   }
uint8_t
TR::ExternalOrderedPair32BitRelocation::collectModifier()
   {
   TR::Compilation *comp = TR::comp();
   uint8_t *relocatableMethodCodeStart = (uint8_t *)comp->getRelocatableMethodCodeStart();
   uint8_t *updateLocation;
   uint8_t *updateLocation2;
   TR_ExternalRelocationTargetKind kind = getTargetKind();

   if (TR::Compiler->target.cpu.isPower() &&
       (kind == TR_ArrayCopyHelper || kind == TR_ArrayCopyToc || kind == TR_RamMethod || kind == TR_GlobalValue ||
        kind == TR_BodyInfoAddressLoad || kind == TR_DataAddress || kind == TR_DebugCounter))
      {
      TR::Instruction *instr = (TR::Instruction *)getUpdateLocation();
      TR::Instruction *instr2 = (TR::Instruction *)getLocation2();
      updateLocation = instr->getBinaryEncoding();
      updateLocation2 = instr2->getBinaryEncoding();
      }
   else
      {
      updateLocation = getUpdateLocation();
      updateLocation2 = getLocation2();
      }

   int32_t iLoc = updateLocation - relocatableMethodCodeStart;
   int32_t iLoc2 = updateLocation2 - relocatableMethodCodeStart;
   AOTcgDiag0(comp, "TR::ExternalOrderedPair32BitRelocation::collectModifier\n");
   if ((iLoc < MIN_SHORT_OFFSET || iLoc > MAX_SHORT_OFFSET) || (iLoc2 < MIN_SHORT_OFFSET || iLoc2 > MAX_SHORT_OFFSET))
      return RELOCATION_TYPE_WIDE_OFFSET | RELOCATION_TYPE_ORDERED_PAIR;

   return RELOCATION_TYPE_ORDERED_PAIR;
   }
TR_RuntimeHelper
TR::S390CallSnippet::getInterpretedDispatchHelper(TR::SymbolReference *methodSymRef, TR::DataType type)
   {
   TR::Compilation *comp = cg()->comp();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
   bool isJitInduceOSRCall = false;
   if (methodSymbol->isHelper() && methodSymRef->isOSRInductionHelper())
      isJitInduceOSRCall = true;

   if (methodSymRef->isUnresolved() || comp->compileRelocatableCode())
      {
      TR_ASSERT(!isJitInduceOSRCall || !comp->compileRelocatableCode(), "calling jitInduceOSR is not supported yet under AOT\n");
      if (methodSymbol->isSpecial())
         return TR_S390interpreterUnresolvedSpecialGlue;
      else if (methodSymbol->isStatic())
         return TR_S390interpreterUnresolvedStaticGlue;
      else
         return TR_S390interpreterUnresolvedDirectVirtualGlue;
      }
   else if (isJitInduceOSRCall)
      return (TR_RuntimeHelper) methodSymRef->getReferenceNumber();
   else
      return getHelper(methodSymbol, type, cg());
   }
void
OMR::CodeGenPhase::performInliningReportPhase(TR::CodeGenerator *cg, TR::CodeGenPhase *phase)
   {
   TR::Compilation *comp = cg->comp();
   if (comp->getOptions()->insertDebuggingCounters() > 1)
      TR_DebuggingCounters::inliningReportForMethod(comp);
   }
void
TR::PPCImmInstruction::addMetaDataForCodeAddress(uint8_t *cursor)
   {
   if (needsAOTRelocation())
      {
      switch(getReloKind())
         {
         case TR_AbsoluteHelperAddress:
            cg()->addExternalRelocation(new (cg()->trHeapMemory()) TR::ExternalRelocation(cursor, (uint8_t *)getSymbolReference(), TR_AbsoluteHelperAddress, cg()), __FILE__, __LINE__, getNode());
            break;
         case TR_RamMethod:
            if (comp()->getOption(TR_UseSymbolValidationManager))
               {
               cg()->addExternalRelocation(
                  new (comp()->trHeapMemory()) TR::ExternalRelocation(
                     cursor,
                     (uint8_t *)comp()->getJittedMethodSymbol()->getResolvedMethod()->resolvedMethodAddress(),
                     (uint8_t *)TR::SymbolType::typeMethod,
                     TR_SymbolFromManager,
                     cg()),
                  __FILE__, __LINE__, getNode());
               }
            else
               {
               cg()->addExternalRelocation(new (cg()->trHeapMemory()) TR::ExternalRelocation(cursor, NULL, TR_RamMethod, cg()), __FILE__, __LINE__, getNode());
               }
            break;
         case TR_BodyInfoAddress:
            cg()->addExternalRelocation(new (cg()->trHeapMemory()) TR::ExternalRelocation(cursor, 0, TR_BodyInfoAddress, cg()), __FILE__, __LINE__, getNode());
            break;
         default:
            TR_ASSERT(false, "Unsupported AOT relocation type specified.");
         }
      }

   TR::Compilation *comp = cg()->comp();
   if (std::find(comp->getStaticPICSites()->begin(), comp->getStaticPICSites()->end(), this) != comp->getStaticPICSites()->end())
      {
      // none-HCR: low-tag to invalidate -- BE or LE is relevant
      //
      void *valueToHash = *(void**)(cursor - (TR::Compiler->target.is64Bit()?4:0));
      void *addressToPatch = TR::Compiler->target.is64Bit()?
         (TR::Compiler->target.cpu.isBigEndian()?cursor:(cursor-4)) : cursor;
      cg()->jitAddPicToPatchOnClassUnload(valueToHash, addressToPatch);
      }

   if (std::find(comp->getStaticHCRPICSites()->begin(), comp->getStaticHCRPICSites()->end(), this) != comp->getStaticHCRPICSites()->end())
      {
      // HCR: whole pointer replacement.
      //
      void **locationToPatch = (void**)(cursor - (TR::Compiler->target.is64Bit()?4:0));
      cg()->jitAddPicToPatchOnClassRedefinition(*locationToPatch, locationToPatch);
      cg()->addExternalRelocation(new (cg()->trHeapMemory()) TR::ExternalRelocation((uint8_t *)locationToPatch, (uint8_t *)*locationToPatch, TR_HCR, cg()), __FILE__, __LINE__, getNode());
      }
   }
void
OMR::CodeGenPhase::performRemoveUnusedLocalsPhase(TR::CodeGenerator *cg, TR::CodeGenPhase *phase)
   {
   TR::Compilation *comp = cg->comp();
   phase->reportPhase(RemoveUnusedLocalsPhase);
   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());
   cg->removeUnusedLocals();
   }
void
OMR::CodeGenPhase::performRegisterAssigningPhase(TR::CodeGenerator *cg, TR::CodeGenPhase *phase)
   {
   TR::Compilation *comp = cg->comp();
   phase->reportPhase(RegisterAssigningPhase);

   if (cg->getDebug())
      cg->getDebug()->roundAddressEnumerationCounters();

      {
      TR::LexicalMemProfiler mp("RA", comp->phaseMemProfiler());
      LexicalTimer pt("RA", comp->phaseTimer());

      TR_RegisterKinds colourableKindsToAssign;
      TR_RegisterKinds nonColourableKindsToAssign = cg->prepareRegistersForAssignment();

      cg->jettisonAllSpills(); // Spill temps used before now may lead to conflicts if also used by register assignment

      // Do local register assignment for non-colourable registers.
      //
      if (cg->getTraceRAOption(TR_TraceRAListing))
         if (cg->getDebug()) cg->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Before Local RA", false);

      cg->doRegisterAssignment(nonColourableKindsToAssign);

      if (comp->compilationShouldBeInterrupted(AFTER_REGISTER_ASSIGNMENT_CONTEXT))
         {
         comp->failCompilation<TR::CompilationInterrupted>("interrupted after RA");
         }
      }

   if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceCGPostRegisterAssignment))
      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Post Register Assignment Instructions", false, true);
   }
void
OMR::CodeGenPhase::performLowerTreesPhase(TR::CodeGenerator *cg, TR::CodeGenPhase *phase)
   {
   TR::Compilation *comp = cg->comp();
   phase->reportPhase(LowerTreesPhase);
   cg->lowerTrees();
   if (comp->getOption(TR_TraceCG))
      comp->dumpMethodTrees("Post Lower Trees");
   }
bool
OMR::SymbolReference::storeCanBeRemoved()
   {
   TR::Compilation *comp = TR::comp();
   TR::Symbol *s = self()->getSymbol();
   return !s->isVolatile() &&
          (((s->getDataType() != TR::Double) && (s->getDataType() != TR::Float)) ||
           comp->cg()->getSupportsJavaFloatSemantics() ||
           (self()->isTemporary(comp) && !s->behaveLikeNonTemp()));
   }
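// Illustrative sketch (hypothetical optimizer helper, not from this file): a direct store
// whose value is provably never observed again may only be dropped when the symbol
// reference itself allows it, which is the query storeCanBeRemoved() answers above.
// isValueNeverObservedAgain() is an assumed stand-in for the pass's own liveness analysis.
static void removeDeadStoreIfPossible(TR::Compilation *comp, TR::TreeTop *treeTop)
   {
   TR::Node *node = treeTop->getNode();
   if (node->getOpCode().isStoreDirect() &&
       node->getSymbolReference()->storeCanBeRemoved() &&
       isValueNeverObservedAgain(comp, node))
      {
      TR::TransformUtil::removeTree(comp, treeTop);   // unlink the dead store from the block
      }
   }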
bool
OMR::SymbolReference::sharesSymbol(bool includingGCSafePoint)
   {
   TR::Compilation *c = TR::comp();
   if (self()->reallySharesSymbol(c))
      return true;

   // At this point, we'd like to call getUseDefAliases(c, false) and return
   // true iff that is non-NULL. However, doing so caused floatSanity
   // (specifically CompactNullChecks) to consume immense amounts (1GB+) of
   // memory and run for a long, long time (half an hour or more in some
   // cases), so we need to copy some of that logic in here as a short-circuit.
   //
   // !!! NOTE !!!
   // THERE IS A COPY OF THIS LOGIC IN getUseDefAliases
   //
   int32_t kind = _symbol->getKind();
   TR::SymbolReferenceTable *symRefTab = c->getSymRefTab();
   switch (kind)
      {
      case TR::Symbol::IsShadow:
      case TR::Symbol::IsStatic:
         {
         // For unresolved constant dynamic, we need to invoke a Java bootstrap method,
         // which can have arbitrary side effects, so the aliasing should be conservative here.
         // isConstObjectRef now returns true for condy, so we add an explicit condition,
         // more like a short-circuit, to say if we are unresolved and not isConstObjectRef
         // (this is the same as before), or if we are unresolved and condy
         // (this is the extra condition added), we would return conservative aliases.
         if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) ||
             _symbol->isVolatile() || self()->isLiteralPoolAddress() || self()->isFromLiteralPool() ||
             _symbol->isUnsafeShadowSymbol() ||
             (_symbol->isArrayShadowSymbol() && c->getMethodSymbol()->hasVeryRefinedAliasSets()))
            {
            // getUseDefAliases might not return NULL
            }
         else if (!symRefTab->aliasBuilder.mutableGenericIntShadowHasBeenCreated())
            {
            // getUseDefAliases must return NULL
            return false;
            }
         else if (kind == TR::Symbol::IsStatic && !symRefTab->aliasBuilder.litPoolGenericIntShadowHasBeenCreated())
            {
            // getUseDefAliases must return NULL
            return false;
            }
         break;
         }
      }

   return !self()->getUseDefAliases(false, includingGCSafePoint).isZero(c);
   }
OMR::LabelSymbol::LabelSymbol(TR::CodeGenerator *codeGen, TR::Block *labb) :
   TR::Symbol(),
   _instruction(NULL),
   _codeLocation(NULL),
   _estimatedCodeLocation(0),
   _snippet(NULL)
   {
   self()->setIsLabel();

   TR::Compilation *comp = TR::comp();
   if (comp && comp->getDebug())
      comp->getDebug()->newLabelSymbol(self());
   }
OMR::LabelSymbol::LabelSymbol() :
   TR::Symbol(),
   _instruction(NULL),
   _codeLocation(NULL),
   _estimatedCodeLocation(0),
   _snippet(NULL),
   _directlyTargeted(false)
   {
   self()->setIsLabel();

   TR::Compilation *comp = TR::comp();
   if (comp && comp->getDebug())
      comp->getDebug()->newLabelSymbol(self());
   }
void
TR::PPCTrg1Src1ImmInstruction::addMetaDataForCodeAddress(uint8_t *cursor)
   {
   TR::Compilation *comp = cg()->comp();

   if (std::find(comp->getStaticPICSites()->begin(), comp->getStaticPICSites()->end(), this) != comp->getStaticPICSites()->end())
      {
      cg()->jitAddPicToPatchOnClassUnload((void *)(getSourceImmPtr()), (void *)cursor);
      }

   if (std::find(comp->getStaticMethodPICSites()->begin(), comp->getStaticMethodPICSites()->end(), this) != comp->getStaticMethodPICSites()->end())
      {
      cg()->jitAddPicToPatchOnClassUnload((void *)(cg()->fe()->createResolvedMethod(cg()->trMemory(), (TR_OpaqueMethodBlock *)(getSourceImmPtr()), comp->getCurrentMethod())->classOfMethod()), (void *)cursor);
      }
   }
uint8_t
TR::ExternalRelocation::collectModifier()
   {
   TR::Compilation *comp = TR::comp();
   uint8_t *relocatableMethodCodeStart = (uint8_t *)comp->getRelocatableMethodCodeStart();
   uint8_t *updateLocation = getUpdateLocation();

   int32_t distanceFromStartOfBuffer = updateLocation - relocatableMethodCodeStart;
   int32_t distanceFromStartOfMethod = updateLocation - comp->cg()->getCodeStart();
   AOTcgDiag2(comp, "TR::ExternalRelocation::collectModifier distance from start of buffer=%x, from start of method=%x\n", distanceFromStartOfBuffer, distanceFromStartOfMethod);

   if (distanceFromStartOfBuffer < MIN_SHORT_OFFSET || distanceFromStartOfBuffer > MAX_SHORT_OFFSET)
      return RELOCATION_TYPE_WIDE_OFFSET;

   return 0;
   }
/**
 * make these two array shadows independent of each other, but still aliased to
 * all other array shadows
 */
void
OMR::SymbolReference::makeIndependent(TR::SymbolReferenceTable *symRefTab, TR::SymbolReference *symRef)
   {
   TR::Compilation *comp = symRefTab->comp();
   TR_ASSERT(self()->getSymbol()->isArrayShadowSymbol(), "symref #%d is not an array shadow\n", self()->getReferenceNumber());
   TR_ASSERT(symRef->getSymbol()->isArrayShadowSymbol(), "symref #%d is not an array shadow\n", symRef->getReferenceNumber());

   if (NULL == self()->getIndependentSymRefs())
      self()->setIndependentSymRefs(new (comp->trHeapMemory()) TR_BitVector(symRefTab->getNumSymRefs(), comp->trMemory(), heapAlloc, growable));

   if (NULL == symRef->getIndependentSymRefs())
      symRef->setIndependentSymRefs(new (comp->trHeapMemory()) TR_BitVector(symRefTab->getNumSymRefs(), comp->trMemory(), heapAlloc, growable));

   self()->getIndependentSymRefs()->set(symRef->getReferenceNumber());
   symRef->getIndependentSymRefs()->set(self()->getReferenceNumber());
   }
void
TR::PPCTrg1ImmInstruction::addMetaDataForCodeAddress(uint8_t *cursor)
   {
   TR::Compilation *comp = cg()->comp();

   if (std::find(comp->getStaticPICSites()->begin(), comp->getStaticPICSites()->end(), this) != comp->getStaticPICSites()->end())
      {
      TR::Node *node = getNode();
      cg()->jitAddPicToPatchOnClassUnload((void *)(TR::Compiler->target.is64Bit() ? node->getLongInt() : node->getInt()), (void *)cursor);
      }

   if (std::find(comp->getStaticMethodPICSites()->begin(), comp->getStaticMethodPICSites()->end(), this) != comp->getStaticMethodPICSites()->end())
      {
      TR::Node *node = getNode();
      cg()->jitAddPicToPatchOnClassUnload((void *)(cg()->fe()->createResolvedMethod(cg()->trMemory(), (TR_OpaqueMethodBlock *)(TR::Compiler->target.is64Bit() ? node->getLongInt() : node->getInt()), comp->getCurrentMethod())->classOfMethod()), (void *)cursor);
      }
   }
void
OMR::CodeGenPhase::performBinaryEncodingPhase(TR::CodeGenerator *cg, TR::CodeGenPhase *phase)
   {
   TR::Compilation *comp = cg->comp();
   phase->reportPhase(BinaryEncodingPhase);

   if (cg->getDebug())
      cg->getDebug()->roundAddressEnumerationCounters();

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->doBinaryEncoding();

   if (debug("verifyFinalNodeReferenceCounts"))
      {
      if (cg->getDebug())
         cg->getDebug()->verifyFinalNodeReferenceCounts(comp->getMethodSymbol());
      }
   }
/// called to identify the branches and their targets in the method
/// causes the _blocks array to be filled in with the basic blocks of the method
void
findAndMarkBranchTargets()
   {
   TR::Compilation *comp = this->comp();
   if (debug("branchTargets"))
      diagnostic("findAndMarkBranchTargets for %s\n", comp->signature());

   aboutToFindBranchTargets();

   for (ByteCode bc = this->first(); bc != BCunknown; bc = this->next())
      {
      if (_printByteCodes)
         this->printByteCode();

      int32_t i = this->bcIndex();

      if (this->isBranch())
         markTarget(i, this->branchDestination(i) - i);

      markAnySpecialBranchTargets(bc);
      }
   finishedFindingBranchTargets();
   }
void
TR::ExternalOrderedPair32BitRelocation::apply(TR::CodeGenerator *codeGen)
   {
   TR::Compilation *comp = codeGen->comp();
   AOTcgDiag0(comp, "TR::ExternalOrderedPair32BitRelocation::apply\n");

   TR::IteratedExternalRelocation *rec = getRelocationRecord();
   uint8_t *codeStart = (uint8_t *)comp->getRelocatableMethodCodeStart();
   TR_ExternalRelocationTargetKind kind = getRelocationRecord()->getTargetKind();

   if (TR::Compiler->target.cpu.isPower() &&
       (kind == TR_ArrayCopyHelper || kind == TR_ArrayCopyToc || kind == TR_RamMethodSequence || kind == TR_GlobalValue ||
        kind == TR_BodyInfoAddressLoad || kind == TR_DataAddress || kind == TR_DebugCounter))
      {
      TR::Instruction *instr = (TR::Instruction *)getUpdateLocation();
      TR::Instruction *instr2 = (TR::Instruction *)getLocation2();
      rec->addRelocationEntry((uint32_t)(instr->getBinaryEncoding() - codeStart));
      rec->addRelocationEntry((uint32_t)(instr2->getBinaryEncoding() - codeStart));
      }
   else
      {
      rec->addRelocationEntry(getUpdateLocation() - codeStart);
      rec->addRelocationEntry(getLocation2() - codeStart);
      }
   }
void
TestCompiler::FrontEnd::generateBinaryEncodingPrologue(TR_BinaryEncodingData *beData, TR::CodeGenerator *cg)
   {
   TR::Compilation *comp = cg->comp();
   TR_S390BinaryEncodingData *data = (TR_S390BinaryEncodingData *)beData;

   data->cursorInstruction = comp->getFirstInstruction();
   data->estimate = 0;
   data->preProcInstruction = data->cursorInstruction;
   data->jitTojitStart = data->cursorInstruction;
   data->cursorInstruction = NULL;

   TR::Instruction *preLoadArgs, *endLoadArgs;
   preLoadArgs = data->preProcInstruction;
   endLoadArgs = preLoadArgs;

   TR::Instruction *oldFirstInstruction = data->cursorInstruction;
   data->cursorInstruction = comp->getFirstInstruction();

   static char *disableAlignJITEP = feGetEnv("TR_DisableAlignJITEP");

   // Padding for JIT Entry Point
   if (!disableAlignJITEP)
      {
      data->estimate += 256;
      }

   while (data->cursorInstruction && data->cursorInstruction->getOpCodeValue() != TR::InstOpCode::PROC)
      {
      data->estimate = data->cursorInstruction->estimateBinaryLength(data->estimate);
      data->cursorInstruction = data->cursorInstruction->getNext();
      }

   cg->getLinkage()->createPrologue(data->cursorInstruction);
   //cg->getLinkage()->analyzePrologue();
   }
void
OMR::CodeGenPhase::performMapStackPhase(TR::CodeGenerator *cg, TR::CodeGenPhase *phase)
   {
   TR::Compilation *comp = cg->comp();
   cg->remapGCIndicesInInternalPtrFormat();
      {
      TR::LexicalMemProfiler mp("Stackmap", comp->phaseMemProfiler());
      LexicalTimer pt("Stackmap", comp->phaseTimer());

      cg->getLinkage()->mapStack(comp->getJittedMethodSymbol());

      if (comp->getOption(TR_TraceCG) || comp->getOptions()->getTraceCGOption(TR_TraceEarlyStackMap))
         comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Post Stack Map", false);
      }
   cg->setMappingAutomatics();
   }
void
OMR::CodeGenPhase::performUncommonCallConstNodesPhase(TR::CodeGenerator *cg, TR::CodeGenPhase *phase)
   {
   TR::Compilation *comp = cg->comp();

   if (comp->getOption(TR_DisableCallConstUncommoning))
      {
      traceMsg(comp, "Skipping Uncommon Call Constant Node phase\n");
      return;
      }

   phase->reportPhase(UncommonCallConstNodesPhase);

   if (comp->getOption(TR_TraceCG) || comp->getOption(TR_TraceTrees))
      comp->dumpMethodTrees("Pre Uncommon Call Constant Node Trees");

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->uncommonCallConstNodes();

   if (comp->getOption(TR_TraceCG) || comp->getOption(TR_TraceTrees))
      comp->dumpMethodTrees("Post Uncommon Call Constant Node Trees");
   }
TR::Register *
OMR::X86::AMD64::TreeEvaluator::i2lEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Compilation *comp = cg->comp();
   if (node->getFirstChild()->getOpCode().isLoadConst())
      {
      TR::Register *targetRegister = cg->allocateRegister();
      generateRegImmInstruction(MOV8RegImm4, node, targetRegister, node->getFirstChild()->getInt(), cg);
      node->setRegister(targetRegister);
      cg->decReferenceCount(node->getFirstChild());
      return targetRegister;
      }
   else
      {
      // In theory, because iRegStore has chosen to disregard needsSignExtension,
      // we must disregard skipSignExtension here for correctness.
      //
      // However, in fact, it is actually safe to obey skipSignExtension so
      // long as the optimizer only uses it on nodes known to be non-negative
      // when the i2l occurs. We do already have isNonNegative for that
      // purpose, but it may not always be set by the optimizer if a node known
      // to be non-negative at one point in a block is commoned up above the
      // BNDCHK or branch that determines the node's non-negativity. The
      // codegen does set the flag during tree evaluation, but the
      // skipSignExtension flag is set by the optimizer with more global
      // knowledge than the tree evaluator, so we will trust it.
      //
      TR_X86OpCodes regMemOpCode, regRegOpCode;
      if (node->isNonNegative() ||
          (node->skipSignExtension() && performTransformation(comp, "TREE EVALUATION: skipping sign extension on node %s despite lack of isNonNegative\n", comp->getDebug()->getName(node))))
         {
         // We prefer these plain (zero-extending) opcodes because the analyser can often eliminate them
         //
         regMemOpCode = L4RegMem;
         regRegOpCode = MOVZXReg8Reg4;
         }
      else
         {
         regMemOpCode = MOVSXReg8Mem4;
         regRegOpCode = MOVSXReg8Reg4;
         }

      return TR::TreeEvaluator::conversionAnalyser(node, regMemOpCode, regRegOpCode, cg);
      }
   }
LexicalXmlTag::LexicalXmlTag(TR::CodeGenerator *cg) : cg(cg)
   {
   TR::Compilation *comp = cg->comp();
   if (comp->getOption(TR_TraceOptDetails) || comp->getOption(TR_TraceCG))
      {
      const char *hotnessString = comp->getHotnessName(comp->getMethodHotness());
      traceMsg(comp, "<codegen\n"
                     "\tmethod=\"%s\"\n"
                     "\thotness=\"%s\">\n",
               comp->signature(), hotnessString);
      }
   }
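// For illustration (hypothetical method name): with tracing enabled, the constructor
// above opens a scope in the trace log such as
//
//   <codegen
//       method="java/lang/String.hashCode()I"
//       hotness="warm">
//
// which is presumably closed by the matching destructor with a </codegen> tag.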
void
OMR::CodeGenPhase::performPeepholePhase(TR::CodeGenerator *cg, TR::CodeGenPhase *phase)
   {
   TR::Compilation *comp = cg->comp();
   phase->reportPhase(PeepholePhase);

   TR::LexicalMemProfiler mp(phase->getName(), comp->phaseMemProfiler());
   LexicalTimer pt(phase->getName(), comp->phaseTimer());

   cg->doPeephole();

   if (comp->getOption(TR_TraceCG))
      comp->getDebug()->dumpMethodInstrs(comp->getOutFile(), "Post Peephole Instructions", false);
   }
void
TR_PPCRegisterDependencyGroup::assignRegisters(TR::Instruction *currentInstruction,
                                               TR_RegisterKinds kindToBeAssigned,
                                               uint32_t numberOfRegisters,
                                               TR::CodeGenerator *cg)
   {
   // *this swipeable for debugging purposes
   TR::Machine *machine = cg->machine();
   TR::Register *virtReg;
   TR::RealRegister::RegNum dependentRegNum;
   TR::RealRegister *dependentRealReg, *assignedRegister, *realReg;
   int i, j;
   TR::Compilation *comp = cg->comp();

   int num_gprs = 0;
   int num_fprs = 0;
   int num_vrfs = 0;

   // Use to do lookups using real register numbers
   TR_PPCRegisterDependencyMap map(_dependencies, numberOfRegisters);

   if (!comp->getOption(TR_DisableOOL))
      {
      for (i = 0; i < numberOfRegisters; i++)
         {
         virtReg = _dependencies[i].getRegister();
         dependentRegNum = _dependencies[i].getRealRegister();
         if (dependentRegNum == TR::RealRegister::SpilledReg)
            {
            TR_ASSERT(virtReg->getBackingStorage(), "should have a backing store if dependentRegNum == spillRegIndex()\n");
            if (virtReg->getAssignedRealRegister())
               {
               // this happens when the register was first spilled in main line path then was reverse spilled
               // and assigned to a real register in OOL path. We protected the backing store when doing
               // the reverse spill so we could re-spill to the same slot now
               traceMsg(comp, "\nOOL: Found register spilled in main line and re-assigned inside OOL");
               TR::Node *currentNode = currentInstruction->getNode();
               TR::RealRegister *assignedReg = toRealRegister(virtReg->getAssignedRegister());
               TR::MemoryReference *tempMR = new (cg->trHeapMemory()) TR::MemoryReference(currentNode, (TR::SymbolReference *)virtReg->getBackingStorage()->getSymbolReference(), sizeof(uintptr_t), cg);
               TR::InstOpCode::Mnemonic opCode;
               TR_RegisterKinds rk = virtReg->getKind();
               switch (rk)
                  {
                  case TR_GPR:
                     opCode = TR::InstOpCode::Op_load;
                     break;
                  case TR_FPR:
                     opCode = virtReg->isSinglePrecision() ? TR::InstOpCode::lfs : TR::InstOpCode::lfd;
                     break;
                  default:
                     TR_ASSERT(0, "\nRegister kind not supported in OOL spill\n");
                     break;
                  }

               TR::Instruction *inst = generateTrg1MemInstruction(cg, opCode, currentNode, assignedReg, tempMR, currentInstruction);

               assignedReg->setAssignedRegister(NULL);
               virtReg->setAssignedRegister(NULL);
               assignedReg->setState(TR::RealRegister::Free);

               if (comp->getDebug())
                  cg->traceRegisterAssignment("Generate reload of virt %s due to spillRegIndex dep at inst %p\n", comp->getDebug()->getName(virtReg), currentInstruction);
               cg->traceRAInstruction(inst);
               }

            if (!(std::find(cg->getSpilledRegisterList()->begin(), cg->getSpilledRegisterList()->end(), virtReg) != cg->getSpilledRegisterList()->end()))
               cg->getSpilledRegisterList()->push_front(virtReg);
            }
         // we also need to free up all locked backing storage if we are exiting the OOL during backwards RA assignment
         else if (currentInstruction->isLabel() && virtReg->getAssignedRealRegister())
            {
            TR::PPCLabelInstruction *labelInstr = (TR::PPCLabelInstruction *)currentInstruction;
            TR_BackingStore *location = virtReg->getBackingStorage();
            TR_RegisterKinds rk = virtReg->getKind();
            int32_t dataSize;
            if (labelInstr->getLabelSymbol()->isStartOfColdInstructionStream() && location)
               {
               traceMsg(comp, "\nOOL: Releasing backing storage (%p)\n", location);
               if (rk == TR_GPR)
                  dataSize = TR::Compiler->om.sizeofReferenceAddress();
               else
                  dataSize = 8;
               location->setMaxSpillDepth(0);
               cg->freeSpill(location, dataSize, 0);
               virtReg->setBackingStorage(NULL);
               }
            }
         }
      }

   for (i = 0; i < numberOfRegisters; i++)
      {
      map.addDependency(_dependencies[i], i);

      virtReg = _dependencies[i].getRegister();
      dependentRegNum = _dependencies[i].getRealRegister();

      if (dependentRegNum != TR::RealRegister::SpilledReg)
         {
         if (virtReg->getKind() == TR_GPR)
            num_gprs++;
         else if (virtReg->getKind() == TR_FPR)
            num_fprs++;
         else if (virtReg->getKind() == TR_VRF)
            num_vrfs++;
         }
      }

#ifdef DEBUG
   int locked_gprs = 0;
   int locked_fprs = 0;
   int locked_vrfs = 0;

   // count up how many registers are locked for each type
   for (i = TR::RealRegister::FirstGPR; i <= TR::RealRegister::LastGPR; i++)
      {
      realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
      if (realReg->getState() == TR::RealRegister::Locked)
         locked_gprs++;
      }
   for (i = TR::RealRegister::FirstFPR; i <= TR::RealRegister::LastFPR; i++)
      {
      realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
      if (realReg->getState() == TR::RealRegister::Locked)
         locked_fprs++;
      }
   for (i = TR::RealRegister::FirstVRF; i <= TR::RealRegister::LastVRF; i++)
      {
      realReg = machine->getPPCRealRegister((TR::RealRegister::RegNum)i);
      if (realReg->getState() == TR::RealRegister::Locked)
         locked_vrfs++;
      }
   TR_ASSERT(locked_gprs == machine->getNumberOfLockedRegisters(TR_GPR), "Inconsistent number of locked GPRs");
   TR_ASSERT(locked_fprs == machine->getNumberOfLockedRegisters(TR_FPR), "Inconsistent number of locked FPRs");
   TR_ASSERT(locked_vrfs == machine->getNumberOfLockedRegisters(TR_VRF), "Inconsistent number of locked VRFs");
#endif

   // To handle circular dependencies, we block a real register if (1) it is already assigned to a correct
   // virtual register and (2) if it is assigned to one register in the list but is required by another.
   // However, if all available registers are requested, we do not block in case (2) to avoid all registers
   // being blocked.
   bool block_gprs = true;
   bool block_fprs = true;
   bool block_vrfs = true;

   TR_ASSERT(num_gprs <= (TR::RealRegister::LastGPR - TR::RealRegister::FirstGPR + 1 - machine->getNumberOfLockedRegisters(TR_GPR)), "Too many GPR dependencies, unable to assign");
   TR_ASSERT(num_fprs <= (TR::RealRegister::LastFPR - TR::RealRegister::FirstFPR + 1 - machine->getNumberOfLockedRegisters(TR_FPR)), "Too many FPR dependencies, unable to assign");
   TR_ASSERT(num_vrfs <= (TR::RealRegister::LastVRF - TR::RealRegister::FirstVRF + 1 - machine->getNumberOfLockedRegisters(TR_VRF)), "Too many VRF dependencies, unable to assign");

   if (num_gprs == (TR::RealRegister::LastGPR - TR::RealRegister::FirstGPR + 1 - machine->getNumberOfLockedRegisters(TR_GPR)))
      block_gprs = false;
   if (num_fprs == (TR::RealRegister::LastFPR - TR::RealRegister::FirstFPR + 1 - machine->getNumberOfLockedRegisters(TR_FPR)))
      block_fprs = false;
   if (num_vrfs == (TR::RealRegister::LastVRF - TR::RealRegister::FirstVRF + 1 - machine->getNumberOfLockedRegisters(TR_VRF)))
      block_vrfs = false;

   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = _dependencies[i].getRegister();

      if (virtReg->getAssignedRealRegister() != NULL)
         {
         if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg)
            {
            virtReg->block();
            }
         else
            {
            TR::RealRegister::RegNum assignedRegNum;
            assignedRegNum = toRealRegister(virtReg->getAssignedRealRegister())->getRegisterNumber();

            // always block if required register and assigned register match;
            // block if assigned register is required by other dependency but only if
            // any spare registers are left to avoid blocking all existing registers
            if (_dependencies[i].getRealRegister() == assignedRegNum ||
                (map.getDependencyWithTarget(assignedRegNum) &&
                 ((virtReg->getKind() != TR_GPR || block_gprs) &&
                  (virtReg->getKind() != TR_FPR || block_fprs) &&
                  (virtReg->getKind() != TR_VRF || block_vrfs))))
               {
               virtReg->block();
               }
            }
         }
      }

   // Assign all virtual regs that depend on a specific real reg that is free
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = _dependencies[i].getRegister();
      dependentRegNum = _dependencies[i].getRealRegister();
      dependentRealReg = machine->getPPCRealRegister(dependentRegNum);

      if (dependentRegNum != TR::RealRegister::NoReg &&
          dependentRegNum != TR::RealRegister::SpilledReg &&
          dependentRealReg->getState() == TR::RealRegister::Free)
         {
         assignFreeRegisters(currentInstruction, &_dependencies[i], map, cg);
         }
      }

   // Assign all virtual regs that depend on a specific real reg that is not free
   for (i = 0; i < numberOfRegisters; i++)
      {
      virtReg = _dependencies[i].getRegister();
      assignedRegister = NULL;
      if (virtReg->getAssignedRealRegister() != NULL)
         {
         assignedRegister = toRealRegister(virtReg->getAssignedRealRegister());
         }
      dependentRegNum = _dependencies[i].getRealRegister();
      dependentRealReg = machine->getPPCRealRegister(dependentRegNum);
      if (dependentRegNum != TR::RealRegister::NoReg &&
          dependentRegNum != TR::RealRegister::SpilledReg &&
          dependentRealReg != assignedRegister)
         {
         bool depsBlocked = false;
         switch (_dependencies[i].getRegister()->getKind())
            {
            case TR_GPR:
               depsBlocked = block_gprs;
               break;
            case TR_FPR:
               depsBlocked = block_fprs;
               break;
            case TR_VRF:
               depsBlocked = block_vrfs;
               break;
            }
         assignContendedRegisters(currentInstruction, &_dependencies[i], map, depsBlocked, cg);
         }
      }

   // Assign all virtual regs that depend on NoReg but exclude gr0
   for (i = 0; i < numberOfRegisters; i++)
      {
      if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg && _dependencies[i].getExcludeGPR0())
         {
         TR::RealRegister *realOne;

         virtReg = _dependencies[i].getRegister();
         realOne = virtReg->getAssignedRealRegister();
         if (realOne != NULL && toRealRegister(realOne)->getRegisterNumber() == TR::RealRegister::gr0)
            {
            if ((assignedRegister = machine->findBestFreeRegister(currentInstruction, virtReg->getKind(), true, false, virtReg)) == NULL)
               {
               assignedRegister = machine->freeBestRegister(currentInstruction, virtReg, NULL, true);
               }
            machine->coerceRegisterAssignment(currentInstruction, virtReg, assignedRegister->getRegisterNumber());
            }
         else if (realOne == NULL)
            {
            machine->assignOneRegister(currentInstruction, virtReg, true);
            }
         virtReg->block();
         }
      }

   // Assign all virtual regs that depend on NoReg
   for (i = 0; i < numberOfRegisters; i++)
      {
      if (_dependencies[i].getRealRegister() == TR::RealRegister::NoReg && !_dependencies[i].getExcludeGPR0())
         {
         TR::RealRegister *realOne;

         virtReg = _dependencies[i].getRegister();
         realOne = virtReg->getAssignedRealRegister();
         if (!realOne)
            {
            machine->assignOneRegister(currentInstruction, virtReg, false);
            }
         virtReg->block();
         }
      }

   unblockRegisters(numberOfRegisters);

   for (i = 0; i < numberOfRegisters; i++)
      {
      TR::Register *dependentRegister = getRegisterDependency(i)->getRegister();
      // dependentRegister->getAssignedRegister() is NULL if the reg has already been spilled due to a spilledReg dep
      if (comp->getOption(TR_DisableOOL) || (!(cg->isOutOfLineColdPath()) && !(cg->isOutOfLineHotPath())))
         {
         TR_ASSERT(dependentRegister->getAssignedRegister(), "assignedRegister can not be NULL");
         }
      if (dependentRegister->getAssignedRegister())
         {
         TR::RealRegister *assignedRegister = dependentRegister->getAssignedRegister()->getRealRegister();

         if (getRegisterDependency(i)->getRealRegister() == TR::RealRegister::NoReg)
            getRegisterDependency(i)->setRealRegister(toRealRegister(assignedRegister)->getRegisterNumber());

         machine->decFutureUseCountAndUnlatch(dependentRegister);
         }
      }
   }
TR_BitVector *
addVeryRefinedCallAliasSets(TR::ResolvedMethodSymbol *methodSymbol, TR_BitVector *aliases, List<void> *methodsPeeked)
   {
   TR::Compilation *comp = TR::comp();
   void *methodId = methodSymbol->getResolvedMethod()->getPersistentIdentifier();
   if (methodsPeeked->find(methodId))
      {
      // This can't be allocated into the alias region as it must be accessed across optimizations
      TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
      *heapAliases |= *aliases;
      return heapAliases;
      }

   // stop if the peek is getting very deep
   //
   if (methodsPeeked->getSize() >= PEEK_THRESHOLD)
      return 0;

   methodsPeeked->add(methodId);

   dumpOptDetails(comp, "O^O REFINING ALIASES: Peeking into the IL to refine aliases \n");

   if (!methodSymbol->getResolvedMethod()->genMethodILForPeeking(methodSymbol, comp, true))
      return 0;

   TR::SymbolReferenceTable *symRefTab = comp->getSymRefTab();
   for (TR::TreeTop *tt = methodSymbol->getFirstTreeTop(); tt; tt = tt->getNextTreeTop())
      {
      TR::Node *node = tt->getNode();

      if (node->getOpCode().isResolveCheck())
         return 0;

      if ((node->getOpCodeValue() == TR::treetop) ||
          (node->getOpCodeValue() == TR::compressedRefs) ||
          node->getOpCode().isCheck())
         node = node->getFirstChild();

      if (node->getOpCode().isStore())
         {
         TR::SymbolReference *symRefInCallee = node->getSymbolReference(), *symRefInCaller;
         TR::Symbol *symInCallee = symRefInCallee->getSymbol();
         TR::DataType type = symInCallee->getDataType();
         if (symInCallee->isShadow())
            {
            if (symInCallee->isArrayShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayShadowIndex(type));
            else if (symInCallee->isArrayletShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayletShadowIndex(type));
            else
               symRefInCaller = symRefTab->findShadowSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);

            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedShadowAliases(aliases, symRefTab);
               aliases->set(symRefInCaller->getReferenceNumber());
               }
            }
         else if (symInCallee->isStatic())
            {
            symRefInCaller = symRefTab->findStaticSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);
            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedStaticAliases(aliases, symRefTab);
               else
                  aliases->set(symRefInCaller->getReferenceNumber());
               }
            }
         }
      else if (node->getOpCode().isCall())
         {
         if (node->getOpCode().isCallIndirect())
            return 0;
         TR::ResolvedMethodSymbol *calleeSymbol = node->getSymbol()->getResolvedMethodSymbol();
         if (!calleeSymbol)
            return 0;
         TR_ResolvedMethod *calleeMethod = calleeSymbol->getResolvedMethod();
         if (!calleeMethod->isCompilable(comp->trMemory()) || calleeMethod->isJNINative())
            return 0;
         if (!addVeryRefinedCallAliasSets(calleeSymbol, aliases, methodsPeeked))
            return 0;
         }
      else if (node->getOpCodeValue() == TR::monent)
         return 0;
      }

   // This can't be allocated into the alias region as it must be accessed across optimizations
   TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
   *heapAliases |= *aliases;
   return heapAliases;
   }
TR_BitVector *
OMR::SymbolReference::getUseDefAliasesBV(bool isDirectCall, bool includeGCSafePoint)
   {
   TR::Compilation *comp = TR::comp();
   TR::Region &aliasRegion = comp->aliasRegion();
   int32_t bvInitialSize = comp->getSymRefCount();
   TR_BitVectorGrowable growability = growable;

   // allow more than one shadow for an array type. Used by LoopAliasRefiner
   const bool supportArrayRefinement = true;

   int32_t kind = _symbol->getKind();
   TR::SymbolReferenceTable *symRefTab = comp->getSymRefTab();

   // !!! NOTE !!!
   // THERE IS A COPY OF THIS LOGIC IN sharesSymbol
   //
   if (!self()->reallySharesSymbol(comp))
      {
      switch (kind)
         {
         case TR::Symbol::IsShadow:
         case TR::Symbol::IsStatic:
            {
            // For unresolved constant dynamic, we need to invoke a Java bootstrap method,
            // which can have arbitrary side effects, so the aliasing should be conservative here.
            // isConstObjectRef now returns true for condy, so we add an explicit condition,
            // more like a short-circuit, to say if we are unresolved and not isConstObjectRef
            // (this is the same as before), or if we are unresolved and condy
            // (this is the extra condition added), we would return conservative aliases.
            if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) ||
                _symbol->isVolatile() || self()->isLiteralPoolAddress() || self()->isFromLiteralPool() ||
                _symbol->isUnsafeShadowSymbol() ||
                (_symbol->isArrayShadowSymbol() && comp->getMethodSymbol()->hasVeryRefinedAliasSets()))
               {
               // getUseDefAliases might not return NULL
               }
            else if (!symRefTab->aliasBuilder.mutableGenericIntShadowHasBeenCreated())
               {
               // getUseDefAliases must return NULL
               return NULL;
               }
            else if (kind == TR::Symbol::IsStatic && !symRefTab->aliasBuilder.litPoolGenericIntShadowHasBeenCreated())
               {
               // getUseDefAliases must return NULL
               return NULL;
               }
            break;
            }
         }
      }

   // now do stuff for various kinds of symbols
   //
   switch (kind)
      {
      case TR::Symbol::IsMethod:
         {
         TR::MethodSymbol *methodSymbol = _symbol->castToMethodSymbol();

         if (!methodSymbol->isHelper())
            return symRefTab->aliasBuilder.methodAliases(self());

         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arraySetSymbol) ||
             symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::osrFearPointHelperSymbol) ||
             symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::potentialOSRPointHelperSymbol))
            {
            return &symRefTab->aliasBuilder.defaultMethodDefAliases();
            }

         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arrayCmpSymbol))
            return 0;

         switch (self()->getReferenceNumber())
            {
            case TR_methodTypeCheck:
            case TR_nullCheck:
               return &symRefTab->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();

            case TR_arrayBoundsCheck:
            case TR_checkCast:
            case TR_divCheck:
            case TR_typeCheckArrayStore:
            case TR_arrayStoreException:
            case TR_incompatibleReceiver:
            case TR_IncompatibleClassChangeError:
            case TR_reportFinalFieldModified:
            case TR_reportMethodEnter:
            case TR_reportStaticMethodEnter:
            case TR_reportMethodExit:
            case TR_acquireVMAccess:
            case TR_instanceOf:
            case TR_checkAssignable:
            case TR_throwCurrentException:
            case TR_releaseVMAccess:
            case TR_stackOverflow:
            case TR_writeBarrierStore:
            case TR_writeBarrierBatchStore:
            case TR_jitProfileAddress:
            case TR_jitProfileWarmCompilePICAddress:
            case TR_jitProfileValue:
            case TR_jitProfileLongValue:
            case TR_jitProfileBigDecimalValue:
            case TR_jitProfileParseBuffer:
               return 0;

            case TR_asyncCheck:
            case TR_writeBarrierClassStoreRealTimeGC:
            case TR_writeBarrierStoreRealTimeGC:
            case TR_aNewArray:
            case TR_newObject:
            case TR_newObjectNoZeroInit:
            case TR_newArray:
            case TR_multiANewArray:
               if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
                  return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers();
               else
                  return 0;

            case TR_aThrow:
               return 0;

            // The monitor exit symbol needs to be aliased with all fields in the
            // current class to ensure that all references to fields are evaluated
            // before the monitor exit
            case TR_monitorExit:
            case TR_monitorEntry:
            case TR_transactionExit:
            case TR_transactionEntry:
            default:
               // The following is the place to check for
               // a use of killsAllMethodSymbolRef... However,
               // it looks like the default action is sufficient.
               //if (symRefTab->findKillsAllMethodSymbolRef() == self())
               //   {
               //   }
               return &symRefTab->aliasBuilder.defaultMethodDefAliases();
            }
         }
      case TR::Symbol::IsResolvedMethod:
         {
         TR::ResolvedMethodSymbol *resolvedMethodSymbol = _symbol->castToResolvedMethodSymbol();

         if (!comp->getOption(TR_EnableHCR))
            {
            switch (resolvedMethodSymbol->getRecognizedMethod())
               {
#ifdef J9_PROJECT_SPECIFIC
               case TR::java_lang_System_arraycopy:
                  {
                  TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
                  *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs();
                  if (comp->generateArraylets())
                     *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs();
                  return aliases;
                  }

                  if (resolvedMethodSymbol->isPureFunction())
                     return NULL;

               case TR::java_lang_Double_longBitsToDouble:
               case TR::java_lang_Double_doubleToLongBits:
               case TR::java_lang_Float_intBitsToFloat:
               case TR::java_lang_Float_floatToIntBits:
               case TR::java_lang_Double_doubleToRawLongBits:
               case TR::java_lang_Float_floatToRawIntBits:
               case TR::java_lang_Math_sqrt:
               case TR::java_lang_StrictMath_sqrt:
               case TR::java_lang_Math_sin:
               case TR::java_lang_StrictMath_sin:
               case TR::java_lang_Math_cos:
               case TR::java_lang_StrictMath_cos:
               case TR::java_lang_Math_max_I:
               case TR::java_lang_Math_min_I:
               case TR::java_lang_Math_max_L:
               case TR::java_lang_Math_min_L:
               case TR::java_lang_Math_abs_I:
               case TR::java_lang_Math_abs_L:
               case TR::java_lang_Math_abs_F:
               case TR::java_lang_Math_abs_D:
               case TR::java_lang_Math_pow:
               case TR::java_lang_StrictMath_pow:
               case TR::java_lang_Math_exp:
               case TR::java_lang_StrictMath_exp:
               case TR::java_lang_Math_log:
               case TR::java_lang_StrictMath_log:
               case TR::java_lang_Math_floor:
               case TR::java_lang_Math_ceil:
               case TR::java_lang_Math_copySign_F:
               case TR::java_lang_Math_copySign_D:
               case TR::java_lang_StrictMath_floor:
               case TR::java_lang_StrictMath_ceil:
               case TR::java_lang_StrictMath_copySign_F:
               case TR::java_lang_StrictMath_copySign_D:
               case TR::com_ibm_Compiler_Internal__TR_Prefetch:
               case TR::java_nio_Bits_keepAlive:
                  if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
                     return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers();
                  else
                     return 0;

               // no aliasing on DFP dummy stubs
               case TR::java_math_BigDecimal_DFPPerformHysteresis:
               case TR::java_math_BigDecimal_DFPUseDFP:
               case TR::java_math_BigDecimal_DFPHWAvailable:
               case TR::java_math_BigDecimal_DFPCompareTo:
               case TR::java_math_BigDecimal_DFPUnscaledValue:
               case TR::com_ibm_dataaccess_DecimalData_DFPFacilityAvailable:
               case TR::com_ibm_dataaccess_DecimalData_DFPUseDFP:
               case TR::com_ibm_dataaccess_DecimalData_DFPConvertPackedToDFP:
               case TR::com_ibm_dataaccess_DecimalData_DFPConvertDFPToPacked:
               case TR::com_ibm_dataaccess_DecimalData_createZeroBigDecimal:
               case TR::com_ibm_dataaccess_DecimalData_getlaside:
               case TR::com_ibm_dataaccess_DecimalData_setlaside:
               case TR::com_ibm_dataaccess_DecimalData_getflags:
               case TR::com_ibm_dataaccess_DecimalData_setflags:
                  if (!(
#ifdef TR_TARGET_S390
                        TR::Compiler->target.cpu.getS390SupportsDFP() ||
#endif
                        TR::Compiler->target.cpu.supportsDecimalFloatingPoint()) ||
                      comp->getOption(TR_DisableDFP))
                     return NULL;
#endif //J9_PROJECT_SPECIFIC
               default:
                  break;
               }
            }

#ifdef J9_PROJECT_SPECIFIC
         TR_ResolvedMethod *method = resolvedMethodSymbol->getResolvedMethod();
         TR_PersistentMethodInfo *methodInfo = TR_PersistentMethodInfo::get(method);
         if (methodInfo &&
             (methodInfo->hasRefinedAliasSets() ||
              comp->getMethodHotness() >= veryHot ||
              resolvedMethodSymbol->hasVeryRefinedAliasSets()) &&
             (method->isStatic() || method->isFinal() || isDirectCall))
            {
            TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
               *aliases |= symRefTab->aliasBuilder.gcSafePointSymRefNumbers();

            if (methodInfo->doesntKillAnything() && !comp->getOption(TR_DisableRefinedAliases))
               return aliases;

            if ((resolvedMethodSymbol->hasVeryRefinedAliasSets() || comp->getMethodHotness() >= hot) &&
                !debug("disableVeryRefinedCallAliasSets"))
               {
               TR_BitVector *exactAliases = 0;
               if (resolvedMethodSymbol->hasVeryRefinedAliasSets())
                  exactAliases = symRefTab->aliasBuilder.getVeryRefinedCallAliasSets(resolvedMethodSymbol);
               else
                  {
                  resolvedMethodSymbol->setHasVeryRefinedAliasSets(true);
                  List<void> methodsPeeked(comp->trMemory());
                  exactAliases = addVeryRefinedCallAliasSets(resolvedMethodSymbol, aliases, &methodsPeeked);
                  symRefTab->aliasBuilder.setVeryRefinedCallAliasSets(resolvedMethodSymbol, exactAliases);
                  }

               if (exactAliases)
                  {
                  return exactAliases;
                  }
               }

            // From here on, we're just checking refined alias info.
            // If refined aliases are disabled, return the conservative answer
            // we would have returned had we never attempted to use refined
            // aliases at all.
            //
            if (comp->getOption(TR_DisableRefinedAliases))
               return symRefTab->aliasBuilder.methodAliases(self());

            if (!methodInfo->doesntKillAddressArrayShadows())
               {
               symRefTab->aliasBuilder.addAddressArrayShadows(aliases);
               if (comp->generateArraylets())
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Address));
               }

            if (!methodInfo->doesntKillIntArrayShadows())
               {
               symRefTab->aliasBuilder.addIntArrayShadows(aliases);
               if (comp->generateArraylets())
                  {
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32));
                  }
               }

            if (!methodInfo->doesntKillNonIntPrimitiveArrayShadows())
               {
               symRefTab->aliasBuilder.addNonIntPrimitiveArrayShadows(aliases);
               if (comp->generateArraylets())
                  {
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int8));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int16));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int64));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Float));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Double));
                  }
               }

            if (!methodInfo->doesntKillAddressFields())
               *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs();
            if (!methodInfo->doesntKillIntFields())
               *aliases |= symRefTab->aliasBuilder.intShadowSymRefs();
            if (!methodInfo->doesntKillNonIntPrimitiveFields())
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs();
            if (!methodInfo->doesntKillAddressStatics())
               *aliases |= symRefTab->aliasBuilder.addressStaticSymRefs();
            if (!methodInfo->doesntKillIntStatics())
               *aliases |= symRefTab->aliasBuilder.intStaticSymRefs();
            if (!methodInfo->doesntKillNonIntPrimitiveStatics())
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveStaticSymRefs();

            TR_BitVector *methodAliases = symRefTab->aliasBuilder.methodAliases(self());
            *aliases &= *methodAliases;
            return aliases;
            }
#endif

         return symRefTab->aliasBuilder.methodAliases(self());
         }
      case TR::Symbol::IsShadow:
         {
         if ((self()->isUnresolved() && !_symbol->isConstObjectRef()) ||
             _symbol->isVolatile() || self()->isLiteralPoolAddress() || self()->isFromLiteralPool() ||
             (_symbol->isUnsafeShadowSymbol() && !self()->reallySharesSymbol()))
            {
            if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber()))
               {
               TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
               *aliases |= comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();
               *aliases -= symRefTab->aliasBuilder.cpSymRefs();
               return aliases;
               }
            else
               return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();
            }

         TR_BitVector *aliases = NULL;
         if (_symbol == symRefTab->findGenericIntShadowSymbol())
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs();
            if (comp->generateArraylets())
               *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
#ifdef J9_PROJECT_SPECIFIC
            *aliases |= symRefTab->aliasBuilder.unresolvedShadowSymRefs();
#endif
            if (symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing())
               {
               *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.intShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs();
               }

            aliases->set(self()->getReferenceNumber());
            return aliases;
            }

         if (self()->reallySharesSymbol(comp))
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setSharedShadowAliases(aliases, symRefTab);
            }

         if (symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setLiteralPoolAliases(aliases, symRefTab);

            if (symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing() || self()->isUnresolved())
               {
               *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs();
               }
            }

         if (_symbol->isArrayShadowSymbol() && symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();
            if (supportArrayRefinement && self()->getIndependentSymRefs())
               *aliases -= *self()->getIndependentSymRefs();
            }

#ifdef J9_PROJECT_SPECIFIC
         // make TR::PackedDecimal aliased with TR::Int8(byte)
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::PackedDecimal)
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(TR::Int8));
            }
         //the other way around.
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::Int8)
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(TR::PackedDecimal));
            }
#endif

         // alias vector arrays shadows with corresponding scalar array shadows
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType().isVector())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().vectorToScalar()));
            }
         // the other way around
         if (_symbol->isArrayShadowSymbol() && !_symbol->getDataType().isVector())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().scalarToVector()));
            }

         if (_symbol->isArrayShadowSymbol() &&
             !symRefTab->aliasBuilder.immutableArrayElementSymRefs().isEmpty())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);

            TR::DataType type = _symbol->getDataType();
            TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs());
            int32_t symRefNum;
            while (bvi.hasMoreElements())
               {
               symRefNum = bvi.getNextElement();
               if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type)
                  aliases->set(symRefNum);
               }
            }

         if (_symbol->isArrayShadowSymbol() &&
             supportArrayRefinement &&
             comp->getMethodSymbol()->hasVeryRefinedAliasSets())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);

            TR::DataType type = _symbol->getDataType();
            TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs());
            int32_t symRefNum;
            while (bvi.hasMoreElements())
               {
               symRefNum = bvi.getNextElement();
               if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type)
                  aliases->set(symRefNum);
               }

            if (self()->getIndependentSymRefs())
               *aliases -= *self()->getIndependentSymRefs();

            return aliases;
            }

         if (aliases)
            aliases->set(self()->getReferenceNumber());

         if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber()))
            *aliases -= symRefTab->aliasBuilder.cpSymRefs();
         else if (symRefTab->aliasBuilder.cpSymRefs().get(self()->getReferenceNumber()))
            *aliases -= symRefTab->aliasBuilder.unsafeArrayElementSymRefs();

         return aliases;
         }
      case TR::Symbol::IsStatic:
         {
         // For unresolved constant dynamic, we need to invoke a Java bootstrap method,
         // which can have arbitrary side effects, so the aliasing should be conservative here.
         // isConstObjectRef now returns true for condy, so we add an explicit condition,
         // more like a short-circuit, to say if we are unresolved and not isConstObjectRef
         // (this is the same as before), or if we are unresolved and condy
         // (this is the extra condition added), we would return conservative aliases.
         if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) ||
             self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || _symbol->isVolatile())
            {
            return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliases();
            }

         TR_BitVector *aliases = NULL;
         if (self()->reallySharesSymbol(comp))
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setSharedStaticAliases(aliases, symRefTab);
            }

         if (symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setLiteralPoolAliases(aliases, symRefTab);
            }

         if (aliases)
            aliases->set(self()->getReferenceNumber());
         return aliases;
         }
      case TR::Symbol::IsMethodMetaData:
         {
         TR_BitVector *aliases = NULL;
         return aliases;
         }
      default:
         //TR_ASSERT(0, "getUseDefAliasing called for non method");
         if (comp->generateArraylets() &&
             comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers().get(self()->getReferenceNumber()) &&
             includeGCSafePoint)
            return &comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers();
         else
            return 0;
      }
   }