void TR::ARMSystemLinkage::createEpilogue(TR::Instruction *cursor)
   {
   TR::CodeGenerator *codeGen = cg();
   const TR::ARMLinkageProperties& properties = getProperties();
   TR::Machine *machine = codeGen->machine();
   TR::Node *lastNode = cursor->getNode();
   TR::ResolvedMethodSymbol *bodySymbol = comp()->getJittedMethodSymbol();
   TR::RealRegister *stackPtr = machine->getRealRegister(properties.getStackPointerRegister());

   // restore link register (r14)
   auto *stackSlot = new (trHeapMemory()) TR::MemoryReference(stackPtr, bodySymbol->getLocalMappingCursor(), codeGen);
   cursor = generateMemSrc1Instruction(cg(), ARMOp_ldr, lastNode, stackSlot, machine->getRealRegister(TR::RealRegister::gr14), cursor);

   // restore all preserved registers
   for (int r = TR::RealRegister::gr4; r <= TR::RealRegister::gr11; ++r)
      {
      auto *stackSlot = new (trHeapMemory()) TR::MemoryReference(stackPtr, (TR::RealRegister::gr11 - r + 1)*4 + bodySymbol->getLocalMappingCursor(), codeGen);
      cursor = generateMemSrc1Instruction(cg(), ARMOp_ldr, lastNode, stackSlot, machine->getRealRegister((TR::RealRegister::RegNum)r), cursor);
      }

   // remove space for preserved registers
   auto frameSize = codeGen->getFrameSizeInBytes();
   cursor = generateTrg1Src1ImmInstruction(codeGen, ARMOp_add, lastNode, stackPtr, stackPtr, frameSize, 0, cursor);

   // return using `mov r15, r14`
   TR::RealRegister *gr14 = machine->getRealRegister(TR::RealRegister::gr14);
   TR::RealRegister *gr15 = machine->getRealRegister(TR::RealRegister::gr15);
   cursor = generateTrg1Src1Instruction(codeGen, ARMOp_mov, lastNode, gr15, gr14, cursor);
   }
void TR::ARM64SystemLinkage::createEpilogue(TR::Instruction *cursor)
   {
   TR::CodeGenerator *codeGen = cg();
   const TR::ARM64LinkageProperties& properties = getProperties();
   TR::Machine *machine = codeGen->machine();
   TR::Node *lastNode = cursor->getNode();
   TR::ResolvedMethodSymbol *bodySymbol = comp()->getJittedMethodSymbol();
   TR::RealRegister *sp = machine->getRealRegister(properties.getStackPointerRegister());

   // restore callee-saved registers
   uint32_t offset = bodySymbol->getLocalMappingCursor();
   for (int r = TR::RealRegister::x19; r <= TR::RealRegister::x28; r++)
      {
      TR::RealRegister *rr = machine->getRealRegister((TR::RealRegister::RegNum)r);
      if (rr->getHasBeenAssignedInMethod())
         {
         TR::MemoryReference *stackSlot = new (trHeapMemory()) TR::MemoryReference(sp, offset, codeGen);
         cursor = generateTrg1MemInstruction(cg(), TR::InstOpCode::ldrimmx, lastNode, rr, stackSlot, cursor);
         offset += 8;
         }
      }
   for (int r = TR::RealRegister::v8; r <= TR::RealRegister::v15; r++)
      {
      TR::RealRegister *rr = machine->getRealRegister((TR::RealRegister::RegNum)r);
      if (rr->getHasBeenAssignedInMethod())
         {
         TR::MemoryReference *stackSlot = new (trHeapMemory()) TR::MemoryReference(sp, offset, codeGen);
         cursor = generateTrg1MemInstruction(cg(), TR::InstOpCode::vldrimmd, lastNode, rr, stackSlot, cursor);
         offset += 8;
         }
      }

   // restore link register (x30)
   TR::RealRegister *lr = machine->getRealRegister(TR::RealRegister::lr);
   if (machine->getLinkRegisterKilled())
      {
      TR::MemoryReference *stackSlot = new (trHeapMemory()) TR::MemoryReference(sp, 0, codeGen);
      cursor = generateTrg1MemInstruction(cg(), TR::InstOpCode::ldrimmx, lastNode, lr, stackSlot, cursor);
      }

   // remove space for preserved registers
   uint32_t frameSize = codeGen->getFrameSizeInBytes();
   if (constantIsUnsignedImm12(frameSize))
      {
      cursor = generateTrg1Src1ImmInstruction(codeGen, TR::InstOpCode::addimmx, lastNode, sp, sp, frameSize, cursor);
      }
   else
      {
      TR_UNIMPLEMENTED();
      }

   // return
   cursor = generateRegBranchInstruction(codeGen, TR::InstOpCode::ret, lastNode, lr, cursor);
   }
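// A minimal sketch (an assumption about the range test constantIsUnsignedImm12
// performs; it is not the OMR helper itself): AArch64 ADD/SUB (immediate)
// encodes a 12-bit unsigned immediate, so only frame sizes in 0..4095 can be
// released with a single addimmx; larger frames hit the TR_UNIMPLEMENTED()
// path above. (The encoding also allows the immediate to be shifted left by
// 12 bits, which this sketch ignores.)
static inline bool sketchIsUnsignedImm12(uint64_t value)
   {
   return value < (1u << 12); // fits the 12-bit unsigned immediate field
   }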
TR::Instruction *TR_X86SystemLinkage::savePreservedRegisters(TR::Instruction *cursor)
   {
   // For IA32, usePushForPreservedRegs is true only when shrink wrapping is disabled;
   // for X64, shrink wrapping is always on, so usePushForPreservedRegs is always false.
   //TR_ASSERT(!getProperties().getUsesPushesForPreservedRegs(), "assertion failure");
   TR::ResolvedMethodSymbol *bodySymbol = comp()->getJittedMethodSymbol();
   const int32_t localSize = getProperties().getOffsetToFirstLocal() - bodySymbol->getLocalMappingCursor();
   const int32_t pointerSize = getProperties().getPointerSize();

   int32_t offsetCursor = -localSize + getProperties().getOffsetToFirstLocal() - pointerSize;

   if (_properties.getUsesPushesForPreservedRegs())
      {
      for (int32_t pindex = _properties.getMaxRegistersPreservedInPrologue()-1; pindex >= 0; pindex--)
         {
         TR::RealRegister::RegNum idx = _properties.getPreservedRegister((uint32_t)pindex);
         TR::RealRegister *reg = machine()->getX86RealRegister(idx);
         if (reg->getHasBeenAssignedInMethod() && reg->getState() != TR::RealRegister::Locked)
            {
            cursor = new (trHeapMemory()) TR::X86RegInstruction(cursor, PUSHReg, reg, cg());
            }
         }
      }
   else
      {
      TR_BitVector *p = cg()->getPreservedRegsInPrologue();
      for (int32_t pindex = getProperties().getMaxRegistersPreservedInPrologue()-1; pindex >= 0; pindex--)
         {
         TR::RealRegister::RegNum idx = _properties.getPreservedRegister((uint32_t)pindex);
         TR::RealRegister *reg = machine()->getX86RealRegister(idx);
         if (reg->getHasBeenAssignedInMethod() && reg->getState() != TR::RealRegister::Locked)
            {
            if (!p || p->get(idx))
               {
               cursor = generateMemRegInstruction(
                  cursor,
                  movOpcodes[MemReg][fullRegisterMovType(reg)],
                  generateX86MemoryReference(machine()->getX86RealRegister(TR::RealRegister::vfp), offsetCursor, cg()),
                  reg,
                  cg()
                  );
               }
            offsetCursor -= pointerSize;
            }
         }
      }
   return cursor;
   }
bool checkMethodSignature(TR::ValuePropagation *vp, TR::SymbolReference *symRef, const char *sig)
   {
   TR::Symbol *symbol = symRef->getSymbol();
   if (!symbol->isResolvedMethod())
      return false;

   TR::ResolvedMethodSymbol *method = symbol->castToResolvedMethodSymbol();
   if (!method)
      return false;

   return strncmp(method->getResolvedMethod()->signature(vp->trMemory()), sig, strlen(sig)) == 0;
   }
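// Because the comparison above is strncmp over strlen(sig), sig acts as a
// prefix pattern rather than an exact signature. For example, cacheStringAppend
// below matches any StringBuilder constructor with:
//    checkMethodSignature(vp, node->getSymbolReference(),
//                         "java/lang/StringBuilder.<init>(");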
void TR::ILValidator::validate(const OMR::ILValidationStrategy *strategy)
   {
   /**
    * Selection Phase:
    * From all the available `ILValidationRule`s, select only the ones
    * corresponding to the given `OMR::ILValidationStrategy`.
    */
   std::vector<TR::MethodValidationRule *> reqMethodValidationRules = getRequiredMethodValidationRules(strategy);
   std::vector<TR::BlockValidationRule *> reqBlockValidationRules = getRequiredBlockValidationRules(strategy);
   std::vector<TR::NodeValidationRule *> reqNodeValidationRules = getRequiredNodeValidationRules(strategy);

   /**
    * Validation Phase:
    * Validate against the required set of `ILValidationRule`s.
    */

   /* Rules that are verified over the entire method. */
   TR::ResolvedMethodSymbol *methodSymbol = comp()->getMethodSymbol();
   for (auto it = reqMethodValidationRules.begin(); it != reqMethodValidationRules.end(); ++it)
      {
      (*it)->validate(methodSymbol);
      }

   /* Checks performed across extended blocks. */
   for (auto it = reqBlockValidationRules.begin(); it != reqBlockValidationRules.end(); ++it)
      {
      TR::TreeTop *tt, *exitTreeTop;
      for (tt = methodSymbol->getFirstTreeTop(); tt; tt = exitTreeTop->getNextTreeTop())
         {
         TR::TreeTop *firstTreeTop = tt;
         exitTreeTop = tt->getExtendedBlockExitTreeTop();
         (*it)->validate(firstTreeTop, exitTreeTop);
         }
      }

   /* NodeValidationRules check each node for a specific property. */
   for (auto it = reqNodeValidationRules.begin(); it != reqNodeValidationRules.end(); ++it)
      {
      for (TR::PreorderNodeIterator nodeIter(methodSymbol->getFirstTreeTop(), comp(), "NODE_VALIDATOR");
           nodeIter.currentTree(); ++nodeIter)
         {
         (*it)->validate(nodeIter.currentNode());
         }
      }
   }
TR::Instruction *TR::X86SystemLinkage::savePreservedRegisters(TR::Instruction *cursor)
   {
   // For IA32, usePushForPreservedRegs is true; for X64 it is always false.
   TR::ResolvedMethodSymbol *bodySymbol = comp()->getJittedMethodSymbol();
   const int32_t localSize = getProperties().getOffsetToFirstLocal() - bodySymbol->getLocalMappingCursor();
   const int32_t pointerSize = getProperties().getPointerSize();

   int32_t offsetCursor = -localSize + getProperties().getOffsetToFirstLocal() - pointerSize;

   if (_properties.getUsesPushesForPreservedRegs())
      {
      for (int32_t pindex = _properties.getMaxRegistersPreservedInPrologue()-1; pindex >= 0; pindex--)
         {
         TR::RealRegister::RegNum idx = _properties.getPreservedRegister((uint32_t)pindex);
         TR::RealRegister *reg = machine()->getX86RealRegister(idx);
         if (reg->getHasBeenAssignedInMethod() && reg->getState() != TR::RealRegister::Locked)
            {
            cursor = new (trHeapMemory()) TR::X86RegInstruction(cursor, PUSHReg, reg, cg());
            }
         }
      }
   else
      {
      for (int32_t pindex = getProperties().getMaxRegistersPreservedInPrologue()-1; pindex >= 0; pindex--)
         {
         TR::RealRegister::RegNum idx = _properties.getPreservedRegister((uint32_t)pindex);
         TR::RealRegister *reg = machine()->getX86RealRegister(idx);
         if (reg->getHasBeenAssignedInMethod() && reg->getState() != TR::RealRegister::Locked)
            {
            cursor = generateMemRegInstruction(
               cursor,
               TR::Linkage::movOpcodes(MemReg, fullRegisterMovType(reg)),
               generateX86MemoryReference(machine()->getX86RealRegister(TR::RealRegister::vfp), offsetCursor, cg()),
               reg,
               cg()
               );
            offsetCursor -= pointerSize;
            }
         }
      }
   return cursor;
   }
// Copies parameters from where they enter the method (either on stack or in a
// linkage register) to their "home location" where the method body will expect
// to find them (either on stack or in a global register).
//
TR::Instruction *
TR::X86SystemLinkage::copyParametersToHomeLocation(TR::Instruction *cursor)
   {
   TR::Machine *machine = cg()->machine();
   TR::RealRegister *framePointer = machine->getX86RealRegister(TR::RealRegister::vfp);

   TR::ResolvedMethodSymbol *bodySymbol = comp()->getJittedMethodSymbol();
   ListIterator<TR::ParameterSymbol> paramIterator(&(bodySymbol->getParameterList()));
   TR::ParameterSymbol *paramCursor;

   const TR::RealRegister::RegNum noReg = TR::RealRegister::NoReg;
   TR_ASSERT(noReg == 0, "noReg must be zero so zero-initializing movStatus will work");

   TR::MovStatus movStatus[TR::RealRegister::NumRegisters] = {{(TR::RealRegister::RegNum)0,(TR::RealRegister::RegNum)0,(TR_MovDataTypes)0}};

   // We must always do the stores first, then the reg-reg copies, then the
   // loads, so that we never clobber a register we will need later. However,
   // the logic is simpler if we do the loads and stores in the same loop.
   // Therefore, we maintain a separate instruction cursor for the loads.
   //
   // We defer the initialization of loadCursor until we generate the first
   // load. Otherwise, if we happen to generate some stores first, then the
   // store cursor would get ahead of the loadCursor, and the instructions
   // would end up in the wrong order despite our efforts.
   //
   TR::Instruction *loadCursor = NULL;

   // Phase 1: generate RegMem and MemReg movs, and collect information about
   // the required RegReg movs.
   //
   for (paramCursor = paramIterator.getFirst(); paramCursor != NULL; paramCursor = paramIterator.getNext())
      {
      int8_t lri = paramCursor->getLinkageRegisterIndex();     // How the parameter enters the method
      TR::RealRegister::RegNum ai                              // Where method body expects to find it
         = (TR::RealRegister::RegNum)paramCursor->getAllocatedIndex();
      int32_t offset = paramCursor->getParameterOffset();      // Location of the parameter's stack slot
      TR_MovDataTypes movDataType = paramMovType(paramCursor); // What sort of MOV instruction does it need?

      // Copy the parameter to wherever it should be
      //
      if (lri == NOT_LINKAGE)           // It's on the stack
         {
         if (ai == NOT_ASSIGNED)        // It only needs to be on the stack
            {
            // Nothing to do
            }
         else                           // Method body expects it to be in the ai register
            {
            if (loadCursor == NULL)
               loadCursor = cursor;

            if (debug("traceCopyParametersToHomeLocation"))
               diagnostic("copyParametersToHomeLocation: Loading %d\n", ai);
            // ai := stack
            loadCursor = generateRegMemInstruction(
               loadCursor,
               TR::Linkage::movOpcodes(RegMem, movDataType),
               machine->getX86RealRegister(ai),
               generateX86MemoryReference(framePointer, offset, cg()),
               cg()
               );
            }
         }
      else                              // It's in a linkage register
         {
         TR::RealRegister::RegNum sourceIndex = getProperties().getArgumentRegister(lri, isFloat(movDataType));

         // Copy to the stack if necessary
         //
         if (ai == NOT_ASSIGNED || hasToBeOnStack(paramCursor))
            {
            if (comp()->getOption(TR_TraceCG))
               traceMsg(comp(), "copyToHomeLocation param %p, linkage reg index %d, allocated index %d, parameter offset %d, hasToBeOnStack %d, parm->isParmHasToBeOnStack() %d.\n", paramCursor, lri, ai, offset, hasToBeOnStack(paramCursor), paramCursor->isParmHasToBeOnStack());
            if (debug("traceCopyParametersToHomeLocation"))
               diagnostic("copyParametersToHomeLocation: Storing %d\n", sourceIndex);
            // stack := lri
            cursor = generateMemRegInstruction(
               cursor,
               TR::Linkage::movOpcodes(MemReg, movDataType),
               generateX86MemoryReference(framePointer, offset, cg()),
               machine->getX86RealRegister(sourceIndex),
               cg()
               );
            }

         // Copy to the ai register if necessary
         //
         if (ai != NOT_ASSIGNED && ai != sourceIndex)
            {
            // This parameter needs a RegReg move. We don't know yet whether
            // we need the value in the target register, so for now we just
            // remember that we need to do this and keep going.
            //
            TR_ASSERT(movStatus[ai         ].sourceReg == noReg, "Each target reg must have only one source");
            TR_ASSERT(movStatus[sourceIndex].targetReg == noReg, "Each source reg must have only one target");
            if (debug("traceCopyParametersToHomeLocation"))
               diagnostic("copyParametersToHomeLocation: Planning to move %d to %d\n", sourceIndex, ai);
            movStatus[ai].sourceReg = sourceIndex;
            movStatus[sourceIndex].targetReg = ai;
            movStatus[sourceIndex].outgoingDataType = movDataType;
            }

         if (debug("traceCopyParametersToHomeLocation") && ai == sourceIndex)
            {
            diagnostic("copyParametersToHomeLocation: Parameter #%d already in register %d\n", lri, ai);
            }
         }
      }

   // Phase 2: Iterate through the parameters again to insert the RegReg moves.
   //
   for (paramCursor = paramIterator.getFirst(); paramCursor != NULL; paramCursor = paramIterator.getNext())
      {
      if (paramCursor->getLinkageRegisterIndex() == NOT_LINKAGE)
         continue;

      const TR::RealRegister::RegNum paramReg =
         getProperties().getArgumentRegister(paramCursor->getLinkageRegisterIndex(), isFloat(paramMovType(paramCursor)));

      if (movStatus[paramReg].targetReg == 0)
         {
         // This parameter does not need to be copied anywhere
         if (debug("traceCopyParametersToHomeLocation"))
            diagnostic("copyParametersToHomeLocation: Not moving %d\n", paramReg);
         }
      else
         {
         if (debug("traceCopyParametersToHomeLocation"))
            diagnostic("copyParametersToHomeLocation: Preparing to move %d\n", paramReg);

         // If a mov's target register is the source for another mov, we need
         // to do that other mov first. The idea is to find the end point of
         // the chain of movs starting with paramReg and ending with a
         // register whose current value is not needed; then do that chain of
         // movs in reverse order.
         //
         TR_ASSERT(noReg == 0, "noReg must be zero (not %d) for zero-filled initialization to work", noReg);

         TR::RealRegister::RegNum regCursor;

         // Find the last target in the chain
         //
         regCursor = movStatus[paramReg].targetReg;
         while (movStatus[regCursor].targetReg != noReg)
            {
            // Haven't found the end yet
            regCursor = movStatus[regCursor].targetReg;
            TR_ASSERT(regCursor != paramReg, "Can't yet handle cyclic dependencies");

            // TODO:AMD64 Use scratch register to break cycles
            //
            // A properly-written pickRegister should never
            // cause cycles to occur in the first place. However, we may want
            // to consider adding cycle-breaking logic so that (1) pickRegister
            // has more flexibility, and (2) we're more robust against
            // otherwise harmless bugs in pickRegister.
            }

         // Work our way backward along the chain, generating all the necessary movs
         //
         while (movStatus[regCursor].sourceReg != noReg)
            {
            TR::RealRegister::RegNum source = movStatus[regCursor].sourceReg;
            if (debug("traceCopyParametersToHomeLocation"))
               diagnostic("copyParametersToHomeLocation: Moving %d to %d\n", source, regCursor);
            // regCursor := regCursor.sourceReg
            cursor = generateRegRegInstruction(
               cursor,
               TR::Linkage::movOpcodes(RegReg, movStatus[source].outgoingDataType),
               machine->getX86RealRegister(regCursor),
               machine->getX86RealRegister(source),
               cg()
               );
            // Update movStatus as we go so we don't generate redundant movs
            movStatus[regCursor].sourceReg = noReg;
            movStatus[source   ].targetReg = noReg;
            // Continue with the next register in the chain
            regCursor = source;
            }
         }
      }

   // Return the last instruction we inserted, whether or not it was a load.
   //
   return loadCursor ? loadCursor : cursor;
   }
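// Worked example of the phase-2 chain reversal above (hypothetical registers):
// suppose movStatus records rsi -> rdx and rdx -> rcx, i.e. parameters arrive
// in rsi and rdx but their home registers are rdx and rcx. Starting from rsi,
// the loop walks forward to the end of the chain (rcx, whose current value is
// not needed), then emits the movs backward:
//    mov rcx, rdx
//    mov rdx, rsi
// so no live value is clobbered before it has been copied out.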
void TR::ARM64SystemLinkage::createPrologue(TR::Instruction *cursor, List<TR::ParameterSymbol> &parmList)
   {
   TR::CodeGenerator *codeGen = cg();
   TR::Machine *machine = codeGen->machine();
   TR::ResolvedMethodSymbol *bodySymbol = comp()->getJittedMethodSymbol();
   const TR::ARM64LinkageProperties& properties = getProperties();
   TR::RealRegister *sp = machine->getRealRegister(properties.getStackPointerRegister());
   TR::Node *firstNode = comp()->getStartTree()->getNode();

   // allocate stack space
   uint32_t frameSize = (uint32_t)codeGen->getFrameSizeInBytes();
   if (constantIsUnsignedImm12(frameSize))
      {
      cursor = generateTrg1Src1ImmInstruction(codeGen, TR::InstOpCode::subimmx, firstNode, sp, sp, frameSize, cursor);
      }
   else
      {
      TR_UNIMPLEMENTED();
      }

   // save link register (x30)
   if (machine->getLinkRegisterKilled())
      {
      TR::MemoryReference *stackSlot = new (trHeapMemory()) TR::MemoryReference(sp, 0, codeGen);
      cursor = generateMemSrc1Instruction(cg(), TR::InstOpCode::strimmx, firstNode, stackSlot, machine->getRealRegister(TR::RealRegister::x30), cursor);
      }

   // spill argument registers
   int32_t nextIntArgReg = 0;
   int32_t nextFltArgReg = 0;
   ListIterator<TR::ParameterSymbol> parameterIterator(&parmList);
   for (TR::ParameterSymbol *parameter = parameterIterator.getFirst();
        parameter != NULL && (nextIntArgReg < getProperties().getNumIntArgRegs() || nextFltArgReg < getProperties().getNumFloatArgRegs());
        parameter = parameterIterator.getNext())
      {
      TR::MemoryReference *stackSlot = new (trHeapMemory()) TR::MemoryReference(sp, parameter->getParameterOffset(), codeGen);
      TR::InstOpCode::Mnemonic op;

      switch (parameter->getDataType())
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
         case TR::Int64:
         case TR::Address:
            if (nextIntArgReg < getProperties().getNumIntArgRegs())
               {
               op = (parameter->getSize() == 8) ? TR::InstOpCode::strimmx : TR::InstOpCode::strimmw;
               cursor = generateMemSrc1Instruction(cg(), op, firstNode, stackSlot, machine->getRealRegister((TR::RealRegister::RegNum)(TR::RealRegister::x0 + nextIntArgReg)), cursor);
               nextIntArgReg++;
               }
            else
               {
               nextIntArgReg = getProperties().getNumIntArgRegs() + 1;
               }
            break;
         case TR::Float:
         case TR::Double:
            if (nextFltArgReg < getProperties().getNumFloatArgRegs())
               {
               op = (parameter->getSize() == 8) ? TR::InstOpCode::vstrimmd : TR::InstOpCode::vstrimms;
               cursor = generateMemSrc1Instruction(cg(), op, firstNode, stackSlot, machine->getRealRegister((TR::RealRegister::RegNum)(TR::RealRegister::v0 + nextFltArgReg)), cursor);
               nextFltArgReg++;
               }
            else
               {
               nextFltArgReg = getProperties().getNumFloatArgRegs() + 1;
               }
            break;
         case TR::Aggregate:
            TR_ASSERT(false, "Function parameters of aggregate types are not currently supported on AArch64.");
            break;
         default:
            TR_ASSERT(false, "Unknown parameter type.");
         }
      }

   // save callee-saved registers
   uint32_t offset = bodySymbol->getLocalMappingCursor();
   for (int r = TR::RealRegister::x19; r <= TR::RealRegister::x28; r++)
      {
      TR::RealRegister *rr = machine->getRealRegister((TR::RealRegister::RegNum)r);
      if (rr->getHasBeenAssignedInMethod())
         {
         TR::MemoryReference *stackSlot = new (trHeapMemory()) TR::MemoryReference(sp, offset, codeGen);
         cursor = generateMemSrc1Instruction(cg(), TR::InstOpCode::strimmx, firstNode, stackSlot, rr, cursor);
         offset += 8;
         }
      }
   for (int r = TR::RealRegister::v8; r <= TR::RealRegister::v15; r++)
      {
      TR::RealRegister *rr = machine->getRealRegister((TR::RealRegister::RegNum)r);
      if (rr->getHasBeenAssignedInMethod())
         {
         TR::MemoryReference *stackSlot = new (trHeapMemory()) TR::MemoryReference(sp, offset, codeGen);
         cursor = generateMemSrc1Instruction(cg(), TR::InstOpCode::vstrimmd, firstNode, stackSlot, rr, cursor);
         offset += 8;
         }
      }
   }
TR_BitVector *
OMR::SymbolReference::getUseonlyAliasesBV(TR::SymbolReferenceTable *symRefTab)
   {
   int32_t kind = _symbol->getKind();
   switch (kind)
      {
      case TR::Symbol::IsMethod:
         {
         TR::MethodSymbol *methodSymbol = _symbol->castToMethodSymbol();

         // Aliasing for potentialOSRPointHelper
         // A potentialOSRPointHelper call is an exception point that may go to OSR catch block (see
         // Node API exceptionsRaised), the control flow constraint imposed by the exception edge will
         // apply to all the global optimizations that may move things around. Local optimizations also
         // ask exceptionsRaised to determine if a code motion across certain point is safe. So aliasing
         // is not necessary. However, we'd like to add aliasing here to cause the compiler to be more
         // conservative about reordering this helper with other operations. The aliasing can always be
         // relaxed when necessary.
         //
         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::potentialOSRPointHelperSymbol))
            {
            return &symRefTab->aliasBuilder.defaultMethodUseAliases();
            }

         // Aliasing for osrFearPointHelper
         // Preventing the reordering of fear point helper w.r.t. OSR points and yield/invalidation points is
         // the minimum requirement of aliasing for OSR fear point helper. These reorderings would in almost
         // all cases be naturally disallowed simply due to the fact that the fear point is represented as a
         // call, which even without aliasing could e.g. perform I/O. Thus the following is a highly conservative
         // aliasing and can be relaxed later when necessary.
         //
         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::osrFearPointHelperSymbol))
            {
            return &symRefTab->aliasBuilder.defaultMethodUseAliases();
            }

         if (!methodSymbol->isHelper())
            {
            return &symRefTab->aliasBuilder.defaultMethodUseAliases();
            }

         switch (self()->getReferenceNumber())
            {
            case TR_asyncCheck:
               return 0; // helpers that don't throw have no use aliases

            case TR_instanceOf: case TR_checkAssignable:
            case TR_monitorEntry: case TR_transactionEntry:
            case TR_reportFinalFieldModified: case TR_reportMethodEnter:
            case TR_reportStaticMethodEnter: case TR_reportMethodExit:
            case TR_acquireVMAccess: case TR_throwCurrentException: case TR_releaseVMAccess:
            case TR_stackOverflow:
            case TR_writeBarrierStore: case TR_writeBarrierStoreGenerational:
            case TR_writeBarrierStoreGenerationalAndConcurrentMark: case TR_writeBarrierBatchStore:
            case TR_typeCheckArrayStore: case TR_arrayStoreException:
            case TR_arrayBoundsCheck: case TR_checkCast:
            case TR_divCheck: case TR_overflowCheck:
            case TR_nullCheck: case TR_methodTypeCheck:
            case TR_incompatibleReceiver: case TR_IncompatibleClassChangeError:
            case TR_aThrow:
            case TR_aNewArray:
            case TR_monitorExit: case TR_transactionExit:
            case TR_newObject: case TR_newObjectNoZeroInit:
            case TR_newArray: case TR_multiANewArray:
            default:
               return &symRefTab->aliasBuilder.defaultMethodUseAliases();
            }
         }
      case TR::Symbol::IsResolvedMethod:
         {
         TR::ResolvedMethodSymbol *resolvedMethodSymbol = _symbol->castToResolvedMethodSymbol();
         if (!TR::comp()->getOption(TR_EnableHCR))
            {
            switch (resolvedMethodSymbol->getRecognizedMethod())
               {
#ifdef J9_PROJECT_SPECIFIC
               case TR::java_lang_Double_longBitsToDouble: case TR::java_lang_Double_doubleToLongBits:
               case TR::java_lang_Float_intBitsToFloat: case TR::java_lang_Float_floatToIntBits:
               case TR::java_lang_Double_doubleToRawLongBits: case TR::java_lang_Float_floatToRawIntBits:
               case TR::java_lang_Math_sqrt: case TR::java_lang_StrictMath_sqrt:
               case TR::java_lang_Math_sin: case TR::java_lang_StrictMath_sin:
               case TR::java_lang_Math_cos: case TR::java_lang_StrictMath_cos:
               case TR::java_lang_Math_max_I: case TR::java_lang_Math_min_I:
               case TR::java_lang_Math_max_L: case TR::java_lang_Math_min_L:
               case TR::java_lang_Math_abs_I: case TR::java_lang_Math_abs_L:
               case TR::java_lang_Math_abs_F: case TR::java_lang_Math_abs_D:
               case TR::java_lang_Math_pow: case TR::java_lang_StrictMath_pow:
               case TR::java_lang_Math_exp: case TR::java_lang_StrictMath_exp:
               case TR::java_lang_Math_log: case TR::java_lang_StrictMath_log:
               case TR::java_lang_Math_floor: case TR::java_lang_Math_ceil:
               case TR::java_lang_Math_copySign_F: case TR::java_lang_Math_copySign_D:
               case TR::java_lang_StrictMath_floor: case TR::java_lang_StrictMath_ceil:
               case TR::java_lang_StrictMath_copySign_F: case TR::java_lang_StrictMath_copySign_D:
                  return NULL;
#endif
               default:
                  break;
               }
            }
         return &symRefTab->aliasBuilder.defaultMethodUseAliases();
         }
      case TR::Symbol::IsAutomatic:
      case TR::Symbol::IsParameter:
         if (symRefTab->aliasBuilder.catchLocalUseSymRefs().isSet(self()->getReferenceNumber()))
            return &symRefTab->aliasBuilder.methodsThatMayThrow();
         return 0;
      default:
         //TR_ASSERT(0, "getUseOnlyAliases: unexpected symbol kind ");
         return 0;
      }
   }
TR_BitVector *
addVeryRefinedCallAliasSets(TR::ResolvedMethodSymbol *methodSymbol, TR_BitVector *aliases, List<void> *methodsPeeked)
   {
   TR::Compilation *comp = TR::comp();
   void *methodId = methodSymbol->getResolvedMethod()->getPersistentIdentifier();
   if (methodsPeeked->find(methodId))
      {
      // This can't be allocated into the alias region as it must be accessed across optimizations
      TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
      *heapAliases |= *aliases;
      return heapAliases;
      }

   // stop if the peek is getting very deep
   //
   if (methodsPeeked->getSize() >= PEEK_THRESHOLD)
      return 0;

   methodsPeeked->add(methodId);

   dumpOptDetails(comp, "O^O REFINING ALIASES: Peeking into the IL to refine aliases \n");

   if (!methodSymbol->getResolvedMethod()->genMethodILForPeeking(methodSymbol, comp, true))
      return 0;

   TR::SymbolReferenceTable *symRefTab = comp->getSymRefTab();
   for (TR::TreeTop *tt = methodSymbol->getFirstTreeTop(); tt; tt = tt->getNextTreeTop())
      {
      TR::Node *node = tt->getNode();

      if (node->getOpCode().isResolveCheck())
         return 0;

      if ((node->getOpCodeValue() == TR::treetop) ||
          (node->getOpCodeValue() == TR::compressedRefs) ||
          node->getOpCode().isCheck())
         node = node->getFirstChild();

      if (node->getOpCode().isStore())
         {
         TR::SymbolReference *symRefInCallee = node->getSymbolReference(), *symRefInCaller;
         TR::Symbol *symInCallee = symRefInCallee->getSymbol();
         TR::DataType type = symInCallee->getDataType();
         if (symInCallee->isShadow())
            {
            if (symInCallee->isArrayShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayShadowIndex(type));
            else if (symInCallee->isArrayletShadowSymbol())
               symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayletShadowIndex(type));
            else
               symRefInCaller = symRefTab->findShadowSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);

            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedShadowAliases(aliases, symRefTab);
               aliases->set(symRefInCaller->getReferenceNumber());
               }
            }
         else if (symInCallee->isStatic())
            {
            symRefInCaller = symRefTab->findStaticSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);
            if (symRefInCaller)
               {
               if (symRefInCaller->reallySharesSymbol(comp))
                  symRefInCaller->setSharedStaticAliases(aliases, symRefTab);
               else
                  aliases->set(symRefInCaller->getReferenceNumber());
               }
            }
         }
      else if (node->getOpCode().isCall())
         {
         if (node->getOpCode().isCallIndirect())
            return 0;
         TR::ResolvedMethodSymbol *calleeSymbol = node->getSymbol()->getResolvedMethodSymbol();
         if (!calleeSymbol)
            return 0;
         TR_ResolvedMethod *calleeMethod = calleeSymbol->getResolvedMethod();
         if (!calleeMethod->isCompilable(comp->trMemory()) || calleeMethod->isJNINative())
            return 0;
         if (!addVeryRefinedCallAliasSets(calleeSymbol, aliases, methodsPeeked))
            return 0;
         }
      else if (node->getOpCodeValue() == TR::monent)
         return 0;
      }

   // This can't be allocated into the alias region as it must be accessed across optimizations
   TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
   *heapAliases |= *aliases;
   return heapAliases;
   }
TR_BitVector *
OMR::SymbolReference::getUseDefAliasesBV(bool isDirectCall, bool includeGCSafePoint)
   {
   TR::Compilation *comp = TR::comp();
   TR::Region &aliasRegion = comp->aliasRegion();
   int32_t bvInitialSize = comp->getSymRefCount();
   TR_BitVectorGrowable growability = growable;

   // allow more than one shadow for an array type. Used by LoopAliasRefiner
   const bool supportArrayRefinement = true;

   int32_t kind = _symbol->getKind();
   TR::SymbolReferenceTable *symRefTab = comp->getSymRefTab();

   // !!! NOTE !!!
   // THERE IS A COPY OF THIS LOGIC IN sharesSymbol
   //
   if (!self()->reallySharesSymbol(comp))
      {
      switch (kind)
         {
         case TR::Symbol::IsShadow:
         case TR::Symbol::IsStatic:
            {
            // For unresolved constant dynamic, we need to invoke a Java bootstrap method,
            // which can have arbitrary side effects, so the aliasing should be conservative here.
            // isConstObjectRef now returns true for condy, so we add an explicit condition,
            // more like a short-circuit, to say if we are unresolved and not isConstObjectRef
            // (this is the same as before), or if we are unresolved and condy
            // (this is the extra condition added), we would return conservative aliases.
            if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) ||
                _symbol->isVolatile() || self()->isLiteralPoolAddress() ||
                self()->isFromLiteralPool() || _symbol->isUnsafeShadowSymbol() ||
                (_symbol->isArrayShadowSymbol() && comp->getMethodSymbol()->hasVeryRefinedAliasSets()))
               {
               // getUseDefAliases might not return NULL
               }
            else if (!symRefTab->aliasBuilder.mutableGenericIntShadowHasBeenCreated())
               {
               // getUseDefAliases must return NULL
               return NULL;
               }
            else if (kind == TR::Symbol::IsStatic && !symRefTab->aliasBuilder.litPoolGenericIntShadowHasBeenCreated())
               {
               // getUseDefAliases must return NULL
               return NULL;
               }
            break;
            }
         }
      }

   // now do stuff for various kinds of symbols
   //
   switch (kind)
      {
      case TR::Symbol::IsMethod:
         {
         TR::MethodSymbol *methodSymbol = _symbol->castToMethodSymbol();

         if (!methodSymbol->isHelper())
            return symRefTab->aliasBuilder.methodAliases(self());

         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arraySetSymbol) ||
             symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::osrFearPointHelperSymbol) ||
             symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::potentialOSRPointHelperSymbol))
            {
            return &symRefTab->aliasBuilder.defaultMethodDefAliases();
            }

         if (symRefTab->isNonHelper(self(), TR::SymbolReferenceTable::arrayCmpSymbol))
            return 0;

         switch (self()->getReferenceNumber())
            {
            case TR_methodTypeCheck:
            case TR_nullCheck:
               return &symRefTab->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();

            case TR_arrayBoundsCheck: case TR_checkCast:
            case TR_divCheck: case TR_typeCheckArrayStore:
            case TR_arrayStoreException: case TR_incompatibleReceiver:
            case TR_IncompatibleClassChangeError: case TR_reportFinalFieldModified:
            case TR_reportMethodEnter: case TR_reportStaticMethodEnter:
            case TR_reportMethodExit: case TR_acquireVMAccess:
            case TR_instanceOf: case TR_checkAssignable:
            case TR_throwCurrentException: case TR_releaseVMAccess:
            case TR_stackOverflow: case TR_writeBarrierStore:
            case TR_writeBarrierBatchStore: case TR_jitProfileAddress:
            case TR_jitProfileWarmCompilePICAddress: case TR_jitProfileValue:
            case TR_jitProfileLongValue: case TR_jitProfileBigDecimalValue:
            case TR_jitProfileParseBuffer:
               return 0;

            case TR_asyncCheck:
            case TR_writeBarrierClassStoreRealTimeGC:
            case TR_writeBarrierStoreRealTimeGC:
            case TR_aNewArray:
            case TR_newObject:
            case TR_newObjectNoZeroInit:
            case TR_newArray:
            case TR_multiANewArray:
               if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
                  return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers();
               else
                  return 0;

            case TR_aThrow:
               return 0;

            // The monitor exit symbol needs to be aliased with all fields in the
            // current class to ensure that all references to fields are evaluated
            // before the monitor exit
            case TR_monitorExit:
            case TR_monitorEntry:
            case TR_transactionExit:
            case TR_transactionEntry:
            default:
               // The following is the place to check for
               // a use of killsAllMethodSymbolRef... However,
               // it looks like the default action is sufficient.
               //if (symRefTab->findKillsAllMethodSymbolRef() == self())
               //   {
               //   }
               return &symRefTab->aliasBuilder.defaultMethodDefAliases();
            }
         }
      case TR::Symbol::IsResolvedMethod:
         {
         TR::ResolvedMethodSymbol *resolvedMethodSymbol = _symbol->castToResolvedMethodSymbol();

         if (!comp->getOption(TR_EnableHCR))
            {
            switch (resolvedMethodSymbol->getRecognizedMethod())
               {
#ifdef J9_PROJECT_SPECIFIC
               case TR::java_lang_System_arraycopy:
                  {
                  TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
                  *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs();
                  if (comp->generateArraylets())
                     *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs();
                  return aliases;
                  }

                  if (resolvedMethodSymbol->isPureFunction())
                     return NULL;

               case TR::java_lang_Double_longBitsToDouble: case TR::java_lang_Double_doubleToLongBits:
               case TR::java_lang_Float_intBitsToFloat: case TR::java_lang_Float_floatToIntBits:
               case TR::java_lang_Double_doubleToRawLongBits: case TR::java_lang_Float_floatToRawIntBits:
               case TR::java_lang_Math_sqrt: case TR::java_lang_StrictMath_sqrt:
               case TR::java_lang_Math_sin: case TR::java_lang_StrictMath_sin:
               case TR::java_lang_Math_cos: case TR::java_lang_StrictMath_cos:
               case TR::java_lang_Math_max_I: case TR::java_lang_Math_min_I:
               case TR::java_lang_Math_max_L: case TR::java_lang_Math_min_L:
               case TR::java_lang_Math_abs_I: case TR::java_lang_Math_abs_L:
               case TR::java_lang_Math_abs_F: case TR::java_lang_Math_abs_D:
               case TR::java_lang_Math_pow: case TR::java_lang_StrictMath_pow:
               case TR::java_lang_Math_exp: case TR::java_lang_StrictMath_exp:
               case TR::java_lang_Math_log: case TR::java_lang_StrictMath_log:
               case TR::java_lang_Math_floor: case TR::java_lang_Math_ceil:
               case TR::java_lang_Math_copySign_F: case TR::java_lang_Math_copySign_D:
               case TR::java_lang_StrictMath_floor: case TR::java_lang_StrictMath_ceil:
               case TR::java_lang_StrictMath_copySign_F: case TR::java_lang_StrictMath_copySign_D:
               case TR::com_ibm_Compiler_Internal__TR_Prefetch:
               case TR::java_nio_Bits_keepAlive:
                  if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
                     return &symRefTab->aliasBuilder.gcSafePointSymRefNumbers();
                  else
                     return 0;

               // no aliasing on DFP dummy stubs
               case TR::java_math_BigDecimal_DFPPerformHysteresis:
               case TR::java_math_BigDecimal_DFPUseDFP:
               case TR::java_math_BigDecimal_DFPHWAvailable:
               case TR::java_math_BigDecimal_DFPCompareTo:
               case TR::java_math_BigDecimal_DFPUnscaledValue:
               case TR::com_ibm_dataaccess_DecimalData_DFPFacilityAvailable:
               case TR::com_ibm_dataaccess_DecimalData_DFPUseDFP:
               case TR::com_ibm_dataaccess_DecimalData_DFPConvertPackedToDFP:
               case TR::com_ibm_dataaccess_DecimalData_DFPConvertDFPToPacked:
               case TR::com_ibm_dataaccess_DecimalData_createZeroBigDecimal:
               case TR::com_ibm_dataaccess_DecimalData_getlaside:
               case TR::com_ibm_dataaccess_DecimalData_setlaside:
               case TR::com_ibm_dataaccess_DecimalData_getflags:
               case TR::com_ibm_dataaccess_DecimalData_setflags:
                  if (!(
#ifdef TR_TARGET_S390
                        TR::Compiler->target.cpu.getS390SupportsDFP() ||
#endif
                        TR::Compiler->target.cpu.supportsDecimalFloatingPoint()) ||
                      comp->getOption(TR_DisableDFP))
                     return NULL;
#endif //J9_PROJECT_SPECIFIC
               default:
                  break;
               }
            }

#ifdef J9_PROJECT_SPECIFIC
         TR_ResolvedMethod *method = resolvedMethodSymbol->getResolvedMethod();
         TR_PersistentMethodInfo *methodInfo = TR_PersistentMethodInfo::get(method);
         if (methodInfo &&
             (methodInfo->hasRefinedAliasSets() ||
              comp->getMethodHotness() >= veryHot ||
              resolvedMethodSymbol->hasVeryRefinedAliasSets()) &&
             (method->isStatic() || method->isFinal() || isDirectCall))
            {
            TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            if ((comp->generateArraylets() || comp->isDLT()) && includeGCSafePoint)
               *aliases |= symRefTab->aliasBuilder.gcSafePointSymRefNumbers();

            if (methodInfo->doesntKillAnything() && !comp->getOption(TR_DisableRefinedAliases))
               return aliases;

            if ((resolvedMethodSymbol->hasVeryRefinedAliasSets() || comp->getMethodHotness() >= hot) &&
                !debug("disableVeryRefinedCallAliasSets"))
               {
               TR_BitVector *exactAliases = 0;
               if (resolvedMethodSymbol->hasVeryRefinedAliasSets())
                  exactAliases = symRefTab->aliasBuilder.getVeryRefinedCallAliasSets(resolvedMethodSymbol);
               else
                  {
                  resolvedMethodSymbol->setHasVeryRefinedAliasSets(true);
                  List<void> methodsPeeked(comp->trMemory());
                  exactAliases = addVeryRefinedCallAliasSets(resolvedMethodSymbol, aliases, &methodsPeeked);
                  symRefTab->aliasBuilder.setVeryRefinedCallAliasSets(resolvedMethodSymbol, exactAliases);
                  }

               if (exactAliases)
                  {
                  return exactAliases;
                  }
               }

            // From here on, we're just checking refined alias info.
            // If refined aliases are disabled, return the conservative answer
            // we would have returned had we never attempted to use refined
            // aliases at all.
            //
            if (comp->getOption(TR_DisableRefinedAliases))
               return symRefTab->aliasBuilder.methodAliases(self());

            if (!methodInfo->doesntKillAddressArrayShadows())
               {
               symRefTab->aliasBuilder.addAddressArrayShadows(aliases);
               if (comp->generateArraylets())
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Address));
               }

            if (!methodInfo->doesntKillIntArrayShadows())
               {
               symRefTab->aliasBuilder.addIntArrayShadows(aliases);
               if (comp->generateArraylets())
                  {
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32));
                  }
               }

            if (!methodInfo->doesntKillNonIntPrimitiveArrayShadows())
               {
               symRefTab->aliasBuilder.addNonIntPrimitiveArrayShadows(aliases);
               if (comp->generateArraylets())
                  {
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int8));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int16));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int32));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Int64));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Float));
                  aliases->set(symRefTab->getArrayletShadowIndex(TR::Double));
                  }
               }

            if (!methodInfo->doesntKillAddressFields())
               *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs();

            if (!methodInfo->doesntKillIntFields())
               *aliases |= symRefTab->aliasBuilder.intShadowSymRefs();

            if (!methodInfo->doesntKillNonIntPrimitiveFields())
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs();

            if (!methodInfo->doesntKillAddressStatics())
               *aliases |= symRefTab->aliasBuilder.addressStaticSymRefs();

            if (!methodInfo->doesntKillIntStatics())
               *aliases |= symRefTab->aliasBuilder.intStaticSymRefs();

            if (!methodInfo->doesntKillNonIntPrimitiveStatics())
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveStaticSymRefs();

            TR_BitVector *methodAliases = symRefTab->aliasBuilder.methodAliases(self());
            *aliases &= *methodAliases;
            return aliases;
            }
#endif
         return symRefTab->aliasBuilder.methodAliases(self());
         }
      case TR::Symbol::IsShadow:
         {
         if ((self()->isUnresolved() && !_symbol->isConstObjectRef()) ||
             _symbol->isVolatile() || self()->isLiteralPoolAddress() ||
             self()->isFromLiteralPool() ||
             (_symbol->isUnsafeShadowSymbol() && !self()->reallySharesSymbol()))
            {
            if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber()))
               {
               TR_BitVector *aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
               *aliases |= comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();
               *aliases -= symRefTab->aliasBuilder.cpSymRefs();
               return aliases;
               }
            else
               return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliasesWithoutImmutable();
            }

         TR_BitVector *aliases = NULL;

         if (_symbol == symRefTab->findGenericIntShadowSymbol())
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            *aliases |= symRefTab->aliasBuilder.arrayElementSymRefs();
            if (comp->generateArraylets())
               *aliases |= symRefTab->aliasBuilder.arrayletElementSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
#ifdef J9_PROJECT_SPECIFIC
            *aliases |= symRefTab->aliasBuilder.unresolvedShadowSymRefs();
#endif
            if (symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing())
               {
               *aliases |= symRefTab->aliasBuilder.addressShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.intShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs();
               }
            aliases->set(self()->getReferenceNumber());
            return aliases;
            }

         if (self()->reallySharesSymbol(comp))
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setSharedShadowAliases(aliases, symRefTab);
            }

         if (symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setLiteralPoolAliases(aliases, symRefTab);

            if (symRefTab->aliasBuilder.conservativeGenericIntShadowAliasing() || self()->isUnresolved())
               {
               *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();
               *aliases |= symRefTab->aliasBuilder.genericIntNonArrayShadowSymRefs();
               }
            }

         if (_symbol->isArrayShadowSymbol() && symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            *aliases |= symRefTab->aliasBuilder.genericIntShadowSymRefs();
            *aliases |= symRefTab->aliasBuilder.genericIntArrayShadowSymRefs();
            if (supportArrayRefinement && self()->getIndependentSymRefs())
               *aliases -= *self()->getIndependentSymRefs();
            }

#ifdef J9_PROJECT_SPECIFIC
         // make TR::PackedDecimal aliased with TR::Int8 (byte)
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::PackedDecimal)
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(TR::Int8));
            }
         // the other way around
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType() == TR::Int8)
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(TR::PackedDecimal));
            }
#endif

         // alias vector array shadows with the corresponding scalar array shadows
         if (_symbol->isArrayShadowSymbol() && _symbol->getDataType().isVector())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().vectorToScalar()));
            }
         // the other way around
         if (_symbol->isArrayShadowSymbol() && !_symbol->getDataType().isVector())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            aliases->set(symRefTab->getArrayShadowIndex(_symbol->getDataType().scalarToVector()));
            }

         if (_symbol->isArrayShadowSymbol() && !symRefTab->aliasBuilder.immutableArrayElementSymRefs().isEmpty())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);

            TR::DataType type = _symbol->getDataType();
            TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs());
            int32_t symRefNum;
            while (bvi.hasMoreElements())
               {
               symRefNum = bvi.getNextElement();
               if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type)
                  aliases->set(symRefNum);
               }
            }

         if (_symbol->isArrayShadowSymbol() &&
             supportArrayRefinement &&
             comp->getMethodSymbol()->hasVeryRefinedAliasSets())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);

            TR::DataType type = _symbol->getDataType();
            TR_BitVectorIterator bvi(symRefTab->aliasBuilder.arrayElementSymRefs());
            int32_t symRefNum;
            while (bvi.hasMoreElements())
               {
               symRefNum = bvi.getNextElement();
               if (symRefTab->getSymRef(symRefNum)->getSymbol()->getDataType() == type)
                  aliases->set(symRefNum);
               }

            if (self()->getIndependentSymRefs())
               *aliases -= *self()->getIndependentSymRefs();

            return aliases;
            }

         if (aliases)
            aliases->set(self()->getReferenceNumber());

         if (symRefTab->aliasBuilder.unsafeArrayElementSymRefs().get(self()->getReferenceNumber()))
            *aliases -= symRefTab->aliasBuilder.cpSymRefs();
         else if (symRefTab->aliasBuilder.cpSymRefs().get(self()->getReferenceNumber()))
            *aliases -= symRefTab->aliasBuilder.unsafeArrayElementSymRefs();

         return aliases;
         }
      case TR::Symbol::IsStatic:
         {
         // For unresolved constant dynamic, we need to invoke a Java bootstrap method,
         // which can have arbitrary side effects, so the aliasing should be conservative here.
         // isConstObjectRef now returns true for condy, so we add an explicit condition,
         // more like a short-circuit, to say if we are unresolved and not isConstObjectRef
         // (this is the same as before), or if we are unresolved and condy
         // (this is the extra condition added), we would return conservative aliases.
         if ((self()->isUnresolved() && (_symbol->isConstantDynamic() || !_symbol->isConstObjectRef())) ||
             self()->isLiteralPoolAddress() || self()->isFromLiteralPool() || _symbol->isVolatile())
            {
            return &comp->getSymRefTab()->aliasBuilder.defaultMethodDefAliases();
            }

         TR_BitVector *aliases = NULL;
         if (self()->reallySharesSymbol(comp))
            {
            aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setSharedStaticAliases(aliases, symRefTab);
            }

         if (symRefTab->findGenericIntShadowSymbol())
            {
            if (!aliases)
               aliases = new (aliasRegion) TR_BitVector(bvInitialSize, aliasRegion, growability);
            self()->setLiteralPoolAliases(aliases, symRefTab);
            }

         if (aliases)
            aliases->set(self()->getReferenceNumber());
         return aliases;
         }
      case TR::Symbol::IsMethodMetaData:
         {
         TR_BitVector *aliases = NULL;
         return aliases;
         }
      default:
         //TR_ASSERT(0, "getUseDefAliasing called for non method");
         if (comp->generateArraylets() &&
             comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers().get(self()->getReferenceNumber()) &&
             includeGCSafePoint)
            return &comp->getSymRefTab()->aliasBuilder.gcSafePointSymRefNumbers();
         else
            return 0;
      }
   }
static int cacheStringAppend(TR::ValuePropagation *vp, TR::Node *node)
   {
   return 0; // NOTE: this early return disables the transformation below

   if (!vp->lastTimeThrough())
      return 0;

   TR::TreeTop *tt = vp->_curTree;
   TR::TreeTop *newTree = tt;
   TR::TreeTop *startTree = 0;
   TR::TreeTop *exitTree = vp->_curBlock->getExit();
   TR::Node *newBuffer;

   if (node->getNumChildren() >= 1)
      newBuffer = node->getFirstChild();
   else
      return 0;

   enum {MAX_STRINGS = 2};
   int initWithString = 0;
   bool initWithInteger = false;
   TR::TreeTop *appendTree[MAX_STRINGS+1];
   TR::Node *appendedString[MAX_STRINGS+1];
   char pattern[MAX_STRINGS+1];
   int stringCount = 0;
   bool useStringBuffer = false;
   TR::SymbolReference *valueOfSymRef[MAX_STRINGS+1];
   bool success = false;
   char *sigBuffer = "java/lang/StringBuffer.<init>(";
   char *sigBuilder = "java/lang/StringBuilder.<init>(";
   char *sigInit = "java/lang/String.<init>(";

   if (checkMethodSignature(vp, node->getSymbolReference(), sigInit))
      {
      TR::Symbol *symbol = node->getSymbolReference()->getSymbol();
      TR_ResolvedMethod *m = symbol->castToResolvedMethodSymbol()->getResolvedMethod();
      if (strncmp(m->signatureChars(), "(Ljava/lang/String;Ljava/lang/String;)V", m->signatureLength()) == 0)
         {
         vp->_cachedStringPeepHolesVcalls.add(new (vp->comp()->trStackMemory()) TR::ValuePropagation::VPTreeTopPair(tt, tt->getPrevRealTreeTop()));
         }
      }

   if (checkMethodSignature(vp, node->getSymbolReference(), sigBuffer))
      {
      useStringBuffer = true;
      success = true;
      }
   else if (checkMethodSignature(vp, node->getSymbolReference(), sigBuilder))
      {
      success = true;
      useStringBuffer = false;
      }
   else
      {
      return 0;
      }

   if (success)
      {
      TR::Symbol *symbol = node->getSymbolReference()->getSymbol();
      TR_ResolvedMethod *m = symbol->castToResolvedMethodSymbol()->getResolvedMethod();
      if (strncmp(m->signatureChars(), "()V", m->signatureLength()) == 0)
         {
         // Diagnostics
         }
      else
         {
         return 0;
         }
      }
   else // <init> not found (could be unresolved)
      {
      return 0;
      }

   // now search for StringBuffer.append calls that are chained to one another
   TR::TreeTop *lastAppendTree = 0; // updated when we find an append
   TR::Node *child = newBuffer;
   while (1)
      {
      startTree = tt->getNextRealTreeTop();
      appendedString[stringCount] = 0;
      int visitCount = 0;
      if (useStringBuffer)
         tt = searchForStringAppend(vp, "java/lang/StringBuffer.append(", startTree, exitTree, TR::acall, child, visitCount, appendedString + stringCount);
      else
         tt = searchForStringAppend(vp, "java/lang/StringBuilder.append(", startTree, exitTree, TR::acall, child, visitCount, appendedString + stringCount);

      if (appendedString[stringCount]) // we found it
         {
         appendTree[stringCount] = tt;
         // we could exit here if too many appends are chained
         if (stringCount >= MAX_STRINGS)
            return 0;

         // see which type of append we have
         TR::Symbol *symbol = tt->getNode()->getFirstChild()->getSymbolReference()->getSymbol();
         TR_ASSERT(symbol->isResolvedMethod(), "assertion failure");
         TR::ResolvedMethodSymbol *method = symbol->castToResolvedMethodSymbol();
         TR_ASSERT(method, "assertion failure");
         TR_ResolvedMethod *m = method->getResolvedMethod();
         if (strncmp(m->signatureChars(), "(Ljava/lang/String;)", 20) == 0)
            {
            pattern[stringCount] = 'S';
            valueOfSymRef[stringCount] = 0; // don't need conversion to string
            }
         else // appending something that needs conversion using valueOf
            {
            TR::SymbolReference *symRefForValueOf = 0;
            // In the following we can compare only (C) because we know that
            // StringBuffer.append returns a StringBuffer.
            //char *sigBuffer = m->signatureChars();
            TR_ASSERT(m->signatureLength() >= 3, "The minimum signature length should be 3 for ()V");
            }
         stringCount++;
         }
      else // the chain of appends is broken
         {
         appendTree[stringCount] = 0;
         pattern[stringCount] = 0; // string terminator
         break;
         }
      lastAppendTree = tt;
      child = tt->getNode()->getFirstChild(); // the first node is a NULLCHK and its child is the call
      } // end while

   if (stringCount < 2)
      return 0; // cannot apply StringPeepholes
   if (stringCount > MAX_STRINGS)
      return 0;
   if (stringCount == 3)
      return 0; // same as above

   TR_ASSERT(lastAppendTree, "If stringCount <= 2 then we must have found an append");

   // now look for the toString call
   TR::TreeTop *toStringTree = 0;
   //visitCount = vp->comp()->incVisitCount();
   int visitCount = 0;
   tt = searchForToStringCall(vp, lastAppendTree->getNextRealTreeTop(), exitTree, lastAppendTree->getNode()->getFirstChild(), visitCount, &toStringTree, useStringBuffer);
   if (!toStringTree)
      return 0;

   vp->_cachedStringBufferVcalls.add(new (vp->comp()->trStackMemory()) TR::ValuePropagation::VPStringCached(appendTree[0], appendTree[1], appendedString[0], appendedString[1], newTree, toStringTree));

   return 1; // assumed success code; the original text fell off the end of this non-void function
   }
void TR::ARMSystemLinkage::createPrologue(TR::Instruction *cursor)
   {
   TR::CodeGenerator *codeGen = cg();
   const TR::ARMLinkageProperties& properties = getProperties();
   TR::Machine *machine = codeGen->machine();
   TR::ResolvedMethodSymbol *bodySymbol = comp()->getJittedMethodSymbol();
   TR::Node *firstNode = comp()->getStartTree()->getNode();
   TR::RealRegister *stackPtr = machine->getRealRegister(properties.getStackPointerRegister());

   // Entry breakpoint
   //
   if (comp()->getOption(TR_EntryBreakPoints))
      {
      cursor = new (trHeapMemory()) TR::Instruction(cursor, ARMOp_bad, firstNode, cg());
      }

   // allocate stack space
   auto frameSize = codeGen->getFrameSizeInBytes();
   cursor = generateTrg1Src1ImmInstruction(codeGen, ARMOp_sub, firstNode, stackPtr, stackPtr, frameSize, 0, cursor);

   // spill argument registers
   auto nextIntArgReg = 0;
   auto nextFltArgReg = 0;
   ListIterator<TR::ParameterSymbol> parameterIterator(&bodySymbol->getParameterList());
   for (TR::ParameterSymbol *parameter = parameterIterator.getFirst();
        parameter != NULL && (nextIntArgReg < getProperties().getNumIntArgRegs() || nextFltArgReg < getProperties().getNumFloatArgRegs());
        parameter = parameterIterator.getNext())
      {
      auto *stackSlot = new (trHeapMemory()) TR::MemoryReference(stackPtr, parameter->getParameterOffset(), codeGen);
      switch (parameter->getDataType())
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
         case TR::Address:
            if (nextIntArgReg < getProperties().getNumIntArgRegs())
               {
               cursor = generateMemSrc1Instruction(cg(), ARMOp_str, firstNode, stackSlot, machine->getRealRegister((TR::RealRegister::RegNum)(TR::RealRegister::gr0 + nextIntArgReg)), cursor);
               nextIntArgReg++;
               }
            else
               {
               nextIntArgReg = getProperties().getNumIntArgRegs() + 1;
               }
            break;
         case TR::Int64:
            nextIntArgReg += nextIntArgReg & 0x1; // round to next even number
            if (nextIntArgReg + 1 < getProperties().getNumIntArgRegs())
               {
               cursor = generateMemSrc1Instruction(cg(), ARMOp_str, firstNode, stackSlot, machine->getRealRegister((TR::RealRegister::RegNum)(TR::RealRegister::gr0 + nextIntArgReg)), cursor);
               stackSlot = new (trHeapMemory()) TR::MemoryReference(stackPtr, parameter->getParameterOffset() + 4, codeGen);
               cursor = generateMemSrc1Instruction(cg(), ARMOp_str, firstNode, stackSlot, machine->getRealRegister((TR::RealRegister::RegNum)(TR::RealRegister::gr0 + nextIntArgReg + 1)), cursor);
               nextIntArgReg += 2;
               }
            else
               {
               nextIntArgReg = getProperties().getNumIntArgRegs() + 1;
               }
            break;
         case TR::Float:
            comp()->failCompilation<UnsupportedParameterType>("Compiling methods with a single precision floating point parameter is not supported");
            break;
         case TR::Double:
            if (nextFltArgReg < getProperties().getNumFloatArgRegs())
               {
               cursor = generateMemSrc1Instruction(cg(), ARMOp_fstd, firstNode, stackSlot, machine->getRealRegister((TR::RealRegister::RegNum)(TR::RealRegister::fp0 + nextFltArgReg)), cursor);
               nextFltArgReg += 1;
               }
            else
               {
               nextFltArgReg = getProperties().getNumFloatArgRegs() + 1;
               }
            break;
         case TR::Aggregate:
            TR_ASSERT(false, "Function parameters of aggregate types are not currently supported on ARM.");
         }
      }

   // save all preserved registers
   for (int r = TR::RealRegister::gr4; r <= TR::RealRegister::gr11; ++r)
      {
      auto *stackSlot = new (trHeapMemory()) TR::MemoryReference(stackPtr, (TR::RealRegister::gr11 - r + 1)*4 + bodySymbol->getLocalMappingCursor(), codeGen);
      cursor = generateMemSrc1Instruction(cg(), ARMOp_str, firstNode, stackSlot, machine->getRealRegister((TR::RealRegister::RegNum)r), cursor);
      }

   // save link register (r14)
   auto *stackSlot = new (trHeapMemory()) TR::MemoryReference(stackPtr, bodySymbol->getLocalMappingCursor(), codeGen);
   cursor = generateMemSrc1Instruction(cg(), ARMOp_str, firstNode, stackSlot, machine->getRealRegister(TR::RealRegister::gr14), cursor);
   }
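// Resulting layout of the preserved-register area, given the offsets used by
// both this prologue and createEpilogue above (cursor = getLocalMappingCursor()):
//    [cursor + 0]  r14 (link register)
//    [cursor + 4]  r11
//    ...
//    [cursor + 32] r4
// i.e. (gr11 - r + 1)*4 places gr11 in the slot adjacent to the link register.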
TR_ResolvedMethod *resolvedMethod() { return _resolvedMethod->getResolvedMethod(); }
TR::Instruction *OMR::Power::Linkage::flushArguments(TR::Instruction *cursor)
   {
   TR::Machine *machine = self()->machine();
   TR::RealRegister *stackPtr = self()->cg()->getStackPointerRegister();
   TR::ResolvedMethodSymbol *bodySymbol = self()->comp()->getJittedMethodSymbol();
   ListIterator<TR::ParameterSymbol> paramIterator(&(bodySymbol->getParameterList()));
   TR::ParameterSymbol *paramCursor = paramIterator.getFirst();
   TR::Node *firstNode = self()->comp()->getStartTree()->getNode();
   int32_t numIntArgs = 0, numFloatArgs = 0;
   const TR::PPCLinkageProperties& properties = self()->getProperties();

   while ((paramCursor != NULL) &&
          ((numIntArgs < properties.getNumIntArgRegs()) ||
           (numFloatArgs < properties.getNumFloatArgRegs())))
      {
      TR::RealRegister *argRegister;
      int32_t offset = paramCursor->getParameterOffset();

      // If parm is referenced or required to be on stack (i.e. FSD), we have to flush.
      bool hasToStoreToStack = paramCursor->isReferencedParameter() || paramCursor->isParmHasToBeOnStack();

      switch (paramCursor->getDataType())
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
            if (hasToStoreToStack && numIntArgs < properties.getNumIntArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getIntegerArgumentRegister(numIntArgs));
               cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::stw, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()),
                           argRegister, cursor);
               }
            numIntArgs++;
            break;
         case TR::Address:
            if (numIntArgs < properties.getNumIntArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getIntegerArgumentRegister(numIntArgs));
               cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::Op_st, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, TR::Compiler->om.sizeofReferenceAddress(), self()->cg()),
                           argRegister, cursor);
               }
            numIntArgs++;
            break;
         case TR::Int64:
            if (hasToStoreToStack && numIntArgs < properties.getNumIntArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getIntegerArgumentRegister(numIntArgs));
               if (TR::Compiler->target.is64Bit())
                  cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::Op_st, firstNode,
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 8, self()->cg()),
                              argRegister, cursor);
               else
                  {
                  cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::stw, firstNode,
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()),
                              argRegister, cursor);
                  if (numIntArgs < properties.getNumIntArgRegs() - 1)
                     {
                     argRegister = machine->getRealRegister(properties.getIntegerArgumentRegister(numIntArgs + 1));
                     cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::stw, firstNode,
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset + 4, 4, self()->cg()),
                                 argRegister, cursor);
                     }
                  }
               }
            if (TR::Compiler->target.is64Bit())
               numIntArgs++;
            else
               numIntArgs += 2;
            break;
         case TR::Float:
            if (hasToStoreToStack && numFloatArgs < properties.getNumFloatArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getFloatArgumentRegister(numFloatArgs));
               cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::stfs, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()),
                           argRegister, cursor);
               }
            numFloatArgs++;
            break;
         case TR::Double:
            if (hasToStoreToStack && numFloatArgs < properties.getNumFloatArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getFloatArgumentRegister(numFloatArgs));
               cursor = generateMemSrc1Instruction(self()->cg(), TR::InstOpCode::stfd, firstNode,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 8, self()->cg()),
                           argRegister, cursor);
               }
            numFloatArgs++;
            break;
         }
      paramCursor = paramIterator.getNext();
      }
   return cursor;
   }
TR::Instruction *OMR::Power::Linkage::loadUpArguments(TR::Instruction *cursor)
   {
   if (!self()->cg()->buildInterpreterEntryPoint())
      // would be better to use a different linkage for this purpose
      return cursor;

   TR::Machine *machine = self()->machine();
   TR::RealRegister *stackPtr = self()->cg()->getStackPointerRegister();
   TR::ResolvedMethodSymbol *bodySymbol = self()->comp()->getJittedMethodSymbol();
   ListIterator<TR::ParameterSymbol> paramIterator(&(bodySymbol->getParameterList()));
   TR::ParameterSymbol *paramCursor = paramIterator.getFirst();
   TR::Node *firstNode = self()->comp()->getStartTree()->getNode();
   int32_t numIntArgs = 0, numFloatArgs = 0;
   const TR::PPCLinkageProperties& properties = self()->getProperties();

   while ((paramCursor != NULL) &&
          ((numIntArgs < properties.getNumIntArgRegs()) ||
           (numFloatArgs < properties.getNumFloatArgRegs())))
      {
      TR::RealRegister *argRegister;
      int32_t offset = paramCursor->getParameterOffset();
      bool hasToLoadFromStack = paramCursor->isReferencedParameter() || paramCursor->isParmHasToBeOnStack();

      switch (paramCursor->getDataType())
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
            if (hasToLoadFromStack && numIntArgs < properties.getNumIntArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getIntegerArgumentRegister(numIntArgs));
               cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lwz, firstNode, argRegister,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()),
                           cursor);
               }
            numIntArgs++;
            break;
         case TR::Address:
            if (numIntArgs < properties.getNumIntArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getIntegerArgumentRegister(numIntArgs));
               cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::Op_load, firstNode, argRegister,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, TR::Compiler->om.sizeofReferenceAddress(), self()->cg()),
                           cursor);
               }
            numIntArgs++;
            break;
         case TR::Int64:
            if (hasToLoadFromStack && numIntArgs < properties.getNumIntArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getIntegerArgumentRegister(numIntArgs));
               if (TR::Compiler->target.is64Bit())
                  cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::ld, firstNode, argRegister,
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 8, self()->cg()),
                              cursor);
               else
                  {
                  cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lwz, firstNode, argRegister,
                              new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()),
                              cursor);
                  if (numIntArgs < properties.getNumIntArgRegs() - 1)
                     {
                     argRegister = machine->getRealRegister(properties.getIntegerArgumentRegister(numIntArgs + 1));
                     cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lwz, firstNode, argRegister,
                                 new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset + 4, 4, self()->cg()),
                                 cursor);
                     }
                  }
               }
            if (TR::Compiler->target.is64Bit())
               numIntArgs++;
            else
               numIntArgs += 2;
            break;
         case TR::Float:
            if (hasToLoadFromStack && numFloatArgs < properties.getNumFloatArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getFloatArgumentRegister(numFloatArgs));
               cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lfs, firstNode, argRegister,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 4, self()->cg()),
                           cursor);
               }
            numFloatArgs++;
            break;
         case TR::Double:
            if (hasToLoadFromStack && numFloatArgs < properties.getNumFloatArgRegs())
               {
               argRegister = machine->getRealRegister(properties.getFloatArgumentRegister(numFloatArgs));
               cursor = generateTrg1MemInstruction(self()->cg(), TR::InstOpCode::lfd, firstNode, argRegister,
                           new (self()->trHeapMemory()) TR::MemoryReference(stackPtr, offset, 8, self()->cg()),
                           cursor);
               }
            numFloatArgs++;
            break;
         }
      paramCursor = paramIterator.getNext();
      }
   return cursor;
   }