// Mark as aliases every static symbol reference (grouped by address / Int32 / other
// primitive type) that shares this symbol, then fold in the unsafe symRef numbers.
void
OMR::SymbolReference::setSharedStaticAliases(TR_BitVector *aliases, TR::SymbolReferenceTable *symRefTab)
   {
   if (self()->reallySharesSymbol())
      {
      TR::DataType type = self()->getSymbol()->getType();
      TR_SymRefIterator i(type.isAddress() ? symRefTab->aliasBuilder.addressStaticSymRefs() :
                             (type.isInt32() ? symRefTab->aliasBuilder.intStaticSymRefs() :
                                               symRefTab->aliasBuilder.nonIntPrimitiveStaticSymRefs()),
                          symRefTab);
      TR::SymbolReference *symRef;
      while ((symRef = i.getNext()))
         if (symRef->getSymbol() == self()->getSymbol())
            aliases->set(symRef->getReferenceNumber());
      }
   else
      aliases->set(self()->getReferenceNumber());

   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
// Same idea for shadow symbol references. Unsafe shadow symbols fall through to the
// else branch and alias only themselves here; the unsafe symRef numbers are always
// folded in at the end.
void
OMR::SymbolReference::setSharedShadowAliases(TR_BitVector *aliases, TR::SymbolReferenceTable *symRefTab)
   {
   if (self()->reallySharesSymbol() && !_symbol->isUnsafeShadowSymbol())
      {
      TR::DataType type = self()->getSymbol()->getType();
      TR_SymRefIterator i(type.isAddress() ? symRefTab->aliasBuilder.addressShadowSymRefs() :
                             (type.isInt32() ? symRefTab->aliasBuilder.intShadowSymRefs() :
                                               symRefTab->aliasBuilder.nonIntPrimitiveShadowSymRefs()),
                          symRefTab);
      TR::SymbolReference *symRef;
      while ((symRef = i.getNext()))
         if (symRef->getSymbol() == self()->getSymbol())
            aliases->set(symRef->getReferenceNumber());

      // include symbol reference's own shared alias bitvector
      if (symRefTab->getSharedAliases(self()) != NULL)
         *aliases |= *(symRefTab->getSharedAliases(self()));
      }
   else
      aliases->set(self()->getReferenceNumber());

   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
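// Illustrative sketch (not part of the original source): one hedged example of how a
// caller might use the two helpers above to collect the full alias set for a symbol
// reference. The helper name collectSharedAliases and the exact TR_BitVector
// construction shown are assumptions for illustration only.
//
//    TR_BitVector *collectSharedAliases(TR::SymbolReference *symRef,
//                                       TR::SymbolReferenceTable *symRefTab,
//                                       TR::Compilation *comp)
//       {
//       // One bit per symbol reference in the table (allocation details are assumed).
//       TR_BitVector *aliases = new (comp->trHeapMemory())
//          TR_BitVector(symRefTab->getNumSymRefs(), comp->trMemory(), heapAlloc);
//
//       if (symRef->getSymbol()->isShadow())
//          symRef->setSharedShadowAliases(aliases, symRefTab);   // shared shadows + unsafe set
//       else
//          symRef->setSharedStaticAliases(aliases, symRefTab);   // shared statics + unsafe set
//       return aliases;
//       }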
int32_t TR::ARM64SystemLinkage::buildArgs(TR::Node *callNode,
   TR::RegisterDependencyConditions *dependencies)
   {
   const TR::ARM64LinkageProperties &properties = getProperties();
   TR::ARM64MemoryArgument *pushToMemory = NULL;
   TR::Register *argMemReg;
   TR::Register *tempReg;
   int32_t argIndex = 0;
   int32_t numMemArgs = 0;
   int32_t argSize = 0;
   int32_t numIntegerArgs = 0;
   int32_t numFloatArgs = 0;
   int32_t totalSize;
   int32_t i;

   TR::Node *child;
   TR::DataType childType;
   TR::DataType resType = callNode->getType();

   uint32_t firstArgumentChild = callNode->getFirstArgumentIndex();

   /* Step 1 - figure out how many arguments are going to be spilled to memory i.e. not in registers */
   for (i = firstArgumentChild; i < callNode->getNumChildren(); i++)
      {
      child = callNode->getChild(i);
      childType = child->getDataType();

      switch (childType)
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
         case TR::Int64:
         case TR::Address:
            if (numIntegerArgs >= properties.getNumIntArgRegs())
               numMemArgs++;
            numIntegerArgs++;
            break;

         case TR::Float:
         case TR::Double:
            if (numFloatArgs >= properties.getNumFloatArgRegs())
               numMemArgs++;
            numFloatArgs++;
            break;

         default:
            TR_ASSERT(false, "Argument type %s is not supported\n", childType.toString());
         }
      }

   // From here, down, any new stack allocations will expire / die when the function returns
   TR::StackMemoryRegion stackMemoryRegion(*trMemory());

   /* End result of Step 1 - determined number of memory arguments! */
   if (numMemArgs > 0)
      {
      pushToMemory = new (trStackMemory()) TR::ARM64MemoryArgument[numMemArgs];
      argMemReg = cg()->allocateRegister();
      }

   totalSize = numMemArgs * 8;
   // align to 16-byte boundary
   totalSize = (totalSize + 15) & (~15);

   numIntegerArgs = 0;
   numFloatArgs = 0;

   for (i = firstArgumentChild; i < callNode->getNumChildren(); i++)
      {
      TR::MemoryReference *mref = NULL;
      TR::Register *argRegister;
      TR::InstOpCode::Mnemonic op;

      child = callNode->getChild(i);
      childType = child->getDataType();

      switch (childType)
         {
         case TR::Int8:
         case TR::Int16:
         case TR::Int32:
         case TR::Int64:
         case TR::Address:
            if (childType == TR::Address)
               argRegister = pushAddressArg(child);
            else if (childType == TR::Int64)
               argRegister = pushLongArg(child);
            else
               argRegister = pushIntegerWordArg(child);

            if (numIntegerArgs < properties.getNumIntArgRegs())
               {
               if (!cg()->canClobberNodesRegister(child, 0))
                  {
                  if (argRegister->containsCollectedReference())
                     tempReg = cg()->allocateCollectedReferenceRegister();
                  else
                     tempReg = cg()->allocateRegister();
                  generateMovInstruction(cg(), callNode, tempReg, argRegister);
                  argRegister = tempReg;
                  }
               if (numIntegerArgs == 0 &&
                   (resType.isAddress() || resType.isInt32() || resType.isInt64()))
                  {
                  TR::Register *resultReg;
                  if (resType.isAddress())
                     resultReg = cg()->allocateCollectedReferenceRegister();
                  else
                     resultReg = cg()->allocateRegister();

                  dependencies->addPreCondition(argRegister, TR::RealRegister::x0);
                  dependencies->addPostCondition(resultReg, TR::RealRegister::x0);
                  }
               else
                  {
                  addDependency(dependencies, argRegister, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
                  }
               }
            else
               {
               // numIntegerArgs >= properties.getNumIntArgRegs()
               if (childType == TR::Address || childType == TR::Int64)
                  {
                  op = TR::InstOpCode::strpostx;
                  }
               else
                  {
                  op = TR::InstOpCode::strpostw;
                  }
               mref = getOutgoingArgumentMemRef(argMemReg, argRegister, op, pushToMemory[argIndex++]);
               argSize += 8; // always 8-byte aligned
               }
            numIntegerArgs++;
            break;

         case TR::Float:
         case TR::Double:
            if (childType == TR::Float)
               argRegister = pushFloatArg(child);
            else
               argRegister = pushDoubleArg(child);

            if (numFloatArgs < properties.getNumFloatArgRegs())
               {
               if (!cg()->canClobberNodesRegister(child, 0))
                  {
                  tempReg = cg()->allocateRegister(TR_FPR);
                  op = (childType == TR::Float) ? TR::InstOpCode::fmovs : TR::InstOpCode::fmovd;
                  generateTrg1Src1Instruction(cg(), op, callNode, tempReg, argRegister);
                  argRegister = tempReg;
                  }
               if ((numFloatArgs == 0 && resType.isFloatingPoint()))
                  {
                  TR::Register *resultReg;
                  if (resType.getDataType() == TR::Float)
                     resultReg = cg()->allocateSinglePrecisionRegister();
                  else
                     resultReg = cg()->allocateRegister(TR_FPR);

                  dependencies->addPreCondition(argRegister, TR::RealRegister::v0);
                  dependencies->addPostCondition(resultReg, TR::RealRegister::v0);
                  }
               else
                  {
                  addDependency(dependencies, argRegister, properties.getFloatArgumentRegister(numFloatArgs), TR_FPR, cg());
                  }
               }
            else
               {
               // numFloatArgs >= properties.getNumFloatArgRegs()
               if (childType == TR::Double)
                  {
                  op = TR::InstOpCode::vstrpostd;
                  }
               else
                  {
                  op = TR::InstOpCode::vstrposts;
                  }
               mref = getOutgoingArgumentMemRef(argMemReg, argRegister, op, pushToMemory[argIndex++]);
               argSize += 8; // always 8-byte aligned
               }
            numFloatArgs++;
            break;
         } // end of switch
      } // end of for

   // NULL deps for non-preserved and non-system regs
   while (numIntegerArgs < properties.getNumIntArgRegs())
      {
      if (numIntegerArgs == 0 && resType.isAddress())
         {
         dependencies->addPreCondition(cg()->allocateRegister(), properties.getIntegerArgumentRegister(0));
         dependencies->addPostCondition(cg()->allocateCollectedReferenceRegister(), properties.getIntegerArgumentRegister(0));
         }
      else
         {
         addDependency(dependencies, NULL, properties.getIntegerArgumentRegister(numIntegerArgs), TR_GPR, cg());
         }
      numIntegerArgs++;
      }

   int32_t floatRegsUsed = (numFloatArgs > properties.getNumFloatArgRegs()) ? properties.getNumFloatArgRegs() : numFloatArgs;
   for (i = (TR::RealRegister::RegNum)((uint32_t)TR::RealRegister::v0 + floatRegsUsed); i <= TR::RealRegister::LastFPR; i++)
      {
      if (!properties.getPreserved((TR::RealRegister::RegNum)i))
         {
         // NULL dependency for non-preserved regs
         addDependency(dependencies, NULL, (TR::RealRegister::RegNum)i, TR_FPR, cg());
         }
      }

   if (numMemArgs > 0)
      {
      TR::RealRegister *sp = cg()->machine()->getRealRegister(properties.getStackPointerRegister());
      generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::subimmx, callNode, argMemReg, sp, totalSize);

      for (argIndex = 0; argIndex < numMemArgs; argIndex++)
         {
         TR::Register *aReg = pushToMemory[argIndex].argRegister;
         generateMemSrc1Instruction(cg(), pushToMemory[argIndex].opCode, callNode, pushToMemory[argIndex].argMemory, aReg);
         cg()->stopUsingRegister(aReg);
         }

      cg()->stopUsingRegister(argMemReg);
      }

   return totalSize;
   }
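// Illustrative sketch (not part of the original source): a hedged outline of how a
// dispatch routine typically consumes buildArgs(). The caller name and the exact
// instructions emitted around the call are assumptions for illustration; only the use
// of the returned, 16-byte-aligned outgoing-argument size follows from the code above.
//
//    TR::Register *buildDispatchSketch(TR::Node *callNode)       // hypothetical caller
//       {
//       TR::RegisterDependencyConditions *dependencies = ...;    // sized for arg/return regs
//       int32_t totalSize = buildArgs(callNode, dependencies);   // fills deps, stores mem args
//
//       // buildArgs() stores memory arguments starting at SP - totalSize, which suggests
//       // the caller reserves that area around the call and releases it afterwards, e.g.
//       //    sub sp, sp, #totalSize   ... call ...   add sp, sp, #totalSize
//       ...
//       }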