TR::Register *IA32LinkageUtils::pushLongArg(
      TR::Node *child,
      TR::CodeGenerator *cg)
   {
   TR::Register *pushRegister;
   if (child->getRegister() == NULL)
      {
      if (child->getOpCode().isLoadConst())
         {
         TR_X86OpCodes pushOp;
         int32_t highValue = child->getLongIntHigh();
         if (highValue >= -128 && highValue <= 127)
            {
            pushOp = PUSHImms;
            }
         else
            {
            pushOp = PUSHImm4;
            }
         generateImmInstruction(pushOp, child, highValue, cg);

         int32_t lowValue = child->getLongIntLow();
         if (lowValue >= -128 && lowValue <= 127)
            {
            pushOp = PUSHImms;
            }
         else
            {
            pushOp = PUSHImm4;
            }
         generateImmInstruction(pushOp, child, lowValue, cg);

         cg->decReferenceCount(child);
         return NULL;
         }
      else if (child->getOpCodeValue() == TR::dbits2l &&
               !child->normalizeNanValues() &&
               child->getReferenceCount() == 1)
         {
         pushRegister = pushDoubleArg(child->getFirstChild(), cg);
         cg->decReferenceCount(child);
         return pushRegister;
         }
      else if (child->getOpCode().isMemoryReference() &&
               child->getReferenceCount() == 1)
         {
         TR::MemoryReference *lowMR = generateX86MemoryReference(child, cg);
         generateMemInstruction(PUSHMem, child, generateX86MemoryReference(*lowMR, 4, cg), cg);
         generateMemInstruction(PUSHMem, child, lowMR, cg);
         lowMR->decNodeReferenceCounts(cg);
         return NULL;
         }
      }

   pushRegister = cg->evaluate(child);
   generateRegInstruction(PUSHReg, child, pushRegister->getHighOrder(), cg);
   generateRegInstruction(PUSHReg, child, pushRegister->getLowOrder(), cg);
   cg->decReferenceCount(child);
   return pushRegister;
   }

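/*
 * Note (explanatory, not part of the original source): in every path above the
 * high word is pushed before the low word, so after both pushes the low word
 * sits at the lower stack address. Because the IA-32 stack grows downward,
 * this leaves the 64-bit argument laid out in memory in little-endian order,
 * the same layout a 64-bit store would produce.
 */
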
TR::Register* OMR::X86::TreeEvaluator::SIMDsplatsEvaluator(TR::Node* node, TR::CodeGenerator* cg)
   {
   TR::Node* childNode = node->getChild(0);
   TR::Register* childReg = cg->evaluate(childNode);

   TR::Register* resultReg = cg->allocateRegister(TR_VRF);
   switch (node->getDataType())
      {
      case TR::VectorInt32:
         generateRegRegInstruction(MOVDRegReg4, node, resultReg, childReg, cg);
         generateRegRegImmInstruction(PSHUFDRegRegImm1, node, resultReg, resultReg, 0x00, cg); // 00 00 00 00 shuffle xxxA to AAAA
         break;
      case TR::VectorInt64:
         if (TR::Compiler->target.is32Bit())
            {
            TR::Register* tempVectorReg = cg->allocateRegister(TR_VRF);
            generateRegRegInstruction(MOVDRegReg4, node, tempVectorReg, childReg->getHighOrder(), cg);
            generateRegImmInstruction(PSLLQRegImm1, node, tempVectorReg, 0x20, cg);
            generateRegRegInstruction(MOVDRegReg4, node, resultReg, childReg->getLowOrder(), cg);
            generateRegRegInstruction(PORRegReg, node, resultReg, tempVectorReg, cg);
            cg->stopUsingRegister(tempVectorReg);
            }
         else
            {
            generateRegRegInstruction(MOVQRegReg8, node, resultReg, childReg, cg);
            }
         generateRegRegImmInstruction(PSHUFDRegRegImm1, node, resultReg, resultReg, 0x44, cg); // 01 00 01 00 shuffle xxBA to BABA
         break;
      case TR::VectorFloat:
         generateRegRegImmInstruction(PSHUFDRegRegImm1, node, resultReg, childReg, 0x00, cg); // 00 00 00 00 shuffle xxxA to AAAA
         break;
      case TR::VectorDouble:
         generateRegRegImmInstruction(PSHUFDRegRegImm1, node, resultReg, childReg, 0x44, cg); // 01 00 01 00 shuffle xxBA to BABA
         break;
      default:
         if (cg->comp()->getOption(TR_TraceCG))
            traceMsg(cg->comp(), "Unsupported data type, Node = %p\n", node);
         TR_ASSERT(false, "Unsupported data type");
         break;
      }

   node->setRegister(resultReg);
   cg->decReferenceCount(childNode);
   return resultReg;
   }

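/*
 * Note on the PSHUFD immediates used above (explanatory, not part of the
 * original source): each 2-bit field of the immediate selects the source
 * doubleword for the corresponding destination lane, low lane first.
 *   0x00 = 00 00 00 00 -> every lane takes dword 0: xxxA becomes AAAA
 *   0x44 = 01 00 01 00 -> lanes take dwords 0,1,0,1: xxBA becomes BABA
 */
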
TR::Register *
OMR::X86::I386::CodeGenerator::longClobberEvaluate(TR::Node *node)
   {
   TR_ASSERT(node->getOpCode().is8Byte(), "assertion failure");
   if (node->getReferenceCount() > 1)
      {
      TR::Register *temp = self()->evaluate(node);
      TR::Register *lowReg = self()->allocateRegister();
      TR::Register *highReg = self()->allocateRegister();
      TR::RegisterPair *longReg = self()->allocateRegisterPair(lowReg, highReg);

      generateRegRegInstruction(MOV4RegReg, node, lowReg, temp->getLowOrder(), self());
      generateRegRegInstruction(MOV4RegReg, node, highReg, temp->getHighOrder(), self());
      return longReg;
      }
   else
      {
      return self()->evaluate(node);
      }
   }

TR::Register *
TR::IA32SystemLinkage::buildVolatileAndReturnDependencies(
      TR::Node *callNode,
      TR::RegisterDependencyConditions *deps)
   {
   TR_ASSERT(deps != NULL, "expected register dependencies");

   // Allocate virtual register for return value
   //
   TR::Register *integerReturnReg = NULL;
   TR::Register *longReturnReg    = NULL;
   TR::Register *fpReturnReg      = NULL;
   TR::Register *returnReg        = NULL; // An alias for one of the above
   switch (callNode->getDataType())
      {
      case TR::NoType:
         break;
      case TR::Int8:
      case TR::Int16:
      case TR::Int32:
         returnReg = integerReturnReg = cg()->allocateRegister();
         break;
      case TR::Address:
         returnReg = integerReturnReg = cg()->allocateCollectedReferenceRegister();
         break;
      case TR::Float:
         returnReg = fpReturnReg = cg()->allocateSinglePrecisionRegister(TR_X87);
         break;
      case TR::Double:
         returnReg = fpReturnReg = cg()->allocateRegister(TR_X87);
         break;
      case TR::Int64:
         returnReg = longReturnReg = (TR::Register*)cg()->allocateRegisterPair(cg()->allocateRegister(), cg()->allocateRegister());
         break;
      case TR::Aggregate:
      default:
         TR_ASSERT(false, "return type still not supported");
      }

   // Deps for volatile regs
   //
   // TODO: This should be less dependent on the real registers, but the way
   // _properties is set up makes that very hard.
   //
   TR_ASSERT(_properties.getIntegerReturnRegister()  == TR::RealRegister::eax, "assertion failure");
   TR_ASSERT(_properties.getLongLowReturnRegister()  == TR::RealRegister::eax, "assertion failure");
   TR_ASSERT(_properties.getLongHighReturnRegister() == TR::RealRegister::edx, "assertion failure");
   TR_ASSERT(_properties.getFloatReturnRegister()    == TR::RealRegister::st0, "assertion failure");

   if (longReturnReg)
      {
      deps->addPostCondition(returnReg->getLowOrder(), TR::RealRegister::eax, cg());
      deps->addPostCondition(returnReg->getHighOrder(), TR::RealRegister::edx, cg());
      }
   else if (integerReturnReg)
      {
      deps->addPostCondition(returnReg, TR::RealRegister::eax, cg());
      deps->addPostCondition(cg()->allocateRegister(), TR::RealRegister::edx, cg());
      }
   else
      {
      deps->addPostCondition(cg()->allocateRegister(), TR::RealRegister::eax, cg());
      deps->addPostCondition(cg()->allocateRegister(), TR::RealRegister::edx, cg());
      }

   deps->addPostCondition(cg()->allocateRegister(), TR::RealRegister::ecx, cg());

   // st0
   if (fpReturnReg)
      {
      deps->addPostCondition(returnReg, _properties.getFloatReturnRegister(), cg());
      }
   else
      {
      // No need for a dummy dep here because the FPREGSPILL instruction takes care of it
      }

   // The register dependency list is intentionally left open and must be closed
   // by the caller. A child class may call this method and then add further
   // register dependencies; if we closed the list here, the child class could
   // not add any more.
   return returnReg;
   }

/*
 * Users should call the longSubtractAnalyser or longSubtractAnalyserWithExplicitOperands
 * APIs instead of calling this one directly.
 */
TR::Register* TR_X86SubtractAnalyser::longSubtractAnalyserImpl(TR::Node *root, TR::Node *&firstChild, TR::Node *&secondChild)
   {
   TR::Register *firstRegister  = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();
   TR::Register *targetRegister = NULL;

   bool firstHighZero      = false;
   bool secondHighZero     = false;
   bool useSecondHighOrder = false;

   TR_X86OpCodes regRegOpCode = SUB4RegReg;
   TR_X86OpCodes regMemOpCode = SUB4RegMem;

   bool needsEflags = NEED_CC(root) || (root->getOpCodeValue() == TR::lusubb);

   // We can generate better code for long subtracts when one or more children
   // have a high-order zero word; we can also avoid evaluating such nodes when
   // no other parent needs their result.
   //
   if (firstChild->isHighWordZero() && !needsEflags)
      {
      firstHighZero = true;
      }

   if (secondChild->isHighWordZero() && !needsEflags)
      {
      secondHighZero = true;
      TR::ILOpCodes secondOp = secondChild->getOpCodeValue();
      if (secondChild->getReferenceCount() == 1 && secondRegister == 0)
         {
         if (secondOp == TR::iu2l || secondOp == TR::su2l || secondOp == TR::bu2l ||
             (secondOp == TR::lushr &&
              secondChild->getSecondChild()->getOpCodeValue() == TR::iconst &&
              (secondChild->getSecondChild()->getInt() & TR::TreeEvaluator::shiftMask(true)) == 32))
            {
            secondChild    = secondChild->getFirstChild();
            secondRegister = secondChild->getRegister();
            if (secondOp == TR::lushr)
               {
               useSecondHighOrder = true;
               }
            }
         }
      }

   setInputs(firstChild, firstRegister, secondChild, secondRegister);

   if (isVolatileMemoryOperand(firstChild))
      resetMem1();

   if (isVolatileMemoryOperand(secondChild))
      resetMem2();

   if (getEvalChild1())
      {
      firstRegister = _cg->evaluate(firstChild);
      }

   if (getEvalChild2())
      {
      secondRegister = _cg->evaluate(secondChild);
      }

   if (secondHighZero && secondRegister && secondRegister->getRegisterPair())
      {
      if (!useSecondHighOrder)
         {
         secondRegister = secondRegister->getLowOrder();
         }
      else
         {
         secondRegister = secondRegister->getHighOrder();
         }
      }

   if (root->getOpCodeValue() == TR::lusubb &&
       TR_X86ComputeCC::setCarryBorrow(root->getChild(2), true, _cg))
      {
      // Use SBB rather than SUB
      //
      regRegOpCode = SBB4RegReg;
      regMemOpCode = SBB4RegMem;
      }

   if (getCopyReg1())
      {
      TR::Register     *lowThird  = _cg->allocateRegister();
      TR::Register     *highThird = _cg->allocateRegister();
      TR::RegisterPair *thirdReg  = _cg->allocateRegisterPair(lowThird, highThird);

      targetRegister = thirdReg;
      generateRegRegInstruction(MOV4RegReg, root, lowThird, firstRegister->getLowOrder(), _cg);

      if (firstHighZero)
         {
         generateRegRegInstruction(XOR4RegReg, root, highThird, highThird, _cg);
         }
      else
         {
         generateRegRegInstruction(MOV4RegReg, root, highThird, firstRegister->getHighOrder(), _cg);
         }

      if (getSubReg3Reg2())
         {
         if (secondHighZero)
            {
            generateRegRegInstruction(regRegOpCode, root, lowThird, secondRegister, _cg);
            generateRegImmInstruction(SBB4RegImms, root, highThird, 0, _cg);
            }
         else
            {
            generateRegRegInstruction(regRegOpCode, root, lowThird, secondRegister->getLowOrder(), _cg);
            generateRegRegInstruction(SBB4RegReg, root, highThird, secondRegister->getHighOrder(), _cg);
            }
         }
      else // assert getSubReg3Mem2() == true
         {
         TR::MemoryReference *lowMR = generateX86MemoryReference(secondChild, _cg);
         /**
          * The code below is needed to ensure correct behaviour when the subtract analyser
          * encounters a lushr bytecode that shifts by 32 bits. This is the only case in which
          * the useSecondHighOrder bit is set.
          * When the first child of the lushr is in a register, the code above handles the shift.
          * When the first child of the lushr is in memory, the code below ensures that the upper
          * half of that child is used as lowMR.
          */
         if (useSecondHighOrder)
            {
            TR_ASSERT(secondHighZero, "useSecondHighOrder should be consistent with secondHighZero. useSecondHighOrder subsumes secondHighZero");
            lowMR = generateX86MemoryReference(*lowMR, 4, _cg);
            }

         generateRegMemInstruction(regMemOpCode, root, lowThird, lowMR, _cg);

         if (secondHighZero)
            {
            generateRegImmInstruction(SBB4RegImms, root, highThird, 0, _cg);
            }
         else
            {
            TR::MemoryReference *highMR = generateX86MemoryReference(*lowMR, 4, _cg);
            generateRegMemInstruction(SBB4RegMem, root, highThird, highMR, _cg);
            }
         lowMR->decNodeReferenceCounts(_cg);
         }
      }
   else if (getSubReg1Reg2())
      {
      if (secondHighZero)
         {
         generateRegRegInstruction(regRegOpCode, root, firstRegister->getLowOrder(), secondRegister, _cg);
         generateRegImmInstruction(SBB4RegImms, root, firstRegister->getHighOrder(), 0, _cg);
         }
      else
         {
         generateRegRegInstruction(regRegOpCode, root, firstRegister->getLowOrder(), secondRegister->getLowOrder(), _cg);
         generateRegRegInstruction(SBB4RegReg, root, firstRegister->getHighOrder(), secondRegister->getHighOrder(), _cg);
         }
      targetRegister = firstRegister;
      }
   else // assert getSubReg1Mem2() == true
      {
      TR::MemoryReference *lowMR = generateX86MemoryReference(secondChild, _cg);
      /**
       * The code below is needed to ensure correct behaviour when the subtract analyser
       * encounters a lushr bytecode that shifts by 32 bits. This is the only case in which
       * the useSecondHighOrder bit is set.
       * When the first child of the lushr is in a register, the code above handles the shift.
       * When the first child of the lushr is in memory, the code below ensures that the upper
       * half of that child is used as lowMR.
       */
      if (useSecondHighOrder)
         lowMR = generateX86MemoryReference(*lowMR, 4, _cg);

      generateRegMemInstruction(regMemOpCode, root, firstRegister->getLowOrder(), lowMR, _cg);

      if (secondHighZero)
         {
         generateRegImmInstruction(SBB4RegImms, root, firstRegister->getHighOrder(), 0, _cg);
         }
      else
         {
         TR::MemoryReference *highMR = generateX86MemoryReference(*lowMR, 4, _cg);
         generateRegMemInstruction(SBB4RegMem, root, firstRegister->getHighOrder(), highMR, _cg);
         }

      targetRegister = firstRegister;
      lowMR->decNodeReferenceCounts(_cg);
      }

   return targetRegister;
   }

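/*
 * Borrow-chain note (explanatory, not part of the original source): the 64-bit
 * subtract above is split into a SUB of the low words followed by an SBB of the
 * high words, so the borrow produced by the low-order SUB is consumed by the
 * high-order SBB. When the second operand's high word is known to be zero,
 * "SBB high, 0" is still required so that a pending borrow is subtracted.
 */
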
void
TR_S390BinaryAnalyser::longSubtractAnalyser(TR::Node * root)
   {
   TR::Node * firstChild;
   TR::Node * secondChild;
   TR::Instruction * cursor = NULL;
   TR::RegisterDependencyConditions * dependencies = NULL;
   bool setsOrReadsCC = NEED_CC(root) || (root->getOpCodeValue() == TR::lusubb);
   TR::InstOpCode::Mnemonic regToRegOpCode;
   TR::InstOpCode::Mnemonic memToRegOpCode;
   TR::Compilation *comp = TR::comp();

   if (TR::Compiler->target.is64Bit() || cg()->use64BitRegsOn32Bit())
      {
      if (!setsOrReadsCC)
         {
         regToRegOpCode = TR::InstOpCode::SGR;
         memToRegOpCode = TR::InstOpCode::SG;
         }
      else
         {
         regToRegOpCode = TR::InstOpCode::SLGR;
         memToRegOpCode = TR::InstOpCode::SLG;
         }
      }
   else
      {
      regToRegOpCode = TR::InstOpCode::SLR;
      memToRegOpCode = TR::InstOpCode::SL;
      }

   firstChild = root->getFirstChild();
   secondChild = root->getSecondChild();
   TR::Register * firstRegister = firstChild->getRegister();
   TR::Register * secondRegister = secondChild->getRegister();

   setInputs(firstChild, firstRegister, secondChild, secondRegister, false, false, comp);

   /** Attempt to use SGH to subtract a halfword (64 <- 16).
    *  The second child is a halfword from memory.
    */
   bool is16BitMemory2Operand = false;
   if (TR::Compiler->target.cpu.getS390SupportsZ14() &&
       secondChild->getOpCodeValue() == TR::s2l &&
       secondChild->getFirstChild()->getOpCodeValue() == TR::sloadi &&
       secondChild->isSingleRefUnevaluated() &&
       secondChild->getFirstChild()->isSingleRefUnevaluated())
      {
      setMem2();
      memToRegOpCode = TR::InstOpCode::SGH;
      is16BitMemory2Operand = true;
      }

   if (getEvalChild1())
      {
      firstRegister = cg()->evaluate(firstChild);
      }

   if (getEvalChild2())
      {
      secondRegister = cg()->evaluate(secondChild);
      }

   remapInputs(firstChild, firstRegister, secondChild, secondRegister);

   if ((root->getOpCodeValue() == TR::lusubb) &&
       TR_S390ComputeCC::setCarryBorrow(root->getChild(2), false, cg()))
      {
      // use SLBGR rather than SLGR/SGR
      //     SLBG  rather than SLG/SG
      // or
      // use SLBR  rather than SLR
      //     SLB   rather than SL
      bool uses64bit = TR::Compiler->target.is64Bit() || cg()->use64BitRegsOn32Bit();
      regToRegOpCode = uses64bit ? TR::InstOpCode::SLBGR : TR::InstOpCode::SLBR;
      memToRegOpCode = uses64bit ? TR::InstOpCode::SLBG  : TR::InstOpCode::SLB;
      }

   if (TR::Compiler->target.is64Bit() || cg()->use64BitRegsOn32Bit())
      {
      if (getCopyReg1())
         {
         TR::Register * thirdReg = cg()->allocate64bitRegister();
         root->setRegister(thirdReg);
         generateRRInstruction(cg(), TR::InstOpCode::LGR, root, thirdReg, firstRegister);
         if (getBinaryReg3Reg2())
            {
            generateRRInstruction(cg(), regToRegOpCode, root, thirdReg, secondRegister);
            }
         else // assert getBinaryReg3Mem2() == true
            {
            TR::MemoryReference * longMR = generateS390MemoryReference(secondChild, cg());
            generateRXInstruction(cg(), memToRegOpCode, root, thirdReg, longMR);
            longMR->stopUsingMemRefRegister(cg());
            }
         }
      else if (getBinaryReg1Reg2())
         {
         generateRRInstruction(cg(), regToRegOpCode, root, firstRegister, secondRegister);
         root->setRegister(firstRegister);
         }
      else // assert getBinaryReg1Mem2() == true
         {
         TR_ASSERT(!getInvalid(), "TR_S390BinaryAnalyser::invalid case\n");

         TR::Node* baseAddrNode = is16BitMemory2Operand ? secondChild->getFirstChild() : secondChild;
         TR::MemoryReference * longMR = generateS390MemoryReference(baseAddrNode, cg());
         generateRXInstruction(cg(), memToRegOpCode, root, firstRegister, longMR);
         longMR->stopUsingMemRefRegister(cg());
         root->setRegister(firstRegister);

         if (is16BitMemory2Operand)
            {
            cg()->decReferenceCount(secondChild->getFirstChild());
            }
         }
      }
   else // 32-bit codegen...
      {
      bool zArchTrexsupported = performTransformation(comp, "O^O Use SL/SLB for long sub.");

      TR::Register * highDiff = NULL;
      TR::LabelSymbol * doneLSub = TR::LabelSymbol::create(cg()->trHeapMemory(), cg());
      if (getCopyReg1())
         {
         TR::Register * lowThird = cg()->allocateRegister();
         TR::Register * highThird = cg()->allocateRegister();
         TR::RegisterPair * thirdReg = cg()->allocateConsecutiveRegisterPair(lowThird, highThird);

         highDiff = highThird;

         dependencies = new (cg()->trHeapMemory()) TR::RegisterDependencyConditions(0, 9, cg());
         dependencies->addPostCondition(firstRegister, TR::RealRegister::EvenOddPair);
         dependencies->addPostCondition(firstRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
         dependencies->addPostCondition(firstRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);

         // If the 2nd operand has a ref count of 1 and can be accessed by a memory reference,
         // then the second register will not be used.
         if (secondRegister == firstRegister && !setsOrReadsCC)
            {
            TR_ASSERT(false, "lsub with identical children - fix Simplifier");
            }
         if (secondRegister != NULL && firstRegister != secondRegister)
            {
            dependencies->addPostCondition(secondRegister, TR::RealRegister::EvenOddPair);
            dependencies->addPostCondition(secondRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
            dependencies->addPostCondition(secondRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);
            }
         dependencies->addPostCondition(highThird, TR::RealRegister::AssignAny);

         root->setRegister(thirdReg);
         generateRRInstruction(cg(), TR::InstOpCode::LR, root, highThird, firstRegister->getHighOrder());
         generateRRInstruction(cg(), TR::InstOpCode::LR, root, lowThird, firstRegister->getLowOrder());

         if (getBinaryReg3Reg2())
            {
            if ((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC)
               {
               generateRRInstruction(cg(), regToRegOpCode, root, lowThird, secondRegister->getLowOrder());
               generateRRInstruction(cg(), TR::InstOpCode::SLBR, root, highThird, secondRegister->getHighOrder());
               }
            else
               {
               generateRRInstruction(cg(), TR::InstOpCode::SR, root, highThird, secondRegister->getHighOrder());
               generateRRInstruction(cg(), TR::InstOpCode::SLR, root, lowThird, secondRegister->getLowOrder());
               }
            }
         else // assert getBinaryReg3Mem2() == true
            {
            TR::MemoryReference * highMR = generateS390MemoryReference(secondChild, cg());
            TR::MemoryReference * lowMR = generateS390MemoryReference(*highMR, 4, cg());
            dependencies->addAssignAnyPostCondOnMemRef(highMR);

            if ((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC)
               {
               generateRXInstruction(cg(), memToRegOpCode, root, lowThird, lowMR);
               generateRXInstruction(cg(), TR::InstOpCode::SLB, root, highThird, highMR);
               }
            else
               {
               generateRXInstruction(cg(), TR::InstOpCode::S, root, highThird, highMR);
               generateRXInstruction(cg(), TR::InstOpCode::SL, root, lowThird, lowMR);
               }
            highMR->stopUsingMemRefRegister(cg());
            lowMR->stopUsingMemRefRegister(cg());
            }
         }
      else if (getBinaryReg1Reg2())
         {
         dependencies = new (cg()->trHeapMemory()) TR::RegisterDependencyConditions(0, 6, cg());
         dependencies->addPostCondition(firstRegister, TR::RealRegister::EvenOddPair);
         dependencies->addPostCondition(firstRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
         dependencies->addPostCondition(firstRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);

         if (secondRegister == firstRegister)
            {
            TR_ASSERT(false, "lsub with identical children - fix Simplifier");
            }
         if (secondRegister != firstRegister)
            {
            dependencies->addPostCondition(secondRegister, TR::RealRegister::EvenOddPair);
            dependencies->addPostCondition(secondRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
            dependencies->addPostCondition(secondRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);
            }

         if ((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC)
            {
            generateRRInstruction(cg(), regToRegOpCode, root, firstRegister->getLowOrder(), secondRegister->getLowOrder());
            generateRRInstruction(cg(), TR::InstOpCode::SLBR, root, firstRegister->getHighOrder(), secondRegister->getHighOrder());
            }
         else
            {
            generateRRInstruction(cg(), TR::InstOpCode::SR, root, firstRegister->getHighOrder(), secondRegister->getHighOrder());
            generateRRInstruction(cg(), TR::InstOpCode::SLR, root, firstRegister->getLowOrder(), secondRegister->getLowOrder());
            }

         highDiff = firstRegister->getHighOrder();
         root->setRegister(firstRegister);
         }
      else // assert getBinaryReg1Mem2() == true
         {
         TR_ASSERT(!getInvalid(), "TR_S390BinaryAnalyser::invalid case\n");

         dependencies = new (cg()->trHeapMemory()) TR::RegisterDependencyConditions(0, 5, cg());
         dependencies->addPostCondition(firstRegister, TR::RealRegister::EvenOddPair);
         dependencies->addPostCondition(firstRegister->getHighOrder(), TR::RealRegister::LegalEvenOfPair);
         dependencies->addPostCondition(firstRegister->getLowOrder(), TR::RealRegister::LegalOddOfPair);

         TR::MemoryReference * highMR = generateS390MemoryReference(secondChild, cg());
         TR::MemoryReference * lowMR = generateS390MemoryReference(*highMR, 4, cg());
         dependencies->addAssignAnyPostCondOnMemRef(highMR);

         if ((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC)
            {
            generateRXInstruction(cg(), memToRegOpCode, root, firstRegister->getLowOrder(), lowMR);
            generateRXInstruction(cg(), TR::InstOpCode::SLB, root, firstRegister->getHighOrder(), highMR);
            }
         else
            {
            generateRXInstruction(cg(), TR::InstOpCode::S, root, firstRegister->getHighOrder(), highMR);
            generateRXInstruction(cg(), TR::InstOpCode::SL, root, firstRegister->getLowOrder(), lowMR);
            }

         highDiff = firstRegister->getHighOrder();
         root->setRegister(firstRegister);
         highMR->stopUsingMemRefRegister(cg());
         lowMR->stopUsingMemRefRegister(cg());
         }

      if (!((ENABLE_ZARCH_FOR_32 && zArchTrexsupported) || setsOrReadsCC))
         {
         // Check for a borrow out of the low word. If there was none, we are done.
         generateS390BranchInstruction(cg(), TR::InstOpCode::BRC, TR::InstOpCode::COND_MASK3, root, doneLSub);

         // Account for the borrow out of the low word by decrementing the high word of the difference.
         generateRIInstruction(cg(), TR::InstOpCode::AHI, root, highDiff, -1);

         generateS390LabelInstruction(cg(), TR::InstOpCode::LABEL, root, doneLSub, dependencies);
         }
      }

   cg()->decReferenceCount(firstChild);
   cg()->decReferenceCount(secondChild);
   return;
   }

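/*
 * Note on the 32-bit path above (explanatory, not part of the original source):
 * when the subtract-with-borrow forms (SLR/SLB with SLBR/SLB) are not used, the
 * high words are subtracted with S and the low words with SL. SL leaves a
 * condition code that indicates whether a borrow occurred, so the BRC skips the
 * fix-up when there was no borrow; otherwise the AHI of -1 propagates the
 * borrow into the high word of the difference.
 */
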
rcount_t
OMR::CodeGenerator::decReferenceCount(TR::Node * node)
   {
   TR::Register *reg = node->getRegister();

   // Restricted registers go dead when their ref count == 2 because their ref
   // count was incremented in prepareNodeForInstructionSelection.
   if ((node->getReferenceCount() == 1) &&
       reg &&
       self()->getLiveRegisters(reg->getKind()))
      {
      TR_ASSERT(reg->isLive() ||
                (diagnostic("\n*** Error: Register %s for node "
                            "[%s] died prematurely\n",
                            reg->getRegisterName(self()->comp()),
                            node->getName(self()->comp()->getDebug())), 0),
                "Node %s register should be live", self()->getDebug()->getName(node));

      TR_LiveRegisterInfo *liveRegister = reg->getLiveRegisterInfo();
      TR::Register *pair = reg->getRegisterPair();
      if (pair)
         {
         pair->getHighOrder()->getLiveRegisterInfo()->decNodeCount();
         pair->getLowOrder()->getLiveRegisterInfo()->decNodeCount();
         }

      if (liveRegister && liveRegister->decNodeCount() == 0)
         {
         // The register is now dead
         //
         self()->getLiveRegisters(reg->getKind())->registerIsDead(reg);
         }
      }

#ifdef J9_PROJECT_SPECIFIC
#if defined(TR_TARGET_S390)
   if (reg && reg->getOpaquePseudoRegister())
      {
      TR_OpaquePseudoRegister *pseudoReg = reg->getOpaquePseudoRegister();
      TR_StorageReference *storageReference = pseudoReg->getStorageReference();
      TR_ASSERT(storageReference, "the pseudoReg should have a non-null storage reference\n");
      storageReference->decrementTemporaryReferenceCount();
      if (node->getReferenceCount() == 1)
         {
         storageReference->decOwningRegisterCount();
         if (self()->traceBCDCodeGen())
            traceMsg(self()->comp(), "\tdecrement owningRegisterCount %d->%d on ref #%d (%s) for reg %s as %s (%p) refCount == 1 (going to 0)\n",
               storageReference->getOwningRegisterCount()+1,
               storageReference->getOwningRegisterCount(),
               storageReference->getReferenceNumber(),
               self()->getDebug()->getName(storageReference->getSymbol()),
               self()->getDebug()->getName(reg),
               node->getOpCode().getName(),
               node);
         }
      }
   else if (node->getOpCode().hasSymbolReference() &&
            node->getSymbolReference() &&
            node->getSymbolReference()->isTempVariableSizeSymRef())
      {
      TR_ASSERT(false, "tempMemSlots should only be attached to pseudoRegisters and not node %p\n", node);
      }
#endif
#endif

   rcount_t count = node->decReferenceCount();
   if (self()->comp()->getOptions()->getTraceCGOption(TR_TraceCGEvaluation))
      {
      self()->getDebug()->printNodeEvaluation(node, "-- ", reg);
      }
   return count;
   }