static TR::Register *l2fd(TR::Node *node, TR::RealRegister *target, TR_X86OpCodes opRegMem8, TR_X86OpCodes opRegReg8, TR::CodeGenerator *cg)
   {
   // Convert a long operand into the given target register, using either the
   // reg-mem form (folding an unshared memory load directly into the
   // conversion) or the reg-reg form (converting from an evaluated GPR).
   TR::Node *operand = node->getFirstChild();

   TR_ASSERT(cg->useSSEForSinglePrecision(), "assertion failure");

   bool canFoldLoad = operand->getRegister() == NULL &&
                      operand->getReferenceCount() == 1 &&
                      operand->getOpCode().isLoadVar();

   if (canFoldLoad)
      {
      // Unevaluated, single-use load: feed its memory reference straight
      // into the conversion instruction.
      TR::MemoryReference *sourceMR = generateX86MemoryReference(operand, cg);
      generateRegMemInstruction(opRegMem8, node, target, sourceMR, cg);
      sourceMR->decNodeReferenceCounts(cg);
      }
   else
      {
      TR::Register *sourceReg = cg->evaluate(operand);
      generateRegRegInstruction(opRegReg8, node, target, sourceReg, cg);
      cg->decReferenceCount(operand);
      }

   node->setRegister(target);
   return target;
   }
TR::Register *IA32LinkageUtils::pushLongArg(
      TR::Node *child,
      TR::CodeGenerator *cg)
   {
   // Push a 64-bit argument onto the stack as two 32-bit slots, high word
   // first.  Returns the evaluated register pair, or NULL when the value was
   // pushed without evaluating the child into a register.
   TR::Register *pushRegister;

   if (child->getRegister() == NULL)
      {
      if (child->getOpCode().isLoadConst())
         {
         // Push each half as an immediate; the one-byte sign-extended push
         // form is used when the half fits in [-128, 127].
         int32_t highValue = child->getLongIntHigh();
         TR_X86OpCodes pushOp = (highValue >= -128 && highValue <= 127) ? PUSHImms : PUSHImm4;
         generateImmInstruction(pushOp, child, highValue, cg);

         int32_t lowValue = child->getLongIntLow();
         pushOp = (lowValue >= -128 && lowValue <= 127) ? PUSHImms : PUSHImm4;
         generateImmInstruction(pushOp, child, lowValue, cg);

         cg->decReferenceCount(child);
         return NULL;
         }
      else if (child->getOpCodeValue() == TR::dbits2l &&
               !child->normalizeNanValues() &&
               child->getReferenceCount() == 1)
         {
         // A single-use bit-reinterpretation of a double can be pushed
         // directly as a double argument.
         pushRegister = pushDoubleArg(child->getFirstChild(), cg);
         cg->decReferenceCount(child);
         return pushRegister;
         }
      else if (child->getOpCode().isMemoryReference() &&
               child->getReferenceCount() == 1)
         {
         // Push both words straight from memory: high word (offset 4) first.
         TR::MemoryReference *lowMR = generateX86MemoryReference(child, cg);
         generateMemInstruction(PUSHMem, child, generateX86MemoryReference(*lowMR, 4, cg), cg);
         generateMemInstruction(PUSHMem, child, lowMR, cg);
         lowMR->decNodeReferenceCounts(cg);
         return NULL;
         }
      }

   // General case: evaluate into a register pair and push both halves.
   pushRegister = cg->evaluate(child);
   generateRegInstruction(PUSHReg, child, pushRegister->getHighOrder(), cg);
   generateRegInstruction(PUSHReg, child, pushRegister->getLowOrder(), cg);
   cg->decReferenceCount(child);
   return pushRegister;
   }
TR::Register* OMR::X86::TreeEvaluator::SIMDstoreEvaluator(TR::Node* node, TR::CodeGenerator* cg)
   {
   // Store a vector value to memory.  Only 16-byte stores are supported
   // (emitted as an unaligned MOVDQU); anything else asserts.
   bool isIndirect = node->getOpCode().isIndirect();
   TR::Node* valueNode = node->getChild(isIndirect ? 1 : 0);

   TR::MemoryReference* storeMR = generateX86MemoryReference(node, cg);
   storeMR = ConvertToPatchableMemoryReference(storeMR, node, cg);
   TR::Register* valueReg = cg->evaluate(valueNode);

   TR_X86OpCodes opCode = BADIA32Op;
   switch (node->getSize())
      {
      case 16:
         opCode = MOVDQUMemReg;
         break;
      default:
         if (cg->comp()->getOption(TR_TraceCG))
            traceMsg(cg->comp(), "Unsupported fill size: Node = %p\n", node);
         TR_ASSERT(false, "Unsupported fill size");
         break;
      }

   TR::Instruction* instr = generateMemRegInstruction(opCode, node, storeMR, valueReg, cg);
   cg->decReferenceCount(valueNode);
   storeMR->decNodeReferenceCounts(cg);

   // An indirect store can fault on a null base; record it as the implicit
   // exception point.
   if (isIndirect)
      cg->setImplicitExceptionPoint(instr);

   return NULL;
   }
TR::Register* OMR::X86::TreeEvaluator::SIMDloadEvaluator(TR::Node* node, TR::CodeGenerator* cg)
   {
   // Load a vector value from memory into a fresh vector register.  Only
   // 16-byte loads are supported (emitted as an unaligned MOVDQU).
   TR::MemoryReference* loadMR = generateX86MemoryReference(node, cg);
   loadMR = ConvertToPatchableMemoryReference(loadMR, node, cg);
   TR::Register* resultReg = cg->allocateRegister(TR_VRF);

   TR_X86OpCodes opCode = BADIA32Op;
   switch (node->getSize())
      {
      case 16:
         opCode = MOVDQURegMem;
         break;
      default:
         if (cg->comp()->getOption(TR_TraceCG))
            traceMsg(cg->comp(), "Unsupported fill size: Node = %p\n", node);
         TR_ASSERT(false, "Unsupported fill size");
         break;
      }

   TR::Instruction* instr = generateRegMemInstruction(opCode, node, resultReg, loadMR, cg);

   // An indirect load can fault on a null base; record it as the implicit
   // exception point.
   if (node->getOpCode().isIndirect())
      cg->setImplicitExceptionPoint(instr);

   node->setRegister(resultReg);
   loadMR->decNodeReferenceCounts(cg);
   return resultReg;
   }
// also handles ilload TR::Register *OMR::X86::AMD64::TreeEvaluator::lloadEvaluator(TR::Node *node, TR::CodeGenerator *cg) { TR::MemoryReference *sourceMR = generateX86MemoryReference(node, cg); TR::Register *reg = TR::TreeEvaluator::loadMemory(node, sourceMR, TR_RematerializableLong, node->getOpCode().isIndirect(), cg); reg->setMemRef(sourceMR); node->setRegister(reg); sourceMR->decNodeReferenceCounts(cg); return reg; }
TR::Register *IA32LinkageUtils::pushFloatArg(
      TR::Node *child,
      TR::CodeGenerator *cg)
   {
   // Push a single-precision argument (one 4-byte stack slot).  Returns the
   // evaluated register, or NULL when the value was pushed without
   // evaluating the child into a register.
   TR::Register *pushRegister;

   if (child->getRegister() == NULL)
      {
      if (child->getOpCodeValue() == TR::fconst)
         {
         // Push the raw IEEE bits as an immediate; the one-byte push form is
         // used when the bit pattern fits in [-128, 127].
         int32_t bits = child->getFloatBits();
         TR_X86OpCodes pushOp = (bits >= -128 && bits <= 127) ? PUSHImms : PUSHImm4;
         generateImmInstruction(pushOp, child, bits, cg);
         cg->decReferenceCount(child);
         return NULL;
         }
      else if (child->getReferenceCount() == 1)
         {
         if (child->getOpCode().isLoad())
            {
            // Single-use load: push straight from its memory location.
            TR::MemoryReference *sourceMR = generateX86MemoryReference(child, cg);
            generateMemInstruction(PUSHMem, child, sourceMR, cg);
            sourceMR->decNodeReferenceCounts(cg);
            cg->decReferenceCount(child);
            return NULL;
            }
         else if (child->getOpCodeValue() == TR::ibits2f)
            {
            // A bit-reinterpretation of an int can be pushed as an int.
            pushRegister = pushIntegerWordArg(child->getFirstChild(), cg);
            cg->decReferenceCount(child);
            return pushRegister;
            }
         }
      }

   // General case: evaluate, make room on the stack, then store the value
   // into the new slot (SSE store when the value lives in an XMM register,
   // x87 store otherwise).
   pushRegister = cg->evaluate(child);
   TR::RealRegister *espReal = cg->machine()->getRealRegister(TR::RealRegister::esp);
   generateRegImmInstruction(SUB4RegImms, child, espReal, 4, cg);

   if (cg->useSSEForSinglePrecision() && pushRegister->getKind() == TR_FPR)
      generateMemRegInstruction(MOVSSMemReg, child, generateX86MemoryReference(espReal, 0, cg), pushRegister, cg);
   else
      generateFPMemRegInstruction(FSTMemReg, child, generateX86MemoryReference(espReal, 0, cg), pushRegister, cg);

   cg->decReferenceCount(child);
   return pushRegister;
   }
/*
 * users should call the integerSubtractAnalyser or integerSubtractAnalyserWithExplicitOperands APIs instead of calling this one directly
 */
// Emits an integer subtraction (firstChild - secondChild) for `root`, choosing
// between reg-reg and reg-mem forms and copying the first operand when it is
// still needed by another parent.  `borrow`, when non-null, is a carry-in node
// (lusubb-style) whose borrow flag is materialized before the subtract.
// Returns the register holding the result.
TR::Register* TR_X86SubtractAnalyser::integerSubtractAnalyserImpl(TR::Node *root,
                                                                  TR::Node *firstChild,
                                                                  TR::Node *secondChild,
                                                                  TR_X86OpCodes regRegOpCode,
                                                                  TR_X86OpCodes regMemOpCode,
                                                                  TR_X86OpCodes copyOpCode,
                                                                  bool needsEflags,
                                                                  TR::Node *borrow)
   {
   TR::Register *targetRegister = NULL;
   TR::Register *firstRegister = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();
   setInputs(firstChild, firstRegister, secondChild, secondRegister);

   // True while an iconst first operand's load has been deferred (generated
   // after the second child is evaluated, or folded away entirely).
   bool loadedConst = false;

   needsEflags = needsEflags || NEED_CC(root);

   if (getEvalChild1())
      {
      // if firstChild and secondChild are the same node, then we should
      // evaluate (take the else path) so that the evaluate for the secondChild
      // below will get the correct/already-allocated register.
      if (firstRegister == 0 && firstChild->getOpCodeValue() == TR::iconst && (firstChild != secondChild))
         {
         // An iconst may have to be generated. The iconst will be generated after the
         // secondChild is evaluated. Set the loadedConst flag to true.
         loadedConst = true;
         }
      else
         {
         firstRegister = _cg->evaluate(firstChild);
         }
      }

   if (getEvalChild2())
      {
      secondRegister = _cg->evaluate(secondChild);
      // Evaluating the second child may have evaluated the first as a side
      // effect; pick up its register if so.
      if (firstChild->getRegister())
         {
         firstRegister = firstChild->getRegister();
         }
      else if (!loadedConst)
         {
         firstRegister = _cg->evaluate(firstChild);
         }
      }

   if (loadedConst)
      {
      if (firstRegister == 0)
         {
         // firstchild is an inconst and it has not been evaluated.
         // Generate the code for an iconst.
         firstRegister = _cg->allocateRegister();
         TR::TreeEvaluator::insertLoadConstant(firstChild, firstRegister, firstChild->getInt(), TR_RematerializableInt, _cg);
         }
      else
         {
         // firstchild was evaluated. The code for an iconst does not need to be generated.
         // Set the loadConst flag to false.
         loadedConst = false;
         }
      }

   // Materialize the incoming borrow (carry flag) before the subtract.
   if (borrow != 0)
      TR_X86ComputeCC::setCarryBorrow(borrow, true, _cg);

   if (getCopyReg1())
      {
      if (firstChild->getReferenceCount() > 1)
         {
         TR::Register *thirdReg;
         if (firstChild->getOpCodeValue() == TR::iconst && loadedConst)
            {
            // The freshly loaded constant register is not shared; clobber it.
            thirdReg = firstRegister;
            }
         else
            {
            if (secondChild->getReferenceCount() == 1 && secondRegister != 0 && !needsEflags && (borrow == 0))
               {
               // use one fewer registers if we negate the clobberable secondRegister and add
               // Don't do this though if condition codes are needed. The sequence
               // depends on the carry flag being valid as if a sub was done.
               //
               bool nodeIs64Bit = TR_X86OpCode(regRegOpCode).hasLongSource();
               generateRegInstruction(NEGReg(nodeIs64Bit), secondChild, secondRegister, _cg);
               thirdReg = secondRegister;
               secondRegister = firstRegister;
               regRegOpCode = ADDRegReg(nodeIs64Bit);
               }
            else
               {
               thirdReg = _cg->allocateRegister();
               generateRegRegInstruction(copyOpCode, root, thirdReg, firstRegister, _cg);
               }
            }
         targetRegister = thirdReg;
         if (getSubReg3Reg2())
            {
            generateRegRegInstruction(regRegOpCode, root, thirdReg, secondRegister, _cg);
            }
         else // assert getSubReg3Mem2() == true
            {
            TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
            generateRegMemInstruction(regMemOpCode, root, thirdReg, tempMR, _cg);
            tempMR->decNodeReferenceCounts(_cg);
            }
         }
      else
         {
         // First child is single-use: subtract in place, no copy needed.
         if (getSubReg3Reg2())
            {
            generateRegRegInstruction(regRegOpCode, root, firstRegister, secondRegister, _cg);
            }
         else // assert getSubReg3Mem2() == true
            {
            TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
            generateRegMemInstruction(regMemOpCode, root, firstRegister, tempMR, _cg);
            tempMR->decNodeReferenceCounts(_cg);
            }
         targetRegister = firstRegister;
         }
      }
   else if (getSubReg1Reg2())
      {
      generateRegRegInstruction(regRegOpCode, root, firstRegister, secondRegister, _cg);
      targetRegister = firstRegister;
      }
   else // assert getSubReg1Mem2() == true
      {
      TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
      generateRegMemInstruction(regMemOpCode, root, firstRegister, tempMR, _cg);
      targetRegister = firstRegister;
      tempMR->decNodeReferenceCounts(_cg);
      }

   return targetRegister;
   }
/*
 * users should call the longSubtractAnalyser or longSubtractAnalyserWithExplicitOperands APIs instead of calling this one directly
 */
// Emits a 64-bit subtraction on IA32 as a SUB/SBB pair over 32-bit register
// halves.  When a child is known to have a zero high word, the high-order
// operation degenerates to an SBB-with-zero and the child's evaluation may be
// folded to its 32-bit source.  Note: firstChild/secondChild are reference
// parameters and may be rewritten to point at a grandchild.
// Returns the register (pair) holding the result.
TR::Register* TR_X86SubtractAnalyser::longSubtractAnalyserImpl(TR::Node *root, TR::Node *&firstChild, TR::Node *&secondChild)
   {
   TR::Register *firstRegister = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();
   TR::Register *targetRegister = NULL;

   bool firstHighZero = false;
   bool secondHighZero = false;
   // Set only for a single-use (lushr x 32) second child: its *high* word is
   // the 32-bit value to subtract.
   bool useSecondHighOrder = false;

   TR_X86OpCodes regRegOpCode = SUB4RegReg;
   TR_X86OpCodes regMemOpCode = SUB4RegMem;

   bool needsEflags = NEED_CC(root) || (root->getOpCodeValue() == TR::lusubb);

   // Can generate better code for long adds when one or more children have a high order zero word
   // can avoid the evaluation when we don't need the result of such nodes for another parent.
   //
   if (firstChild->isHighWordZero() && !needsEflags)
      {
      firstHighZero = true;
      }

   if (secondChild->isHighWordZero() && !needsEflags)
      {
      secondHighZero = true;
      TR::ILOpCodes secondOp = secondChild->getOpCodeValue();
      if (secondChild->getReferenceCount() == 1 && secondRegister == 0)
         {
         // Fold a single-use widening conversion (or a shift right by exactly
         // 32) down to its 32-bit source, skipping the conversion entirely.
         if (secondOp == TR::iu2l || secondOp == TR::su2l ||
             secondOp == TR::bu2l ||
             (secondOp == TR::lushr &&
              secondChild->getSecondChild()->getOpCodeValue() == TR::iconst &&
              (secondChild->getSecondChild()->getInt() & TR::TreeEvaluator::shiftMask(true)) == 32))
            {
            secondChild = secondChild->getFirstChild();
            secondRegister = secondChild->getRegister();
            if (secondOp == TR::lushr)
               {
               useSecondHighOrder = true;
               }
            }
         }
      }

   setInputs(firstChild, firstRegister, secondChild, secondRegister);

   // Volatile operands must not be re-read from memory by the subtract.
   if (isVolatileMemoryOperand(firstChild))
      resetMem1();

   if (isVolatileMemoryOperand(secondChild))
      resetMem2();

   if (getEvalChild1())
      {
      firstRegister = _cg->evaluate(firstChild);
      }

   if (getEvalChild2())
      {
      secondRegister = _cg->evaluate(secondChild);
      }

   // If the folded second child still produced a register pair, select the
   // half that actually carries the 32-bit value.
   if (secondHighZero && secondRegister && secondRegister->getRegisterPair())
      {
      if (!useSecondHighOrder)
         {
         secondRegister = secondRegister->getLowOrder();
         }
      else
         {
         secondRegister = secondRegister->getHighOrder();
         }
      }

   if (root->getOpCodeValue() == TR::lusubb &&
       TR_X86ComputeCC::setCarryBorrow(root->getChild(2), true, _cg))
      {
      // use SBB rather than SUB
      //
      regRegOpCode = SBB4RegReg;
      regMemOpCode = SBB4RegMem;
      }

   if (getCopyReg1())
      {
      // First operand is still live elsewhere: subtract into a fresh pair.
      TR::Register     *lowThird  = _cg->allocateRegister();
      TR::Register     *highThird = _cg->allocateRegister();
      TR::RegisterPair *thirdReg  = _cg->allocateRegisterPair(lowThird, highThird);
      targetRegister = thirdReg;
      generateRegRegInstruction(MOV4RegReg, root, lowThird, firstRegister->getLowOrder(), _cg);
      if (firstHighZero)
         {
         generateRegRegInstruction(XOR4RegReg, root, highThird, highThird, _cg);
         }
      else
         {
         generateRegRegInstruction(MOV4RegReg, root, highThird, firstRegister->getHighOrder(), _cg);
         }

      if (getSubReg3Reg2())
         {
         if (secondHighZero)
            {
            generateRegRegInstruction(regRegOpCode, root, lowThird, secondRegister, _cg);
            generateRegImmInstruction(SBB4RegImms, root, highThird, 0, _cg);
            }
         else
            {
            generateRegRegInstruction(regRegOpCode, root, lowThird, secondRegister->getLowOrder(), _cg);
            generateRegRegInstruction(SBB4RegReg, root, highThird, secondRegister->getHighOrder(), _cg);
            }
         }
      else // assert getSubReg3Mem2() == true
         {
         TR::MemoryReference *lowMR = generateX86MemoryReference(secondChild, _cg);
         /**
          * The below code is needed to ensure correct behaviour when the subtract analyser encounters a lushr bytecode that shifts
          * by 32 bits. This is the only case where the useSecondHighOrder bit is set.
          * When the first child of the lushr is in a register, code above handles the shift. When the first child of the lushr is in
          * memory, the below ensures that the upper part of the first child of the lushr is used as lowMR.
          */
         if (useSecondHighOrder)
            {
            TR_ASSERT(secondHighZero, "useSecondHighOrder should be consistent with secondHighZero. useSecondHighOrder subsumes secondHighZero");
            lowMR = generateX86MemoryReference(*lowMR, 4, _cg);
            }

         generateRegMemInstruction(regMemOpCode, root, lowThird, lowMR, _cg);
         if (secondHighZero)
            {
            generateRegImmInstruction(SBB4RegImms, root, highThird, 0, _cg);
            }
         else
            {
            TR::MemoryReference *highMR = generateX86MemoryReference(*lowMR, 4, _cg);
            generateRegMemInstruction(SBB4RegMem, root, highThird, highMR, _cg);
            }
         lowMR->decNodeReferenceCounts(_cg);
         }
      }
   else if (getSubReg1Reg2())
      {
      // Subtract in place into the first operand's register pair.
      if (secondHighZero)
         {
         generateRegRegInstruction(regRegOpCode, root, firstRegister->getLowOrder(), secondRegister, _cg);
         generateRegImmInstruction(SBB4RegImms, root, firstRegister->getHighOrder(), 0, _cg);
         }
      else
         {
         generateRegRegInstruction(regRegOpCode, root, firstRegister->getLowOrder(), secondRegister->getLowOrder(), _cg);
         generateRegRegInstruction(SBB4RegReg, root, firstRegister->getHighOrder(), secondRegister->getHighOrder(), _cg);
         }
      targetRegister = firstRegister;
      }
   else // assert getSubReg1Mem2() == true
      {
      TR::MemoryReference *lowMR = generateX86MemoryReference(secondChild, _cg);
      /**
       * The below code is needed to ensure correct behaviour when the subtract analyser encounters a lushr bytecode that shifts
       * by 32 bits. This is the only case where the useSecondHighOrder bit is set.
       * When the first child of the lushr is in a register, code above handles the shift. When the first child of the lushr is in
       * memory, the below ensures that the upper part of the first child of the lushr is used as lowMR.
       */
      if (useSecondHighOrder)
         lowMR = generateX86MemoryReference(*lowMR, 4, _cg);

      generateRegMemInstruction(regMemOpCode, root, firstRegister->getLowOrder(), lowMR, _cg);
      if (secondHighZero)
         {
         generateRegImmInstruction(SBB4RegImms, root, firstRegister->getHighOrder(), 0, _cg);
         }
      else
         {
         TR::MemoryReference *highMR = generateX86MemoryReference(*lowMR, 4, _cg);
         generateRegMemInstruction(SBB4RegMem, root, firstRegister->getHighOrder(), highMR, _cg);
         }
      targetRegister = firstRegister;
      lowMR->decNodeReferenceCounts(_cg);
      }

   return targetRegister;
   }
// Emits an x87 floating-point compare for `root`, choosing operand order,
// folding memory operands, and using FTST when one operand is an unevaluated
// zero.  With FCOMI-style opcodes the result lands directly in EFLAGS and
// NULL is returned; otherwise the FPU status word is copied into EAX via
// FNSTSW and that register is returned.
TR::Register *TR_X86FPCompareAnalyser::fpCompareAnalyser(TR::Node       *root,
                                                         TR_X86OpCodes  cmpRegRegOpCode,
                                                         TR_X86OpCodes  cmpRegMemOpCode,
                                                         TR_X86OpCodes  cmpiRegRegOpCode,
                                                         bool           useFCOMIInstructions)
   {
   TR::Node      *firstChild, *secondChild;
   TR::ILOpCodes  cmpOp = root->getOpCodeValue();
   bool           reverseMemOp = false;
   bool           reverseCmpOp = false;
   TR::Compilation* comp = _cg->comp();
   TR_X86OpCodes  cmpInstr = useFCOMIInstructions ? cmpiRegRegOpCode : cmpRegRegOpCode;

   // Some operators must have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool mustSwapOperands = (cmpOp == TR::iffcmple ||
                            cmpOp == TR::ifdcmple ||
                            cmpOp == TR::iffcmpgtu ||
                            cmpOp == TR::ifdcmpgtu ||
                            cmpOp == TR::fcmple ||
                            cmpOp == TR::dcmple ||
                            cmpOp == TR::fcmpgtu ||
                            cmpOp == TR::dcmpgtu ||
                            (useFCOMIInstructions &&
                             (cmpOp == TR::iffcmplt ||
                              cmpOp == TR::ifdcmplt ||
                              cmpOp == TR::iffcmpgeu ||
                              cmpOp == TR::ifdcmpgeu ||
                              cmpOp == TR::fcmplt ||
                              cmpOp == TR::dcmplt ||
                              cmpOp == TR::fcmpgeu ||
                              cmpOp == TR::dcmpgeu))) ? true : false;

   // Some operators should not have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool preventOperandSwapping = (cmpOp == TR::iffcmpltu ||
                                  cmpOp == TR::ifdcmpltu ||
                                  cmpOp == TR::iffcmpge ||
                                  cmpOp == TR::ifdcmpge ||
                                  cmpOp == TR::fcmpltu ||
                                  cmpOp == TR::dcmpltu ||
                                  cmpOp == TR::fcmpge ||
                                  cmpOp == TR::dcmpge ||
                                  (useFCOMIInstructions &&
                                   (cmpOp == TR::iffcmpgt ||
                                    cmpOp == TR::ifdcmpgt ||
                                    cmpOp == TR::iffcmpleu ||
                                    cmpOp == TR::ifdcmpleu ||
                                    cmpOp == TR::fcmpgt ||
                                    cmpOp == TR::dcmpgt ||
                                    cmpOp == TR::fcmpleu ||
                                    cmpOp == TR::dcmpleu))) ? true : false;

   // For correctness, don't swap operands of these operators.
   //
   if (cmpOp == TR::fcmpg || cmpOp == TR::fcmpl ||
       cmpOp == TR::dcmpg || cmpOp == TR::dcmpl)
      {
      preventOperandSwapping = true;
      }

   // Initial operand evaluation ordering.
   //
   if (preventOperandSwapping || (!mustSwapOperands && _cg->whichChildToEvaluate(root) == 0))
      {
      firstChild  = root->getFirstChild();
      secondChild = root->getSecondChild();
      setReversedOperands(false);
      }
   else
      {
      firstChild  = root->getSecondChild();
      secondChild = root->getFirstChild();
      setReversedOperands(true);
      }

   TR::Register *firstRegister  = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();

   setInputs(firstChild,
             firstRegister,
             secondChild,
             secondRegister,
             useFCOMIInstructions,

             // If either 'preventOperandSwapping' or 'mustSwapOperands' is set then the
             // initial operand ordering set above must be maintained.
             //
             preventOperandSwapping || mustSwapOperands);

   // Make sure any required operand ordering is respected.
   //
   if ((getCmpReg2Reg1() || getCmpReg2Mem1()) &&
       (mustSwapOperands || preventOperandSwapping))
      {
      reverseCmpOp = getCmpReg2Reg1() ? true : false;
      reverseMemOp = getCmpReg2Mem1() ? true : false;
      }

   // If we are not comparing with a memory operand, one of them evaluates
   // to a zero, and the zero is not already on the stack, then we can use
   // FTST to save a register.
   //
   // (With a memory operand, either the constant zero needs to be loaded
   // to use FCOM, or the memory operand needs to be loaded to use FTST,
   // so there is no gain in using FTST.)
   //
   // If the constant zero is in the target register, using FTST means the
   // comparison will be reversed. We cannot do this if the initial ordering
   // of the operands must be maintained.
   //
   // Finally, if FTST is used and this is the last use of the target, the
   // target register may need to be explicitly popped.
   //
   TR::Register *targetRegisterForFTST = NULL;
   TR::Node     *targetChildForFTST = NULL;

   if (getEvalChild1() && isUnevaluatedZero(firstChild))  // do we need getEvalChild1() here?
      {
      if ( ((getCmpReg1Reg2() || reverseCmpOp) && !(preventOperandSwapping || mustSwapOperands)) ||
            (getCmpReg2Reg1() && !reverseCmpOp))
         {
         if (getEvalChild2())
            {
            secondRegister = _cg->evaluate(secondChild);
            }
         targetRegisterForFTST = secondRegister;
         targetChildForFTST = secondChild;
         notReversedOperands();
         }
      }
   else if (getEvalChild2() && isUnevaluatedZero(secondChild))  // do we need getEvalChild2() here?
      {
      if ( (getCmpReg1Reg2() || reverseCmpOp) ||
           (getCmpReg2Reg1() && !reverseCmpOp && !(preventOperandSwapping || mustSwapOperands)) )
         {
         if (getEvalChild1())
            {
            firstRegister = _cg->evaluate(firstChild);
            }
         targetRegisterForFTST = firstRegister;
         targetChildForFTST = firstChild;
         }
      }

   if (!targetRegisterForFTST)
      {
      // If we have a choice, evaluate the target operand last. By doing so, we
      // help out the register assigner because the target must be TOS. This
      // avoids an unneccessary FXCH for the target.
      //
      if (getEvalChild1() && getEvalChild2())
         {
         if (getCmpReg1Reg2() || getCmpReg1Mem2())
            {
            secondRegister = _cg->evaluate(secondChild);
            firstRegister  = _cg->evaluate(firstChild);
            }
         else
            {
            firstRegister  = _cg->evaluate(firstChild);
            secondRegister = _cg->evaluate(secondChild);
            }
         }
      else
         {
         if (getEvalChild1())
            {
            firstRegister = _cg->evaluate(firstChild);
            }
         if (getEvalChild2())
            {
            secondRegister = _cg->evaluate(secondChild);
            }
         }
      }

   // Adjust the FP precision of feeding operands.
   //
   if (firstRegister &&
       (firstRegister->needsPrecisionAdjustment() ||
        comp->getOption(TR_StrictFPCompares) ||
        (firstRegister->mayNeedPrecisionAdjustment() && secondChild->getOpCode().isLoadConst()) ||
        (firstRegister->mayNeedPrecisionAdjustment() && !secondRegister)))
      {
      TR::TreeEvaluator::insertPrecisionAdjustment(firstRegister, root, _cg);
      }

   if (secondRegister &&
       (secondRegister->needsPrecisionAdjustment() ||
        comp->getOption(TR_StrictFPCompares) ||
        (secondRegister->mayNeedPrecisionAdjustment() && firstChild->getOpCode().isLoadConst()) ||
        (secondRegister->mayNeedPrecisionAdjustment() && !firstRegister)))
      {
      TR::TreeEvaluator::insertPrecisionAdjustment(secondRegister, root, _cg);
      }

   // Generate the compare instruction.
   //
   if (targetRegisterForFTST)
      {
      generateFPRegInstruction(FTSTReg, root, targetRegisterForFTST, _cg);
      }
   else if (!useFCOMIInstructions && (getCmpReg1Mem2() || reverseMemOp))
      {
      TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
      generateFPRegMemInstruction(cmpRegMemOpCode, root, firstRegister, tempMR, _cg);
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (!useFCOMIInstructions && getCmpReg2Mem1())
      {
      TR::MemoryReference *tempMR = generateX86MemoryReference(firstChild, _cg);
      generateFPRegMemInstruction(cmpRegMemOpCode, root, secondRegister, tempMR, _cg);
      notReversedOperands();
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (getCmpReg1Reg2() || reverseCmpOp)
      {
      generateFPCompareRegRegInstruction(cmpInstr, root, firstRegister, secondRegister, _cg);
      }
   else if (getCmpReg2Reg1())
      {
      generateFPCompareRegRegInstruction(cmpInstr, root, secondRegister, firstRegister, _cg);
      notReversedOperands();
      }

   _cg->decReferenceCount(firstChild);
   _cg->decReferenceCount(secondChild);

   // Evaluate the comparison.
   //
   if (getReversedOperands())
      {
      // The compare ended up reversed; recreate the root with the swapped
      // comparison opcode so downstream consumers see a consistent tree.
      cmpOp = TR::ILOpCode(cmpOp).getOpCodeForSwapChildren();
      TR::Node::recreate(root, cmpOp);
      }

   if (useFCOMIInstructions && !targetRegisterForFTST)
      {
      return NULL;
      }

   // We must manually move the FP condition flags to the EFLAGS register if we don't
   // use the FCOMI instructions.
   //
   TR::Register *accRegister = _cg->allocateRegister();
   TR::RegisterDependencyConditions  *dependencies = generateRegisterDependencyConditions((uint8_t)1, 1, _cg);
   dependencies->addPreCondition(accRegister, TR::RealRegister::eax, _cg);
   dependencies->addPostCondition(accRegister, TR::RealRegister::eax, _cg);
   generateRegInstruction(STSWAcc, root, accRegister, dependencies, _cg);

   // Pop the FTST target register if it is not used any more.
   //
   if (targetRegisterForFTST &&
       targetChildForFTST && targetChildForFTST->getReferenceCount() == 0)
      {
      generateFPSTiST0RegRegInstruction(FSTRegReg, root, targetRegisterForFTST, targetRegisterForFTST, _cg);
      }

   return accRegister;
   }
// Emits an SSE (XMM) floating-point compare for `root`.  Mirrors the x87
// fpCompareAnalyser's operand-ordering rules, but the UCOMISS/UCOMISD-style
// opcodes set EFLAGS directly, so no status-word shuffling is needed and the
// function always returns NULL.
TR::Register *TR_IA32XMMCompareAnalyser::xmmCompareAnalyser(TR::Node       *root,
                                                            TR_X86OpCodes  cmpRegRegOpCode,
                                                            TR_X86OpCodes  cmpRegMemOpCode)
   {
   TR::Node      *firstChild, *secondChild;
   TR::ILOpCodes  cmpOp = root->getOpCodeValue();
   bool           reverseMemOp = false;
   bool           reverseCmpOp = false;

   // Some operators must have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool mustSwapOperands = (cmpOp == TR::iffcmple ||
                            cmpOp == TR::ifdcmple ||
                            cmpOp == TR::iffcmpgtu ||
                            cmpOp == TR::ifdcmpgtu ||
                            cmpOp == TR::fcmple ||
                            cmpOp == TR::dcmple ||
                            cmpOp == TR::fcmpgtu ||
                            cmpOp == TR::dcmpgtu ||
                            cmpOp == TR::iffcmplt ||
                            cmpOp == TR::ifdcmplt ||
                            cmpOp == TR::iffcmpgeu ||
                            cmpOp == TR::ifdcmpgeu ||
                            cmpOp == TR::fcmplt ||
                            cmpOp == TR::dcmplt ||
                            cmpOp == TR::fcmpgeu ||
                            cmpOp == TR::dcmpgeu) ? true : false;

   // Some operators should not have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool preventOperandSwapping = (cmpOp == TR::iffcmpltu ||
                                  cmpOp == TR::ifdcmpltu ||
                                  cmpOp == TR::iffcmpge ||
                                  cmpOp == TR::ifdcmpge ||
                                  cmpOp == TR::fcmpltu ||
                                  cmpOp == TR::dcmpltu ||
                                  cmpOp == TR::fcmpge ||
                                  cmpOp == TR::dcmpge ||
                                  cmpOp == TR::iffcmpgt ||
                                  cmpOp == TR::ifdcmpgt ||
                                  cmpOp == TR::iffcmpleu ||
                                  cmpOp == TR::ifdcmpleu ||
                                  cmpOp == TR::fcmpgt ||
                                  cmpOp == TR::dcmpgt ||
                                  cmpOp == TR::fcmpleu ||
                                  cmpOp == TR::dcmpleu) ? true : false;

   // For correctness, don't swap operands of these operators.
   //
   if (cmpOp == TR::fcmpg || cmpOp == TR::fcmpl ||
       cmpOp == TR::dcmpg || cmpOp == TR::dcmpl)
      {
      preventOperandSwapping = true;
      }

   // Initial operand evaluation ordering.
   //
   if (preventOperandSwapping || (!mustSwapOperands && _cg->whichChildToEvaluate(root) == 0))
      {
      firstChild  = root->getFirstChild();
      secondChild = root->getSecondChild();
      setReversedOperands(false);
      }
   else
      {
      firstChild  = root->getSecondChild();
      secondChild = root->getFirstChild();
      setReversedOperands(true);
      }

   TR::Register *firstRegister  = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();

   setInputs(firstChild,
             firstRegister,
             secondChild,
             secondRegister,
             false,

             // If either 'preventOperandSwapping' or 'mustSwapOperands' is set then the
             // initial operand ordering set above must be maintained.
             //
             preventOperandSwapping || mustSwapOperands);

   // Make sure any required operand ordering is respected.
   //
   if ((getCmpReg2Reg1() || getCmpReg2Mem1()) &&
       (mustSwapOperands || preventOperandSwapping))
      {
      reverseCmpOp = getCmpReg2Reg1() ? true : false;
      reverseMemOp = getCmpReg2Mem1() ? true : false;
      }

   // Evaluate the children if necessary.
   //
   if (getEvalChild1())
      {
      _cg->evaluate(firstChild);
      }

   if (getEvalChild2())
      {
      _cg->evaluate(secondChild);
      }

   // Operands may have been produced in x87 registers; move them into XMMs,
   // then re-fetch the (possibly changed) registers.
   TR::TreeEvaluator::coerceFPOperandsToXMMRs(root, _cg);

   firstRegister  = firstChild->getRegister();
   secondRegister = secondChild->getRegister();

   // Generate the compare instruction.
   //
   if (getCmpReg1Mem2() || reverseMemOp)
      {
      TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
      generateRegMemInstruction(cmpRegMemOpCode, root, firstRegister, tempMR, _cg);
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (getCmpReg2Mem1())
      {
      TR::MemoryReference *tempMR = generateX86MemoryReference(firstChild, _cg);
      generateRegMemInstruction(cmpRegMemOpCode, root, secondRegister, tempMR, _cg);
      notReversedOperands();
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (getCmpReg1Reg2() || reverseCmpOp)
      {
      generateRegRegInstruction(cmpRegRegOpCode, root, firstRegister, secondRegister, _cg);
      }
   else if (getCmpReg2Reg1())
      {
      generateRegRegInstruction(cmpRegRegOpCode, root, secondRegister, firstRegister, _cg);
      notReversedOperands();
      }

   _cg->decReferenceCount(firstChild);
   _cg->decReferenceCount(secondChild);

   // Update the opcode on the root node if we have swapped its children.
   // TODO: Reverse the children too, or else this looks wrong in the log file
   //
   if (getReversedOperands())
      {
      cmpOp = TR::ILOpCode(cmpOp).getOpCodeForSwapChildren();
      TR::Node::recreate(root, cmpOp);
      }

   return NULL;
   }
TR::Register *IA32LinkageUtils::pushIntegerWordArg(
      TR::Node *child,
      TR::CodeGenerator *cg)
   {
   // Push a 32-bit argument (one stack slot).  Returns the evaluated
   // register, or NULL when the value was pushed without evaluating the
   // child into a register.
   TR::Register *pushRegister;

   if (child->getRegister() == NULL)
      {
      if (child->getOpCode().isLoadConst())
         {
         // Push the constant as an immediate; the one-byte push form is used
         // when the value fits in [-128, 127].
         int32_t value = child->getInt();
         TR_X86OpCodes pushOp = (value >= -128 && value <= 127) ? PUSHImms : PUSHImm4;
         generateImmInstruction(pushOp, child, value, cg);
         cg->decReferenceCount(child);
         return NULL;
         }
      else if (child->getOpCodeValue() == TR::loadaddr)
         {
         // A resolved static address can be pushed as an immediate.  If the
         // symbol is not static, fall through to the general evaluate path.
         TR::SymbolReference *symRef = child->getSymbolReference();
         TR::StaticSymbol *sym = symRef->getSymbol()->getStaticSymbol();
         if (sym)
            {
            TR_ASSERT(!symRef->isUnresolved(), "pushIntegerWordArg loadaddr expecting resolved symbol");
            generateImmSymInstruction(PUSHImm4, child, (uintptrj_t)sym->getStaticAddress(), symRef, cg);
            cg->decReferenceCount(child);
            return NULL;
            }
         }
      else if (child->getOpCodeValue() == TR::fbits2i &&
               !child->normalizeNanValues() &&
               child->getReferenceCount() == 1)
         {
         // A single-use bit-reinterpretation of a float can be pushed
         // directly as a float argument.
         pushRegister = pushFloatArg(child->getFirstChild(), cg);
         cg->decReferenceCount(child);
         return pushRegister;
         }
      else if (child->getOpCode().isMemoryReference() &&
               (child->getReferenceCount() == 1) &&
               (child->getSymbolReference() != cg->comp()->getSymRefTab()->findVftSymbolRef()))
         {
         // Single-use memory operand: push straight from memory.
         TR::MemoryReference *sourceMR = generateX86MemoryReference(child, cg);
         generateMemInstruction(PUSHMem, child, sourceMR, cg);
         sourceMR->decNodeReferenceCounts(cg);
         cg->decReferenceCount(child);
         return NULL;
         }
      }

   // General case: evaluate into a register and push it.
   pushRegister = cg->evaluate(child);
   generateRegInstruction(PUSHReg, child, pushRegister, cg);
   cg->decReferenceCount(child);
   return pushRegister;
   }
TR::Register *IA32LinkageUtils::pushDoubleArg(
      TR::Node *child,
      TR::CodeGenerator *cg)
   {
   // Push a double-precision argument onto the stack as two 32-bit slots,
   // high word first.  Returns the evaluated register, or NULL when the
   // value was pushed without evaluating the child into a register.
   TR::Register *pushRegister;

   if (child->getRegister() == NULL)
      {
      if (child->getOpCodeValue() == TR::dconst)
         {
         // Push each half of the constant as an immediate; the one-byte
         // sign-extended push form is used when the half fits in [-128, 127].
         TR_X86OpCodes pushOp;
         int32_t highValue = child->getLongIntHigh();
         if (highValue >= -128 && highValue <= 127)
            {
            pushOp = PUSHImms;
            }
         else
            {
            pushOp = PUSHImm4;
            }
         generateImmInstruction(pushOp, child, highValue, cg);

         int32_t lowValue = child->getLongIntLow();
         if (lowValue >= -128 && lowValue <= 127)
            {
            pushOp = PUSHImms;
            }
         else
            {
            pushOp = PUSHImm4;
            }
         generateImmInstruction(pushOp, child, lowValue, cg);

         cg->decReferenceCount(child);
         return NULL;
         }
      else if (child->getReferenceCount() == 1)
         {
         if (child->getOpCode().isLoad())
            {
            // Push both words straight from memory: high word (offset 4) first.
            TR::MemoryReference *lowMR = generateX86MemoryReference(child, cg);
            generateMemInstruction(PUSHMem, child, generateX86MemoryReference(*lowMR, 4, cg), cg);
            generateMemInstruction(PUSHMem, child, lowMR, cg);
            lowMR->decNodeReferenceCounts(cg);
            cg->decReferenceCount(child);
            return NULL;
            }
         else if (child->getOpCodeValue() == TR::lbits2d)
            {
            // A bit-reinterpretation of a long can be pushed as a long.
            pushRegister = pushLongArg(child->getFirstChild(), cg);
            cg->decReferenceCount(child);
            return pushRegister;
            }
         }
      }

   // General case: evaluate, make room for 8 bytes, then store the value
   // into the new slots.
   pushRegister = cg->evaluate(child);
   TR::RealRegister *espReal = cg->machine()->getRealRegister(TR::RealRegister::esp);
   generateRegImmInstruction(SUB4RegImms, child, espReal, 8, cg);

   // Fix: select the store form based on the DOUBLE-precision SSE setting
   // (this previously consulted useSSEForSinglePrecision(), which is the
   // float path's query — see pushFloatArg).  MOVSD when the value is in an
   // XMM register, x87 DST otherwise.
   if (cg->useSSEForDoublePrecision() && pushRegister->getKind() == TR_FPR)
      generateMemRegInstruction(MOVSDMemReg, child, generateX86MemoryReference(espReal, 0, cg), pushRegister, cg);
   else
      generateFPMemRegInstruction(DSTMemReg, child, generateX86MemoryReference(espReal, 0, cg), pushRegister, cg);

   cg->decReferenceCount(child);
   return pushRegister;
   }