TR::Register *IA32LinkageUtils::pushLongArg(
      TR::Node *child,
      TR::CodeGenerator *cg)
   {
   TR::Register *pushRegister;
   if (child->getRegister() == NULL)
      {
      if (child->getOpCode().isLoadConst())
         {
         TR_X86OpCodes pushOp;
         int32_t highValue = child->getLongIntHigh();
         if (highValue >= -128 && highValue <= 127)
            {
            pushOp = PUSHImms;
            }
         else
            {
            pushOp = PUSHImm4;
            }
         generateImmInstruction(pushOp, child, highValue, cg);

         int32_t lowValue = child->getLongIntLow();
         if (lowValue >= -128 && lowValue <= 127)
            {
            pushOp = PUSHImms;
            }
         else
            {
            pushOp = PUSHImm4;
            }
         generateImmInstruction(pushOp, child, lowValue, cg);

         cg->decReferenceCount(child);
         return NULL;
         }
      else if (child->getOpCodeValue() == TR::dbits2l &&
               !child->normalizeNanValues() &&
               child->getReferenceCount() == 1)
         {
         pushRegister = pushDoubleArg(child->getFirstChild(), cg);
         cg->decReferenceCount(child);
         return pushRegister;
         }
      else if (child->getOpCode().isMemoryReference() &&
               child->getReferenceCount() == 1)
         {
         TR::MemoryReference *lowMR = generateX86MemoryReference(child, cg);
         generateMemInstruction(PUSHMem, child, generateX86MemoryReference(*lowMR, 4, cg), cg);
         generateMemInstruction(PUSHMem, child, lowMR, cg);
         lowMR->decNodeReferenceCounts(cg);
         return NULL;
         }
      }

   pushRegister = cg->evaluate(child);
   generateRegInstruction(PUSHReg, child, pushRegister->getHighOrder(), cg);
   generateRegInstruction(PUSHReg, child, pushRegister->getLowOrder(), cg);
   cg->decReferenceCount(child);
   return pushRegister;
   }
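// Illustrative note (not part of the original source): the high half is pushed
// before the low half, so on x86's descending stack the low word lands at the
// lower address and the pair reads back as one little-endian 64-bit value. For
// a hypothetical constant 0x1122334455667788 this emits:
//
//    push 0x11223344   ; high word, PUSHImm4
//    push 0x55667788   ; low word, PUSHImm4
//
// PUSHImms (the sign-extended imm8 form of PUSH) is chosen whenever a word fits
// in [-128, 127], saving three bytes of encoding per push.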
TR::Register *TR::AMD64SystemLinkage::buildIndirectDispatch(TR::Node *callNode)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR_ASSERT(methodSymRef->getSymbol()->castToMethodSymbol()->isComputed(),
             "system linkage only supports computed indirect call for now %p\n",
             callNode);

   // Evaluate VFT
   //
   TR::Register *vftRegister;
   TR::Node *vftNode = callNode->getFirstChild();
   if (vftNode->getRegister())
      {
      vftRegister = vftNode->getRegister();
      }
   else
      {
      vftRegister = cg()->evaluate(vftNode);
      }

   // Allocate adequate register dependencies.
   //
   // pre  = number of argument registers + 1 for VFT register
   // post = number of volatile + VMThread + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters()
                  + getProperties().getNumFloatArgumentRegisters()
                  + 1;
   uint32_t post = getProperties().getNumVolatileRegisters()
                   + 1
                   + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *callDeps = generateRegisterDependencyConditions(pre, 1, cg());

   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   callDeps->addPostCondition(vftRegister, scratchRegIndex, cg());
   callDeps->stopAddingPostConditions();

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, callDeps);

   // Dispatch
   //
   generateRegInstruction(CALLReg, callNode, vftRegister, callDeps, cg());
   cg()->resetIsLeafMethod();

   // Build label post-conditions
   //
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());
   TR::Register *returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
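// Illustrative sizing (assuming the System V AMD64 ABI, which this linkage
// typically targets): 6 integer argument registers + 8 float argument registers
// + 1 VFT register gives pre = 15; post covers every volatile register, the VM
// thread register, and one extra slot for the return register when the call
// produces a value.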
TR::Register *OMR::X86::AMD64::TreeEvaluator::lcmpEvaluator(TR::Node *node, TR::CodeGenerator *cg)
   {
   TR::Node *firstChild = node->getFirstChild();
   TR::Node *secondChild = node->getSecondChild();
   TR::Register *leftRegister = cg->evaluate(firstChild);
   TR::Register *rightRegister = cg->evaluate(secondChild);

   // Compare the left and right operands; we are finished with them after this.
   generateRegRegInstruction(CMP8RegReg, node, leftRegister, rightRegister, cg);
   cg->decReferenceCount(firstChild);
   cg->decReferenceCount(secondChild);

   TR::Register *isLessThanReg = cg->allocateRegister();
   TR::Register *isNotEqualReg = cg->allocateRegister();
   cg->getLiveRegisters(TR_GPR)->setByteRegisterAssociation(isLessThanReg);
   cg->getLiveRegisters(TR_GPR)->setByteRegisterAssociation(isNotEqualReg);

   // The state of things in each possible case after each instruction:
   //
   //                          left < right   left = right   left > right
   // Processor flags:         NE=1 LT=1      NE=0 LT=0      NE=1 LT=0
   //
   generateRegInstruction(SETL1Reg, node, isLessThanReg, cg);
   // isLessThanReg:           00000001       00000000       00000000
   generateRegInstruction(SETNE1Reg, node, isNotEqualReg, cg);
   // isNotEqualReg:           00000001       00000000       00000001
   generateRegInstruction(NEG1Reg, node, isLessThanReg, cg);
   // isLessThanReg:           11111111       00000000       00000000
   generateRegRegInstruction(OR1RegReg, node, isNotEqualReg, isLessThanReg, cg);
   // isNotEqualReg:           11111111       00000000       00000001
   generateRegRegInstruction(MOVSXReg4Reg1, node, isNotEqualReg, isNotEqualReg, cg);

   node->setRegister(isNotEqualReg);
   cg->stopUsingRegister(isLessThanReg);
   return isNotEqualReg;
   }
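// A minimal C sketch (added for illustration, not part of the original source)
// of what the SETL/SETNE/NEG/OR/MOVSX sequence above computes:
//
//    int32_t lcmp(int64_t a, int64_t b)
//       {
//       int8_t lt = -(int8_t)(a < b);       // after NEG1Reg:  0x00 or 0xFF
//       int8_t ne = (int8_t)(a != b);       // after SETNE1Reg: 0 or 1
//       return (int32_t)(int8_t)(lt | ne);  // after MOVSXReg4Reg1: -1, 0, or 1
//       }
//
// i.e. the canonical lcmp result (-1, 0, or 1) without any branches.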
/*
 * Users should call the integerSubtractAnalyser or integerSubtractAnalyserWithExplicitOperands
 * APIs instead of calling this one directly.
 */
TR::Register *TR_X86SubtractAnalyser::integerSubtractAnalyserImpl(TR::Node *root,
                                                                  TR::Node *firstChild,
                                                                  TR::Node *secondChild,
                                                                  TR_X86OpCodes regRegOpCode,
                                                                  TR_X86OpCodes regMemOpCode,
                                                                  TR_X86OpCodes copyOpCode,
                                                                  bool needsEflags,
                                                                  TR::Node *borrow)
   {
   TR::Register *targetRegister = NULL;
   TR::Register *firstRegister = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();
   setInputs(firstChild, firstRegister, secondChild, secondRegister);

   bool loadedConst = false;

   needsEflags = needsEflags || NEED_CC(root);

   if (getEvalChild1())
      {
      // If firstChild and secondChild are the same node, then we should
      // evaluate (take the else path) so that the evaluate for the secondChild
      // below will get the correct/already-allocated register.
      if (firstRegister == 0 && firstChild->getOpCodeValue() == TR::iconst && (firstChild != secondChild))
         {
         // An iconst may have to be generated. The iconst will be generated after the
         // secondChild is evaluated. Set the loadedConst flag to true.
         loadedConst = true;
         }
      else
         {
         firstRegister = _cg->evaluate(firstChild);
         }
      }

   if (getEvalChild2())
      {
      secondRegister = _cg->evaluate(secondChild);
      if (firstChild->getRegister())
         {
         firstRegister = firstChild->getRegister();
         }
      else if (!loadedConst)
         {
         firstRegister = _cg->evaluate(firstChild);
         }
      }

   if (loadedConst)
      {
      if (firstRegister == 0)
         {
         // The firstChild is an iconst and it has not been evaluated.
         // Generate the code for an iconst.
         firstRegister = _cg->allocateRegister();
         TR::TreeEvaluator::insertLoadConstant(firstChild, firstRegister, firstChild->getInt(), TR_RematerializableInt, _cg);
         }
      else
         {
         // The firstChild was evaluated. The code for an iconst does not need to be
         // generated. Set the loadedConst flag to false.
         loadedConst = false;
         }
      }

   if (borrow != 0)
      TR_X86ComputeCC::setCarryBorrow(borrow, true, _cg);

   if (getCopyReg1())
      {
      if (firstChild->getReferenceCount() > 1)
         {
         TR::Register *thirdReg;
         if (firstChild->getOpCodeValue() == TR::iconst && loadedConst)
            {
            thirdReg = firstRegister;
            }
         else
            {
            if (secondChild->getReferenceCount() == 1 && secondRegister != 0 && !needsEflags && (borrow == 0))
               {
               // Use one fewer register by negating the clobberable secondRegister and adding.
               // Don't do this, though, if condition codes are needed: the sequence
               // depends on the carry flag being valid as if a sub was done.
               //
               bool nodeIs64Bit = TR_X86OpCode(regRegOpCode).hasLongSource();
               generateRegInstruction(NEGReg(nodeIs64Bit), secondChild, secondRegister, _cg);
               thirdReg = secondRegister;
               secondRegister = firstRegister;
               regRegOpCode = ADDRegReg(nodeIs64Bit);
               }
            else
               {
               thirdReg = _cg->allocateRegister();
               generateRegRegInstruction(copyOpCode, root, thirdReg, firstRegister, _cg);
               }
            }

         targetRegister = thirdReg;
         if (getSubReg3Reg2())
            {
            generateRegRegInstruction(regRegOpCode, root, thirdReg, secondRegister, _cg);
            }
         else // assert getSubReg3Mem2() == true
            {
            TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
            generateRegMemInstruction(regMemOpCode, root, thirdReg, tempMR, _cg);
            tempMR->decNodeReferenceCounts(_cg);
            }
         }
      else
         {
         if (getSubReg3Reg2())
            {
            generateRegRegInstruction(regRegOpCode, root, firstRegister, secondRegister, _cg);
            }
         else // assert getSubReg3Mem2() == true
            {
            TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
            generateRegMemInstruction(regMemOpCode, root, firstRegister, tempMR, _cg);
            tempMR->decNodeReferenceCounts(_cg);
            }
         targetRegister = firstRegister;
         }
      }
   else if (getSubReg1Reg2())
      {
      generateRegRegInstruction(regRegOpCode, root, firstRegister, secondRegister, _cg);
      targetRegister = firstRegister;
      }
   else // assert getSubReg1Mem2() == true
      {
      TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
      generateRegMemInstruction(regMemOpCode, root, firstRegister, tempMR, _cg);
      targetRegister = firstRegister;
      tempMR->decNodeReferenceCounts(_cg);
      }

   return targetRegister;
   }
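// Illustrative only: when secondRegister is clobberable, the branch above replaces
//
//    mov r3, rFirst       ; copy needed to preserve rFirst
//    sub r3, rSecond      ; r3 = a - b
//
// with
//
//    neg rSecond          ; rSecond = -b
//    add rSecond, rFirst  ; rSecond = a - b
//
// saving the copy register. NEG sets CF whenever its operand is nonzero, which
// differs from SUB's borrow semantics; hence the !needsEflags guard.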
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
      TR::Node *callNode,
      bool spillFPRegs)
   {
   TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
   TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
   TR::Register *returnReg;

   // Allocate adequate register dependencies.
   //
   // pre  = number of argument registers
   // post = number of volatile + return register
   //
   uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
   uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);

#if defined (PYTHON) && 0
   // Treat all preserved GP regs as volatile until register map support available.
   //
   post += getProperties().getNumberOfPreservedGPRegisters();
#endif

   TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
   TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());

   // Evaluate outgoing arguments on the system stack and build pre-conditions.
   //
   int32_t memoryArgSize = buildArgs(callNode, preDeps);

   // Build post-conditions.
   //
   returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
   postDeps->stopAddingPostConditions();

   // Find the second scratch register in the post dependency list.
   //
   TR::Register *scratchReg = NULL;
   TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
   for (int32_t i = 0; i < post; i++)
      {
      if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
         {
         scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
         break;
         }
      }

#if defined(PYTHON) && 0
   // For Python, store the instruction that contains the GC map at this site into
   // the frame object.
   //
   TR::SymbolReference *frameObjectSymRef =
      comp()->getSymRefTab()->findOrCreateAutoSymbol(comp()->getMethodSymbol(), 0, TR::Address, true, false, true);

   TR::Register *frameObjectRegister = cg()->allocateRegister();
   generateRegMemInstruction(
      L8RegMem,
      callNode,
      frameObjectRegister,
      generateX86MemoryReference(frameObjectSymRef, cg()),
      cg());

   TR::RealRegister *espReal = cg()->machine()->getX86RealRegister(TR::RealRegister::esp);
   TR::Register *gcMapPCRegister = cg()->allocateRegister();

   generateRegMemInstruction(
      LEA8RegMem,
      callNode,
      gcMapPCRegister,
      generateX86MemoryReference(espReal, -8, cg()),
      cg());

   // Use "volatile" registers across the call. Once proper register map support
   // is implemented, r14 and r15 will no longer be volatile and a different pair
   // should be chosen.
   //
   TR::RegisterDependencyConditions *gcMapDeps = generateRegisterDependencyConditions(0, 2, cg());
   gcMapDeps->addPostCondition(frameObjectRegister, TR::RealRegister::r14, cg());
   gcMapDeps->addPostCondition(gcMapPCRegister, TR::RealRegister::r15, cg());
   gcMapDeps->stopAddingPostConditions();

   generateMemRegInstruction(
      S8MemReg,
      callNode,
      generateX86MemoryReference(frameObjectRegister, fe()->getPythonGCMapPCOffsetInFrame(), cg()),
      gcMapPCRegister,
      gcMapDeps,
      cg());

   cg()->stopUsingRegister(frameObjectRegister);
   cg()->stopUsingRegister(gcMapPCRegister);
#endif

   TR::Instruction *instr;
   if (methodSymbol->getMethodAddress())
      {
      TR_ASSERT(scratchReg, "could not find second scratch register");
      auto LoadRegisterInstruction = generateRegImm64SymInstruction(
         MOV8RegImm64,
         callNode,
         scratchReg,
         (uintptr_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         cg());

      if (TR::Options::getCmdLineOptions()->getOption(TR_EmitRelocatableELFFile))
         {
         LoadRegisterInstruction->setReloKind(TR_NativeMethodAbsolute);
         }

      instr = generateRegInstruction(CALLReg, callNode, scratchReg, preDeps, cg());
      }
   else
      {
      instr = generateImmSymInstruction(
         CALLImm4,
         callNode,
         (uintptrj_t)methodSymbol->getMethodAddress(),
         methodSymRef,
         preDeps,
         cg());
      }

   cg()->resetIsLeafMethod();

   instr->setNeedsGCMap(getProperties().getPreservedRegisterMapForGC());

   cg()->stopUsingRegister(scratchReg);

   TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
   generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());

   return returnReg;
   }
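// Illustrative note: when the method address is known at compile time, it is
// materialized with MOV8RegImm64 and called through the scratch register rather
// than with a direct call, because CALLImm4 (call rel32) can only reach targets
// within +/-2GB of the call site; the register-indirect form covers the full
// 64-bit address space and also gives the relocation machinery a single
// instruction to patch (see the TR_NativeMethodAbsolute reloKind above).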
TR::Register *TR_X86FPCompareAnalyser::fpCompareAnalyser(TR::Node *root,
                                                         TR_X86OpCodes cmpRegRegOpCode,
                                                         TR_X86OpCodes cmpRegMemOpCode,
                                                         TR_X86OpCodes cmpiRegRegOpCode,
                                                         bool useFCOMIInstructions)
   {
   TR::Node *firstChild, *secondChild;
   TR::ILOpCodes cmpOp = root->getOpCodeValue();
   bool reverseMemOp = false;
   bool reverseCmpOp = false;
   TR::Compilation *comp = _cg->comp();
   TR_X86OpCodes cmpInstr = useFCOMIInstructions ? cmpiRegRegOpCode : cmpRegRegOpCode;

   // Some operators must have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool mustSwapOperands = (cmpOp == TR::iffcmple ||
                            cmpOp == TR::ifdcmple ||
                            cmpOp == TR::iffcmpgtu ||
                            cmpOp == TR::ifdcmpgtu ||
                            cmpOp == TR::fcmple ||
                            cmpOp == TR::dcmple ||
                            cmpOp == TR::fcmpgtu ||
                            cmpOp == TR::dcmpgtu ||
                            (useFCOMIInstructions &&
                             (cmpOp == TR::iffcmplt ||
                              cmpOp == TR::ifdcmplt ||
                              cmpOp == TR::iffcmpgeu ||
                              cmpOp == TR::ifdcmpgeu ||
                              cmpOp == TR::fcmplt ||
                              cmpOp == TR::dcmplt ||
                              cmpOp == TR::fcmpgeu ||
                              cmpOp == TR::dcmpgeu))) ? true : false;

   // Some operators should not have their operands swapped to improve the generated
   // code needed to evaluate the result of the comparison.
   //
   bool preventOperandSwapping = (cmpOp == TR::iffcmpltu ||
                                  cmpOp == TR::ifdcmpltu ||
                                  cmpOp == TR::iffcmpge ||
                                  cmpOp == TR::ifdcmpge ||
                                  cmpOp == TR::fcmpltu ||
                                  cmpOp == TR::dcmpltu ||
                                  cmpOp == TR::fcmpge ||
                                  cmpOp == TR::dcmpge ||
                                  (useFCOMIInstructions &&
                                   (cmpOp == TR::iffcmpgt ||
                                    cmpOp == TR::ifdcmpgt ||
                                    cmpOp == TR::iffcmpleu ||
                                    cmpOp == TR::ifdcmpleu ||
                                    cmpOp == TR::fcmpgt ||
                                    cmpOp == TR::dcmpgt ||
                                    cmpOp == TR::fcmpleu ||
                                    cmpOp == TR::dcmpleu))) ? true : false;

   // For correctness, don't swap operands of these operators.
   //
   if (cmpOp == TR::fcmpg || cmpOp == TR::fcmpl ||
       cmpOp == TR::dcmpg || cmpOp == TR::dcmpl)
      {
      preventOperandSwapping = true;
      }

   // Initial operand evaluation ordering.
   //
   if (preventOperandSwapping || (!mustSwapOperands && _cg->whichChildToEvaluate(root) == 0))
      {
      firstChild = root->getFirstChild();
      secondChild = root->getSecondChild();
      setReversedOperands(false);
      }
   else
      {
      firstChild = root->getSecondChild();
      secondChild = root->getFirstChild();
      setReversedOperands(true);
      }

   TR::Register *firstRegister = firstChild->getRegister();
   TR::Register *secondRegister = secondChild->getRegister();

   setInputs(firstChild,
             firstRegister,
             secondChild,
             secondRegister,
             useFCOMIInstructions,

             // If either 'preventOperandSwapping' or 'mustSwapOperands' is set then the
             // initial operand ordering set above must be maintained.
             //
             preventOperandSwapping || mustSwapOperands);

   // Make sure any required operand ordering is respected.
   //
   if ((getCmpReg2Reg1() || getCmpReg2Mem1()) &&
       (mustSwapOperands || preventOperandSwapping))
      {
      reverseCmpOp = getCmpReg2Reg1() ? true : false;
      reverseMemOp = getCmpReg2Mem1() ? true : false;
      }

   // If we are not comparing with a memory operand, one of them evaluates
   // to a zero, and the zero is not already on the stack, then we can use
   // FTST to save a register.
   //
   // (With a memory operand, either the constant zero needs to be loaded
   // to use FCOM, or the memory operand needs to be loaded to use FTST,
   // so there is no gain in using FTST.)
   //
   // If the constant zero is in the target register, using FTST means the
   // comparison will be reversed. We cannot do this if the initial ordering
   // of the operands must be maintained.
   //
   // Finally, if FTST is used and this is the last use of the target, the
   // target register may need to be explicitly popped.
   //
   TR::Register *targetRegisterForFTST = NULL;
   TR::Node *targetChildForFTST = NULL;

   if (getEvalChild1() && isUnevaluatedZero(firstChild)) // do we need getEvalChild1() here?
      {
      if (((getCmpReg1Reg2() || reverseCmpOp) && !(preventOperandSwapping || mustSwapOperands)) ||
          (getCmpReg2Reg1() && !reverseCmpOp))
         {
         if (getEvalChild2())
            {
            secondRegister = _cg->evaluate(secondChild);
            }
         targetRegisterForFTST = secondRegister;
         targetChildForFTST = secondChild;
         notReversedOperands();
         }
      }
   else if (getEvalChild2() && isUnevaluatedZero(secondChild)) // do we need getEvalChild2() here?
      {
      if ((getCmpReg1Reg2() || reverseCmpOp) ||
          (getCmpReg2Reg1() && !reverseCmpOp && !(preventOperandSwapping || mustSwapOperands)))
         {
         if (getEvalChild1())
            {
            firstRegister = _cg->evaluate(firstChild);
            }
         targetRegisterForFTST = firstRegister;
         targetChildForFTST = firstChild;
         }
      }

   if (!targetRegisterForFTST)
      {
      // If we have a choice, evaluate the target operand last. By doing so, we
      // help out the register assigner because the target must be TOS. This
      // avoids an unnecessary FXCH for the target.
      //
      if (getEvalChild1() && getEvalChild2())
         {
         if (getCmpReg1Reg2() || getCmpReg1Mem2())
            {
            secondRegister = _cg->evaluate(secondChild);
            firstRegister = _cg->evaluate(firstChild);
            }
         else
            {
            firstRegister = _cg->evaluate(firstChild);
            secondRegister = _cg->evaluate(secondChild);
            }
         }
      else
         {
         if (getEvalChild1())
            {
            firstRegister = _cg->evaluate(firstChild);
            }
         if (getEvalChild2())
            {
            secondRegister = _cg->evaluate(secondChild);
            }
         }
      }

   // Adjust the FP precision of feeding operands.
   //
   if (firstRegister &&
       (firstRegister->needsPrecisionAdjustment() ||
        comp->getOption(TR_StrictFPCompares) ||
        (firstRegister->mayNeedPrecisionAdjustment() && secondChild->getOpCode().isLoadConst()) ||
        (firstRegister->mayNeedPrecisionAdjustment() && !secondRegister)))
      {
      TR::TreeEvaluator::insertPrecisionAdjustment(firstRegister, root, _cg);
      }

   if (secondRegister &&
       (secondRegister->needsPrecisionAdjustment() ||
        comp->getOption(TR_StrictFPCompares) ||
        (secondRegister->mayNeedPrecisionAdjustment() && firstChild->getOpCode().isLoadConst()) ||
        (secondRegister->mayNeedPrecisionAdjustment() && !firstRegister)))
      {
      TR::TreeEvaluator::insertPrecisionAdjustment(secondRegister, root, _cg);
      }

   // Generate the compare instruction.
   //
   if (targetRegisterForFTST)
      {
      generateFPRegInstruction(FTSTReg, root, targetRegisterForFTST, _cg);
      }
   else if (!useFCOMIInstructions && (getCmpReg1Mem2() || reverseMemOp))
      {
      TR::MemoryReference *tempMR = generateX86MemoryReference(secondChild, _cg);
      generateFPRegMemInstruction(cmpRegMemOpCode, root, firstRegister, tempMR, _cg);
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (!useFCOMIInstructions && getCmpReg2Mem1())
      {
      TR::MemoryReference *tempMR = generateX86MemoryReference(firstChild, _cg);
      generateFPRegMemInstruction(cmpRegMemOpCode, root, secondRegister, tempMR, _cg);
      notReversedOperands();
      tempMR->decNodeReferenceCounts(_cg);
      }
   else if (getCmpReg1Reg2() || reverseCmpOp)
      {
      generateFPCompareRegRegInstruction(cmpInstr, root, firstRegister, secondRegister, _cg);
      }
   else if (getCmpReg2Reg1())
      {
      generateFPCompareRegRegInstruction(cmpInstr, root, secondRegister, firstRegister, _cg);
      notReversedOperands();
      }

   _cg->decReferenceCount(firstChild);
   _cg->decReferenceCount(secondChild);

   // Evaluate the comparison.
   //
   if (getReversedOperands())
      {
      cmpOp = TR::ILOpCode(cmpOp).getOpCodeForSwapChildren();
      TR::Node::recreate(root, cmpOp);
      }

   if (useFCOMIInstructions && !targetRegisterForFTST)
      {
      return NULL;
      }

   // We must manually move the FP condition flags to the EFLAGS register if we don't
   // use the FCOMI instructions.
   //
   TR::Register *accRegister = _cg->allocateRegister();
   TR::RegisterDependencyConditions *dependencies = generateRegisterDependencyConditions((uint8_t)1, 1, _cg);
   dependencies->addPreCondition(accRegister, TR::RealRegister::eax, _cg);
   dependencies->addPostCondition(accRegister, TR::RealRegister::eax, _cg);
   generateRegInstruction(STSWAcc, root, accRegister, dependencies, _cg);

   // Pop the FTST target register if it is not used any more.
   //
   if (targetRegisterForFTST &&
       targetChildForFTST && targetChildForFTST->getReferenceCount() == 0)
      {
      generateFPSTiST0RegRegInstruction(FSTRegReg, root, targetRegisterForFTST, targetRegisterForFTST, _cg);
      }

   return accRegister;
   }
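// Background note (standard x87 behaviour, added as an aside): FNSTSW AX, the
// instruction behind STSWAcc, copies the x87 status word into AX so that the
// condition codes land in AH (C0 -> bit 0, C2 -> bit 2, C3 -> bit 6). A
// subsequent SAHF maps them onto CF, PF, and ZF, which is how callers of this
// analyser can branch on integer flags when the FCOMI forms are unavailable.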
TR::Register *IA32LinkageUtils::pushIntegerWordArg(
      TR::Node *child,
      TR::CodeGenerator *cg)
   {
   TR::Register *pushRegister;
   if (child->getRegister() == NULL)
      {
      if (child->getOpCode().isLoadConst())
         {
         int32_t value = child->getInt();
         TR_X86OpCodes pushOp;
         if (value >= -128 && value <= 127)
            {
            pushOp = PUSHImms;
            }
         else
            {
            pushOp = PUSHImm4;
            }

         generateImmInstruction(pushOp, child, value, cg);
         cg->decReferenceCount(child);
         return NULL;
         }
      else if (child->getOpCodeValue() == TR::loadaddr)
         {
         TR::SymbolReference *symRef = child->getSymbolReference();
         TR::StaticSymbol *sym = symRef->getSymbol()->getStaticSymbol();
         if (sym)
            {
            TR_ASSERT(!symRef->isUnresolved(), "pushIntegerWordArg loadaddr expecting resolved symbol");
            generateImmSymInstruction(PUSHImm4, child, (uintptrj_t)sym->getStaticAddress(), symRef, cg);
            cg->decReferenceCount(child);
            return NULL;
            }
         }
      else if (child->getOpCodeValue() == TR::fbits2i &&
               !child->normalizeNanValues() &&
               child->getReferenceCount() == 1)
         {
         pushRegister = pushFloatArg(child->getFirstChild(), cg);
         cg->decReferenceCount(child);
         return pushRegister;
         }
      else if (child->getOpCode().isMemoryReference() &&
               (child->getReferenceCount() == 1) &&
               (child->getSymbolReference() != cg->comp()->getSymRefTab()->findVftSymbolRef()))
         {
         TR::MemoryReference *tempMR = generateX86MemoryReference(child, cg);
         generateMemInstruction(PUSHMem, child, tempMR, cg);
         tempMR->decNodeReferenceCounts(cg);
         cg->decReferenceCount(child);
         return NULL;
         }
      }

   pushRegister = cg->evaluate(child);
   generateRegInstruction(PUSHReg, child, pushRegister, cg);
   cg->decReferenceCount(child);
   return pushRegister;
   }
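// Note (added for clarity): TR::fbits2i reinterprets a float's bit pattern as an
// int32. When no NaN normalization is required and the node is not shared, the
// four bytes pushed for the float are bit-identical to the reinterpreted integer,
// so the code above forwards straight to pushFloatArg and skips any conversion.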
void TR_OutlinedInstructions::generateOutlinedInstructionsDispatch()
   {
   // Switch to cold helper instruction stream.
   //
   TR::Register *vmThreadReg = _cg->getMethodMetaDataRegister();
   TR::Instruction *savedFirstInstruction = comp()->getFirstInstruction();
   TR::Instruction *savedAppendInstruction = comp()->getAppendInstruction();
   comp()->setFirstInstruction(NULL);
   comp()->setAppendInstruction(NULL);

   new (_cg->trHeapMemory()) TR::X86LabelInstruction(NULL, LABEL, _entryLabel, _cg);

   if (_rematerializeVMThread)
      {
      generateRegInstruction(PUSHReg, _callNode, vmThreadReg, _cg);
      generateRestoreVMThreadInstruction(_callNode, _cg);
      TR::MemoryReference *vmThreadMR =
         generateX86MemoryReference(vmThreadReg, (TR::Compiler->target.is64Bit()) ? 16 : 8, _cg);
      generateRegMemInstruction(LRegMem(), _callNode, vmThreadReg, vmThreadMR, _cg);
      }

   TR::Register *resultReg = NULL;
   if (_callNode->getOpCode().isCallIndirect())
      resultReg = TR::TreeEvaluator::performCall(_callNode, true, false, _cg);
   else
      resultReg = TR::TreeEvaluator::performCall(_callNode, false, false, _cg);

   if (_rematerializeVMThread)
      {
      generateRegInstruction(POPReg, _callNode, vmThreadReg, _cg);
      }

   if (_targetReg)
      {
      TR_ASSERT(resultReg, "assertion failure");
      TR::RegisterPair *targetRegPair = _targetReg->getRegisterPair();
      TR::RegisterPair *resultRegPair = resultReg->getRegisterPair();
      if (targetRegPair)
         {
         TR_ASSERT(resultRegPair, "OutlinedInstructions: targetReg is a register pair and resultReg is not");
         generateRegRegInstruction(_targetRegMovOpcode, _callNode, targetRegPair->getLowOrder(), resultRegPair->getLowOrder(), _cg);
         generateRegRegInstruction(_targetRegMovOpcode, _callNode, targetRegPair->getHighOrder(), resultRegPair->getHighOrder(), _cg);
         }
      else
         {
         TR_ASSERT(!resultRegPair, "OutlinedInstructions: resultReg is a register pair and targetReg is not");
         generateRegRegInstruction(_targetRegMovOpcode, _callNode, _targetReg, resultReg, _cg);
         }
      }

   _cg->decReferenceCount(_callNode);

   if (_restartLabel)
      generateLabelInstruction(JMP4, _callNode, _restartLabel, _cg);
   else
      {
      // Java-specific.
      //
      // No restart label implies we're not coming back from this call,
      // so it's safe to put data after the call. In the case of calling a throw
      // helper, there's an ancient busted handshake that expects to find a 4-byte
      // offset here, so we have to comply...
      //
      // When the handshake is removed, we can delete this zero.
      //
      generateImmInstruction(DDImm4, _callNode, 0, _cg);
      }

   // Dummy label to delimit the end of the helper call dispatch sequence (for exception ranges).
   //
   generateLabelInstruction(LABEL, _callNode, TR::LabelSymbol::create(_cg->trHeapMemory(), _cg), _cg);

   // Switch from cold helper instruction stream.
   //
   _firstInstruction = comp()->getFirstInstruction();
   _appendInstruction = comp()->getAppendInstruction();
   comp()->setFirstInstruction(savedFirstInstruction);
   comp()->setAppendInstruction(savedAppendInstruction);
   }
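// Schematic of the resulting code layout (illustrative):
//
//    Mainline:                        Outlined (cold) section:
//       ...                              _entryLabel:
//       jcc  _entryLabel  ; slow path       <helper call + result move>
//    _restartLabel:                         jmp _restartLabel
//       ...
//
// The dispatch is assembled in a detached instruction stream (the compilation's
// first/append instruction pointers are swapped out and restored), so the cold
// sequence can later be attached without disturbing the mainline code.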