SDValue LanaiTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, SDLoc DL, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to a location SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_Lanai32); SDValue Flag; SmallVector<SDValue, 4> RetOps(1, Chain); // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag); // Guarantee that all emitted copies are stuck together with flags. Flag = Chain.getValue(1); RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); } // The Lanai ABI for returning structs by value requires that we copy // the sret argument into rv for the return. We saved the argument into // a virtual register in the entry block, so now we copy the value out // and into rv. if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { MachineFunction &MF = DAG.getMachineFunction(); LanaiMachineFunctionInfo *LanaiMFI = MF.getInfo<LanaiMachineFunctionInfo>(); unsigned Reg = LanaiMFI->getSRetReturnReg(); assert(Reg && "SRetReturnReg should have been set in LowerFormalArguments()."); SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout())); Chain = DAG.getCopyToReg(Chain, DL, Lanai::RV, Val, Flag); Flag = Chain.getValue(1); RetOps.push_back( DAG.getRegister(Lanai::RV, getPointerTy(DAG.getDataLayout()))); } RetOps[0] = Chain; // Update chain unsigned Opc = LanaiISD::RET_FLAG; if (Flag.getNode()) RetOps.push_back(Flag); return DAG.getNode(Opc, DL, MVT::Other, ArrayRef<SDValue>(&RetOps[0], RetOps.size())); }
SDValue AlphaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, DebugLoc dl, SelectionDAG &DAG) const { SDValue Copy = DAG.getCopyToReg(Chain, dl, Alpha::R26, DAG.getNode(AlphaISD::GlobalRetAddr, DebugLoc(), MVT::i64), SDValue()); switch (Outs.size()) { default: llvm_unreachable("Do not know how to return this many arguments!"); case 0: break; //return SDValue(); // ret void is legal case 1: { EVT ArgVT = Outs[0].Val.getValueType(); unsigned ArgReg; if (ArgVT.isInteger()) ArgReg = Alpha::R0; else { assert(ArgVT.isFloatingPoint()); ArgReg = Alpha::F0; } Copy = DAG.getCopyToReg(Copy, dl, ArgReg, Outs[0].Val, Copy.getValue(1)); if (DAG.getMachineFunction().getRegInfo().liveout_empty()) DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg); break; } case 2: { EVT ArgVT = Outs[0].Val.getValueType(); unsigned ArgReg1, ArgReg2; if (ArgVT.isInteger()) { ArgReg1 = Alpha::R0; ArgReg2 = Alpha::R1; } else { assert(ArgVT.isFloatingPoint()); ArgReg1 = Alpha::F0; ArgReg2 = Alpha::F1; } Copy = DAG.getCopyToReg(Copy, dl, ArgReg1, Outs[0].Val, Copy.getValue(1)); if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(), DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg1) == DAG.getMachineFunction().getRegInfo().liveout_end()) DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg1); Copy = DAG.getCopyToReg(Copy, dl, ArgReg2, Outs[1].Val, Copy.getValue(1)); if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(), DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg2) == DAG.getMachineFunction().getRegInfo().liveout_end()) DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg2); break; } } return DAG.getNode(AlphaISD::RET_FLAG, dl, MVT::Other, Copy, Copy.getValue(1)); }
SDValue MipsTargetLowering:: LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { if (!Subtarget->isMips1()) return Op; MachineFunction &MF = DAG.getMachineFunction(); unsigned CCReg = AddLiveIn(MF, Mips::FCR31, Mips::CCRRegisterClass); SDValue Chain = DAG.getEntryNode(); DebugLoc dl = Op.getDebugLoc(); SDValue Src = Op.getOperand(0); // Set the condition register SDValue CondReg = DAG.getCopyFromReg(Chain, dl, CCReg, MVT::i32); CondReg = DAG.getCopyToReg(Chain, dl, Mips::AT, CondReg); CondReg = DAG.getCopyFromReg(CondReg, dl, Mips::AT, MVT::i32); SDValue Cst = DAG.getConstant(3, MVT::i32); SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i32, CondReg, Cst); Cst = DAG.getConstant(2, MVT::i32); SDValue Xor = DAG.getNode(ISD::XOR, dl, MVT::i32, Or, Cst); SDValue InFlag(0, 0); CondReg = DAG.getCopyToReg(Chain, dl, Mips::FCR31, Xor, InFlag); // Emit the round instruction and bit convert to integer SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, MVT::f32, Src, CondReg.getValue(1)); SDValue BitCvt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Trunc); return BitCvt; }
static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { SDOperand Copy = DAG.getCopyToReg(Op.getOperand(0), Alpha::R26, DAG.getNode(AlphaISD::GlobalRetAddr, MVT::i64), SDOperand()); switch (Op.getNumOperands()) { default: assert(0 && "Do not know how to return this many arguments!"); abort(); case 1: break; //return SDOperand(); // ret void is legal case 3: { MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); unsigned ArgReg; if (MVT::isInteger(ArgVT)) ArgReg = Alpha::R0; else { assert(MVT::isFloatingPoint(ArgVT)); ArgReg = Alpha::F0; } Copy = DAG.getCopyToReg(Copy, ArgReg, Op.getOperand(1), Copy.getValue(1)); if (DAG.getMachineFunction().liveout_empty()) DAG.getMachineFunction().addLiveOut(ArgReg); break; } } return DAG.getNode(AlphaISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1)); }
SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const { SDValue Src = Op.getOperand(2); if (isa<FrameIndexSDNode>(Src.getNode())) { // CopyToReg nodes don't support FrameIndex operands. Other targets select // the FI to some LEA-like instruction, but since we don't have that, we // need to insert some kind of instruction that can take an FI operand and // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy // copy_local between Op and its FI operand. SDValue Chain = Op.getOperand(0); SDLoc DL(Op); unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg(); EVT VT = Src.getValueType(); SDValue Copy( DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_LOCAL_I32 : WebAssembly::COPY_LOCAL_I64, DL, VT, Src), 0); return Op.getNode()->getNumValues() == 1 ? DAG.getCopyToReg(Chain, DL, Reg, Copy) : DAG.getCopyToReg(Chain, DL, Reg, Copy, Op.getNumOperands() == 4 ? Op.getOperand(3) : SDValue()); } return SDValue(); }
SDValue VectorProcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc DL, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); // CCValAssign - represent the assignment of the return value to locations. SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), DAG.getTarget(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_VectorProc32); SDValue Flag; SmallVector<SDValue, 4> RetOps(1, Chain); // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag); // Guarantee that all emitted copies are stuck together with flags. Flag = Chain.getValue(1); RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); } if (MF.getFunction()->hasStructRetAttr()) { VectorProcMachineFunctionInfo *SFI = MF.getInfo<VectorProcMachineFunctionInfo>(); unsigned Reg = SFI->getSRetReturnReg(); if (!Reg) llvm_unreachable("sret virtual register not created in the entry block"); SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy()); Chain = DAG.getCopyToReg(Chain, DL, VectorProc::S0, Val, Flag); Flag = Chain.getValue(1); RetOps.push_back(DAG.getRegister(VectorProc::S0, getPointerTy())); } RetOps[0] = Chain; // Update chain. // Add the flag if we have it. if (Flag.getNode()) RetOps.push_back(Flag); return DAG.getNode(VectorProcISD::RET_FLAG, DL, MVT::Other, &RetOps[0], RetOps.size()); }
SDValue LanaiTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); SDValue Size = Op.getOperand(1); SDLoc DL(Op); unsigned SPReg = getStackPointerRegisterToSaveRestore(); // Get a reference to the stack pointer. SDValue StackPointer = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i32); // Subtract the dynamic size from the actual stack size to // obtain the new stack size. SDValue Sub = DAG.getNode(ISD::SUB, DL, MVT::i32, StackPointer, Size); // For Lanai, the outgoing memory arguments area should be on top of the // alloca area on the stack i.e., the outgoing memory arguments should be // at a lower address than the alloca area. Move the alloca area down the // stack by adding back the space reserved for outgoing arguments to SP // here. // // We do not know what the size of the outgoing args is at this point. // So, we add a pseudo instruction ADJDYNALLOC that will adjust the // stack pointer. We replace this instruction with one that has the correct, // known offset in emitPrologue(). SDValue ArgAdjust = DAG.getNode(LanaiISD::ADJDYNALLOC, DL, MVT::i32, Sub); // The Sub result contains the new stack start address, so it // must be placed in the stack pointer register. SDValue CopyChain = DAG.getCopyToReg(Chain, DL, SPReg, Sub); SDValue Ops[2] = {ArgAdjust, CopyChain}; return DAG.getMergeValues(Ops, DL); }
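// --- Illustrative addition (not from the Lanai sources): the kind of source
// construct that reaches the LowerDYNAMIC_STACKALLOC hook above is a
// dynamically sized stack allocation. A non-constant alloca survives as a
// dynamic `alloca` in the IR, which SelectionDAG builds as an
// ISD::DYNAMIC_STACKALLOC node; the function and variable names below are
// hypothetical.
#include <alloca.h>
#include <cstring>

int sumIntoScratch(const int *Src, int N) {
  // Dynamic size: cannot be folded into the fixed frame, so it is lowered
  // through ISD::DYNAMIC_STACKALLOC (SP is decremented by the size; on Lanai
  // the ADJDYNALLOC pseudo later re-adds the outgoing-argument area).
  int *Buf = static_cast<int *>(alloca(N * sizeof(int)));
  std::memcpy(Buf, Src, N * sizeof(int));
  int Total = 0;
  for (int I = 0; I < N; ++I)
    Total += Buf[I];
  return Total;
}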
//===----------------------------------------------------------------------===// // Misc Lower Operation implementation //===----------------------------------------------------------------------===// SDValue Cpu0TargetLowering:: LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); Cpu0FunctionInfo *Cpu0FI = MF.getInfo<Cpu0FunctionInfo>(); unsigned SP = Cpu0::SP; assert(getTargetMachine().getFrameLowering()->getStackAlignment() >= cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue() && "Cannot lower if the alignment of the allocated space is larger than \ that of the stack."); SDValue Chain = Op.getOperand(0); SDValue Size = Op.getOperand(1); DebugLoc dl = Op.getDebugLoc(); // Get a reference from Cpu0 stack pointer SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SP, getPointerTy()); // Subtract the dynamic size from the actual stack size to // obtain the new stack size. SDValue Sub = DAG.getNode(ISD::SUB, dl, getPointerTy(), StackPointer, Size); // The Sub result contains the new stack start address, so it // must be placed in the stack pointer register. Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, SP, Sub, SDValue()); // This node always has two return values: a new stack pointer // value and a chain SDVTList VTLs = DAG.getVTList(getPointerTy(), MVT::Other); SDValue Ptr = DAG.getFrameIndex(Cpu0FI->getDynAllocFI(), getPointerTy()); SDValue Ops[] = { Chain, Ptr, Chain.getValue(1) }; return DAG.getNode(Cpu0ISD::DynAlloc, dl, VTLs, Ops, 3); }
SDValue BlackfinTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to locations. SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), DAG.getTarget(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_Blackfin); // If this is the first return lowered for this function, add the regs to the // liveout set for the function. if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { for (unsigned i = 0; i != RVLocs.size(); ++i) DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); SDValue Opi = OutVals[i]; // Expand to i32 if necessary switch (VA.getLocInfo()) { default: llvm_unreachable("Unknown loc info!"); case CCValAssign::Full: break; case CCValAssign::SExt: Opi = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Opi); break; case CCValAssign::ZExt: Opi = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Opi); break; case CCValAssign::AExt: Opi = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Opi); break; } Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Opi, SDValue()); // Guarantee that all emitted copies are stuck together with flags. Flag = Chain.getValue(1); } if (Flag.getNode()) { return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain, Flag); } else { return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain); } }
SDValue PTXTargetLowering:: LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, SelectionDAG &DAG) const { if (isVarArg) llvm_unreachable("PTX does not support varargs"); switch (CallConv) { default: llvm_unreachable("Unsupported calling convention."); case CallingConv::PTX_Kernel: assert(Outs.size() == 0 && "Kernel must return void."); return DAG.getNode(PTXISD::EXIT, dl, MVT::Other, Chain); case CallingConv::PTX_Device: //assert(Outs.size() <= 1 && "Can at most return one value."); break; } MachineFunction& MF = DAG.getMachineFunction(); PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>(); SmallVector<CCValAssign, 16> RVLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), getTargetMachine(), RVLocs, *DAG.getContext()); SDValue Flag; CCInfo.AnalyzeReturn(Outs, RetCC_PTX); for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { CCValAssign& VA = RVLocs[i]; assert(VA.isRegLoc() && "CCValAssign must be RegLoc"); unsigned Reg = VA.getLocReg(); DAG.getMachineFunction().getRegInfo().addLiveOut(Reg); Chain = DAG.getCopyToReg(Chain, dl, Reg, OutVals[i], Flag); // Guarantee that all emitted copies are stuck together, // avoiding something bad Flag = Chain.getValue(1); MFI->addRetReg(Reg); } if (Flag.getNode() == 0) { return DAG.getNode(PTXISD::RET, dl, MVT::Other, Chain); } else { return DAG.getNode(PTXISD::RET, dl, MVT::Other, Chain, Flag); } }
SDValue SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to a location SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), getTargetMachine(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_SystemZ); // If this is the first return lowered for this function, add the regs to the // liveout set for the function. if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { for (unsigned i = 0; i != RVLocs.size(); ++i) if (RVLocs[i].isRegLoc()) DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; SDValue ResValue = OutVals[i]; assert(VA.isRegLoc() && "Can only return in registers!"); // If this is an 8/16/32-bit value, it really should be passed promoted // to 64 bits. if (VA.getLocInfo() == CCValAssign::SExt) ResValue = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ResValue); else if (VA.getLocInfo() == CCValAssign::ZExt) ResValue = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ResValue); else if (VA.getLocInfo() == CCValAssign::AExt) ResValue = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ResValue); Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ResValue, Flag); // Guarantee that all emitted copies are stuck together, // avoiding something bad. Flag = Chain.getValue(1); } if (Flag.getNode()) return DAG.getNode(SystemZISD::RET_FLAG, dl, MVT::Other, Chain, Flag); // Return Void return DAG.getNode(SystemZISD::RET_FLAG, dl, MVT::Other, Chain); }
SDValue MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to a location SmallVector<CCValAssign, 16> RVLocs; // ISRs cannot return any value. if (CallConv == CallingConv::MSP430_INTR && !Outs.empty()) { report_fatal_error("ISRs cannot return any value"); return SDValue(); } // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_MSP430); // If this is the first return lowered for this function, add the regs to the // liveout set for the function. if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { for (unsigned i = 0; i != RVLocs.size(); ++i) if (RVLocs[i].isRegLoc()) DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag); // Guarantee that all emitted copies are stuck together, // avoiding something bad. Flag = Chain.getValue(1); } unsigned Opc = (CallConv == CallingConv::MSP430_INTR ? MSP430ISD::RETI_FLAG : MSP430ISD::RET_FLAG); if (Flag.getNode()) return DAG.getNode(Opc, dl, MVT::Other, Chain, Flag); // Return Void return DAG.getNode(Opc, dl, MVT::Other, Chain); }
SDValue ARCompactTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, SelectionDAG &DAG) const { //DEBUG(dbgs() << "ARCompactTargetLowering::LowerReturn()\n"); MachineFunction &MF = DAG.getMachineFunction(); // CCValAssign - represent the assignment of the return value to locations. SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), DAG.getTarget(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_ARCompact32); // If this is the first return lowered for this function, add the regs to the // liveout set for the function. if (MF.getRegInfo().liveout_empty()) { for (unsigned i = 0; i != RVLocs.size(); ++i) if (RVLocs[i].isRegLoc()) { MF.getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } } SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag); // Guarantee that all emitted copies are stuck together with flags. Flag = Chain.getValue(1); } // TODO: Check this value. unsigned int RetAddrOffset = 8; // Call Instruction + Delay Slot SDValue RetAddrOffsetNode = DAG.getConstant(RetAddrOffset, MVT::i32); if (Flag.getNode()) { return DAG.getNode(ARCISD::RET_FLAG, dl, MVT::Other, Chain, RetAddrOffsetNode, Flag); } return DAG.getNode(ARCISD::RET_FLAG, dl, MVT::Other, Chain, RetAddrOffsetNode); }
SDValue WebAssemblyTargetLowering::LowerFormalArguments( SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>(); if (!CallingConvSupported(CallConv)) fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions"); // Set up the incoming ARGUMENTS value, which serves to represent the liveness // of the incoming values before they're represented by virtual registers. MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS); for (const ISD::InputArg &In : Ins) { if (In.Flags.isInAlloca()) fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments"); if (In.Flags.isNest()) fail(DL, DAG, "WebAssembly hasn't implemented nest arguments"); if (In.Flags.isInConsecutiveRegs()) fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments"); if (In.Flags.isInConsecutiveRegsLast()) fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments"); // Ignore In.getOrigAlign() because all our arguments are passed in // registers. InVals.push_back( In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT, DAG.getTargetConstant(InVals.size(), DL, MVT::i32)) : DAG.getUNDEF(In.VT)); // Record the number and types of arguments. MFI->addParam(In.VT); } // Varargs are copied into a buffer allocated by the caller, and a pointer to // the buffer is passed as an argument. if (IsVarArg) { MVT PtrVT = getPointerTy(MF.getDataLayout()); unsigned VarargVreg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT)); MFI->setVarargBufferVreg(VarargVreg); Chain = DAG.getCopyToReg( Chain, DL, VarargVreg, DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT, DAG.getTargetConstant(Ins.size(), DL, MVT::i32))); MFI->addParam(PtrVT); } return Chain; }
SDValue PTXTargetLowering:: LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, SelectionDAG &DAG) const { if (isVarArg) llvm_unreachable("PTX does not support varargs"); switch (CallConv) { default: llvm_unreachable("Unsupported calling convention."); case CallingConv::PTX_Kernel: assert(Outs.size() == 0 && "Kernel must return void."); return DAG.getNode(PTXISD::EXIT, dl, MVT::Other, Chain); case CallingConv::PTX_Device: assert(Outs.size() <= 1 && "Can at most return one value."); break; } // PTX_Device // return void if (Outs.size() == 0) return DAG.getNode(PTXISD::RET, dl, MVT::Other, Chain); assert(Outs[0].VT == MVT::i32 && "Can return only basic types"); SDValue Flag; unsigned reg = PTX::R0; MachineFunction &MF = DAG.getMachineFunction(); PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>(); MFI->setRetReg(reg); // If this is the first return lowered for this function, add the regs to the // liveout set for the function if (DAG.getMachineFunction().getRegInfo().liveout_empty()) DAG.getMachineFunction().getRegInfo().addLiveOut(reg); // Copy the result values into the output registers Chain = DAG.getCopyToReg(Chain, dl, reg, OutVals[0], Flag); // Guarantee that all emitted copies are stuck together, // avoiding something bad Flag = Chain.getValue(1); return DAG.getNode(PTXISD::RET, dl, MVT::Other, Chain, Flag); }
SDValue MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to a location SmallVector<CCValAssign, 16> RVLocs; // ISRs cannot return any value. if (CallConv == CallingConv::MSP430_INTR && !Outs.empty()) report_fatal_error("ISRs cannot return any value"); // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, *DAG.getContext()); // Analyze return values. AnalyzeReturnValues(CCInfo, RVLocs, Outs); SDValue Flag; SmallVector<SDValue, 4> RetOps(1, Chain); // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag); // Guarantee that all emitted copies are stuck together, // avoiding something bad. Flag = Chain.getValue(1); RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); } unsigned Opc = (CallConv == CallingConv::MSP430_INTR ? MSP430ISD::RETI_FLAG : MSP430ISD::RET_FLAG); RetOps[0] = Chain; // Update chain. // Add the flag if we have it. if (Flag.getNode()) RetOps.push_back(Flag); return DAG.getNode(Opc, dl, MVT::Other, RetOps); }
SDValue BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, SDLoc DL, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to a location SmallVector<CCValAssign, 16> RVLocs; MachineFunction &MF = DAG.getMachineFunction(); // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); if (MF.getFunction()->getReturnType()->isAggregateType()) { DiagnosticInfoUnsupported Err(DL, *MF.getFunction(), "only integer returns supported", SDValue()); DAG.getContext()->diagnose(Err); } // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_BPF64); SDValue Flag; SmallVector<SDValue, 4> RetOps(1, Chain); // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag); // Guarantee that all emitted copies are stuck together, // avoiding something bad. Flag = Chain.getValue(1); RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); } unsigned Opc = BPFISD::RET_FLAG; RetOps[0] = Chain; // Update chain. // Add the flag if we have it. if (Flag.getNode()) RetOps.push_back(Flag); return DAG.getNode(Opc, DL, MVT::Other, RetOps); }
SDValue MBlazeTargetLowering:: LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of // the return value to a location SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_MBlaze); // If this is the first return lowered for this function, add // the regs to the liveout set for the function. if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { for (unsigned i = 0; i != RVLocs.size(); ++i) if (RVLocs[i].isRegLoc()) DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag); // guarantee that all emitted copies are // stuck together, avoiding something bad Flag = Chain.getValue(1); } // Return on MBlaze is always a "rtsd R15, 8" if (Flag.getNode()) return DAG.getNode(MBlazeISD::Ret, dl, MVT::Other, Chain, DAG.getRegister(MBlaze::R15, MVT::i32), Flag); else // Return Void return DAG.getNode(MBlazeISD::Ret, dl, MVT::Other, Chain, DAG.getRegister(MBlaze::R15, MVT::i32)); }
SDValue MSP430TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { // CCValAssign - represent the assignment of the return value to a location SmallVector<CCValAssign, 16> RVLocs; unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); DebugLoc dl = Op.getDebugLoc(); // CCState - Info about the registers and stack slot. CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs); // Analyze return values of ISD::RET CCInfo.AnalyzeReturn(Op.getNode(), RetCC_MSP430); // If this is the first return lowered for this function, add the regs to the // liveout set for the function. if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { for (unsigned i = 0; i != RVLocs.size(); ++i) if (RVLocs[i].isRegLoc()) DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } // The chain is always operand #0 SDValue Chain = Op.getOperand(0); SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); // ISD::RET => ret chain, (regnum1,val1), ... // So i*2+1 index only the regnums Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Op.getOperand(i*2+1), Flag); // Guarantee that all emitted copies are stuck together, // avoiding something bad. Flag = Chain.getValue(1); } if (Flag.getNode()) return DAG.getNode(MSP430ISD::RET_FLAG, dl, MVT::Other, Chain, Flag); // Return Void return DAG.getNode(MSP430ISD::RET_FLAG, dl, MVT::Other, Chain); }
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) { SDValue Chain = Op.getOperand(0); // Legalize the chain. SDValue Size = Op.getOperand(1); // Legalize the size. DebugLoc dl = Op.getDebugLoc(); unsigned SPReg = SP::O6; SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32); SDValue NewSP = DAG.getNode(ISD::SUB, dl, MVT::i32, SP, Size); // Value Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain // The resultant pointer is actually 16 words from the bottom of the stack, // to provide a register spill area. SDValue NewVal = DAG.getNode(ISD::ADD, dl, MVT::i32, NewSP, DAG.getConstant(96, MVT::i32)); SDValue Ops[2] = { NewVal, Chain }; return DAG.getMergeValues(Ops, 2, dl); }
SDValue SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, DebugLoc dl, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of the return value to locations. SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, DAG.getTarget(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32); // If this is the first return lowered for this function, add the regs to the // liveout set for the function. if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { for (unsigned i = 0; i != RVLocs.size(); ++i) if (RVLocs[i].isRegLoc()) DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Outs[i].Val, Flag); // Guarantee that all emitted copies are stuck together with flags. Flag = Chain.getValue(1); } if (Flag.getNode()) return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain, Flag); return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain); }
SDValue MipsTargetLowering:: LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) { SDValue Chain = Op.getOperand(0); SDValue Size = Op.getOperand(1); DebugLoc dl = Op.getDebugLoc(); // Get a reference from Mips stack pointer SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, Mips::SP, MVT::i32); // Subtract the dynamic size from the actual stack size to // obtain the new stack size. SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size); // The Sub result contains the new stack start address, so it // must be placed in the stack pointer register. Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, Mips::SP, Sub); // This node always has two return values: a new stack pointer // value and a chain SDValue Ops[2] = { Sub, Chain }; return DAG.getMergeValues(Ops, 2, dl); }
SDValue Y86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, SDLoc dl, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); // Gather info about the return values. SmallVector<CCValAssign, 16> RVLocs; CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext()); CCInfo.AnalyzeReturn(Outs, RetCC_Y86); SDValue Flag; SmallVector<SDValue, 6> RetOps(1, Chain); // Operand 0 is the chain. // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); SDValue ValToCopy = OutVals[i]; Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); Flag = Chain.getValue(1); // Copies are glued together with flags. RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); } RetOps[0] = Chain; // Update the chain. // Add the flag if we have it. if (Flag.getNode()) RetOps.push_back(Flag); return DAG.getNode(Y86ISD::RET_FLAG, dl, MVT::Other, RetOps); }
/// LowerCCCCallTo - functions arguments are copied from virtual regs to /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. /// TODO: sret. SDValue MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Analyze operands of the call, assigning locations to each operand. SmallVector<CCValAssign, 16> ArgLocs; CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCInfo.AnalyzeCallOperands(Outs, CC_MSP430); // Get a count of how many bytes are to be pushed on the stack. unsigned NumBytes = CCInfo.getNextStackOffset(); Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, getPointerTy(), true)); SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass; SmallVector<SDValue, 12> MemOpChains; SDValue StackPtr; // Walk the register/memloc assignments, inserting copies/loads. for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; SDValue Arg = OutVals[i]; // Promote the value if needed. switch (VA.getLocInfo()) { default: llvm_unreachable("Unknown loc info!"); case CCValAssign::Full: break; case CCValAssign::SExt: Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); break; case CCValAssign::ZExt: Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); break; case CCValAssign::AExt: Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); break; } // Arguments that can be passed in registers must be kept in the RegsToPass // vector if (VA.isRegLoc()) { RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); } else { assert(VA.isMemLoc()); if (StackPtr.getNode() == 0) StackPtr = DAG.getCopyFromReg(Chain, dl, MSP430::SPW, getPointerTy()); SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, DAG.getIntPtrConstant(VA.getLocMemOffset())); MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo(),false, false, 0)); } } // Transform all store nodes into one single node because all store nodes are // independent of each other. if (!MemOpChains.empty()) Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0], MemOpChains.size()); // Build a sequence of copy-to-reg nodes chained together with token chain and // flag operands which copy the outgoing args into registers. The InFlag is // necessary since all emitted instructions must be stuck together. SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, RegsToPass[i].second, InFlag); InFlag = Chain.getValue(1); } // If the callee is a GlobalAddress node (quite common, every direct call is) // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. // Likewise ExternalSymbol -> TargetExternalSymbol. if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i16); else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i16); // Returns a chain & a flag for retval copy to use. 
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); SmallVector<SDValue, 8> Ops; Ops.push_back(Chain); Ops.push_back(Callee); // Add argument registers to the end of the list so that they are // known live into the call. for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) Ops.push_back(DAG.getRegister(RegsToPass[i].first, RegsToPass[i].second.getValueType())); if (InFlag.getNode()) Ops.push_back(InFlag); Chain = DAG.getNode(MSP430ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); InFlag = Chain.getValue(1); // Create the CALLSEQ_END node. Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, getPointerTy(), true), DAG.getConstant(0, getPointerTy(), true), InFlag); InFlag = Chain.getValue(1); // Handle result values, copying them out of physregs into vregs that we // return. return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, InVals); }
SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVolatile, MachinePointerInfo DstPtrInfo) const { ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); // If to a segment-relative address space, use the default lowering. if (DstPtrInfo.getAddrSpace() >= 256) return SDValue(); // If not DWORD aligned or size is more than the threshold, call the library. // The libc version is likely to be faster for these cases. It can use the // address value and run time information about the CPU. if ((Align & 3) != 0 || !ConstantSize || ConstantSize->getZExtValue() > Subtarget->getMaxInlineSizeThreshold()) { // Check to see if there is a specialized entry-point for memory zeroing. ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src); if (const char *bzeroEntry = V && V->isNullValue() ? Subtarget->getBZeroEntry() : nullptr) { EVT IntPtr = TLI.getPointerTy(); Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext()); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; Entry.Node = Dst; Entry.Ty = IntPtrTy; Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry); TargetLowering::CallLoweringInfo CLI(DAG); CLI.setDebugLoc(dl).setChain(Chain) .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), DAG.getExternalSymbol(bzeroEntry, IntPtr), &Args, 0) .setDiscardResult(); std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI); return CallResult.second; } // Otherwise have the target-independent code call memset. return SDValue(); } uint64_t SizeVal = ConstantSize->getZExtValue(); SDValue InFlag; EVT AVT; SDValue Count; ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src); unsigned BytesLeft = 0; bool TwoRepStos = false; if (ValC) { unsigned ValReg; uint64_t Val = ValC->getZExtValue() & 255; // If the value is a constant, then we can potentially use larger sets. switch (Align & 3) { case 2: // WORD aligned AVT = MVT::i16; ValReg = X86::AX; Val = (Val << 8) | Val; break; case 0: // DWORD aligned AVT = MVT::i32; ValReg = X86::EAX; Val = (Val << 8) | Val; Val = (Val << 16) | Val; if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned AVT = MVT::i64; ValReg = X86::RAX; Val = (Val << 32) | Val; } break; default: // Byte aligned AVT = MVT::i8; ValReg = X86::AL; Count = DAG.getIntPtrConstant(SizeVal); break; } if (AVT.bitsGT(MVT::i8)) { unsigned UBytes = AVT.getSizeInBits() / 8; Count = DAG.getIntPtrConstant(SizeVal / UBytes); BytesLeft = SizeVal % UBytes; } Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, AVT), InFlag); InFlag = Chain.getValue(1); } else { AVT = MVT::i8; Count = DAG.getIntPtrConstant(SizeVal); Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Src, InFlag); InFlag = Chain.getValue(1); } Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX : X86::ECX, Count, InFlag); InFlag = Chain.getValue(1); Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI : X86::EDI, Dst, InFlag); InFlag = Chain.getValue(1); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag }; Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops); if (TwoRepStos) { InFlag = Chain.getValue(1); Count = Size; EVT CVT = Count.getValueType(); SDValue Left = DAG.getNode(ISD::AND, dl, CVT, Count, DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); Chain = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? 
X86::RCX : X86::ECX, Left, InFlag); InFlag = Chain.getValue(1); Tys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue Ops[] = { Chain, DAG.getValueType(MVT::i8), InFlag }; Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops); } else if (BytesLeft) { // Handle the last 1 - 7 bytes. unsigned Offset = SizeVal - BytesLeft; EVT AddrVT = Dst.getValueType(); EVT SizeVT = Size.getValueType(); Chain = DAG.getMemset(Chain, dl, DAG.getNode(ISD::ADD, dl, AddrVT, Dst, DAG.getConstant(Offset, AddrVT)), Src, DAG.getConstant(BytesLeft, SizeVT), Align, isVolatile, DstPtrInfo.getWithOffset(Offset)); } // TODO: Use a Tokenfactor, as in memcpy, instead of a single chain. return Chain; }
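// --- Illustrative addition (standalone sketch, not backend code): the
// byte-replication arithmetic EmitTargetCodeForMemset uses above to widen a
// constant fill byte to the REP_STOS store width chosen from the alignment.
// The helper name below is hypothetical.
#include <cassert>
#include <cstdint>

static uint64_t replicateFillByte(uint64_t Val, unsigned StoreBits) {
  Val &= 0xFF;
  if (StoreBits >= 16) Val = (Val << 8) | Val;   // 0xAB -> 0xABAB (WORD)
  if (StoreBits >= 32) Val = (Val << 16) | Val;  // -> 0xABABABAB (DWORD)
  if (StoreBits >= 64) Val = (Val << 32) | Val;  // -> 0xABABABABABABABAB (QWORD)
  return Val;
}

int main() {
  assert(replicateFillByte(0xAB, 32) == 0xABABABABULL);
  assert(replicateFillByte(0xAB, 64) == 0xABABABABABABABABULL);
  return 0;
}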
SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const { // This requires the copy size to be a constant, preferably // within a subtarget-specific limit. ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); if (!ConstantSize) return SDValue(); uint64_t SizeVal = ConstantSize->getZExtValue(); if (!AlwaysInline && SizeVal > Subtarget->getMaxInlineSizeThreshold()) return SDValue(); /// If not DWORD aligned, it is more efficient to call the library. However /// if calling the library is not allowed (AlwaysInline), then soldier on as /// the code generated here is better than the long load-store sequence we /// would otherwise get. if (!AlwaysInline && (Align & 3) != 0) return SDValue(); // If to a segment-relative address space, use the default lowering. if (DstPtrInfo.getAddrSpace() >= 256 || SrcPtrInfo.getAddrSpace() >= 256) return SDValue(); // ESI might be used as a base pointer, in that case we can't simply overwrite // the register. Fall back to generic code. const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>(DAG.getTarget().getRegisterInfo()); if (TRI->hasBasePointer(DAG.getMachineFunction()) && TRI->getBaseRegister() == X86::ESI) return SDValue(); MVT AVT; if (Align & 1) AVT = MVT::i8; else if (Align & 2) AVT = MVT::i16; else if (Align & 4) // DWORD aligned AVT = MVT::i32; else // QWORD aligned AVT = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; unsigned UBytes = AVT.getSizeInBits() / 8; unsigned CountVal = SizeVal / UBytes; SDValue Count = DAG.getIntPtrConstant(CountVal); unsigned BytesLeft = SizeVal % UBytes; SDValue InFlag; Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX : X86::ECX, Count, InFlag); InFlag = Chain.getValue(1); Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI : X86::EDI, Dst, InFlag); InFlag = Chain.getValue(1); Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RSI : X86::ESI, Src, InFlag); InFlag = Chain.getValue(1); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag }; SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops); SmallVector<SDValue, 4> Results; Results.push_back(RepMovs); if (BytesLeft) { // Handle the last 1 - 7 bytes. unsigned Offset = SizeVal - BytesLeft; EVT DstVT = Dst.getValueType(); EVT SrcVT = Src.getValueType(); EVT SizeVT = Size.getValueType(); Results.push_back(DAG.getMemcpy(Chain, dl, DAG.getNode(ISD::ADD, dl, DstVT, Dst, DAG.getConstant(Offset, DstVT)), DAG.getNode(ISD::ADD, dl, SrcVT, Src, DAG.getConstant(Offset, SrcVT)), DAG.getConstant(BytesLeft, SizeVT), Align, isVolatile, AlwaysInline, DstPtrInfo.getWithOffset(Offset), SrcPtrInfo.getWithOffset(Offset))); } return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Results); }
SDValue BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, bool &isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // Blackfin target does not yet support tail call optimization. isTailCall = false; // Analyze operands of the call, assigning locations to each operand. SmallVector<CCValAssign, 16> ArgLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), DAG.getTarget(), ArgLocs, *DAG.getContext()); CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes stack space CCInfo.AnalyzeCallOperands(Outs, CC_Blackfin); // Get the size of the outgoing arguments stack space requirement. unsigned ArgsSize = CCInfo.getNextStackOffset(); Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true)); SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; SmallVector<SDValue, 8> MemOpChains; // Walk the register/memloc assignments, inserting copies/loads. for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; SDValue Arg = OutVals[i]; // Promote the value if needed. switch (VA.getLocInfo()) { default: llvm_unreachable("Unknown loc info!"); case CCValAssign::Full: break; case CCValAssign::SExt: Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); break; case CCValAssign::ZExt: Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); break; case CCValAssign::AExt: Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); break; } // Arguments that can be passed in registers must be kept in the // RegsToPass vector if (VA.isRegLoc()) { RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); } else { assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc"); int Offset = VA.getLocMemOffset(); assert(Offset%4 == 0 && "Unaligned LocMemOffset"); assert(VA.getLocVT()==MVT::i32 && "Illegal CCValAssign type"); SDValue SPN = DAG.getCopyFromReg(Chain, dl, BF::SP, MVT::i32); SDValue OffsetN = DAG.getIntPtrConstant(Offset); OffsetN = DAG.getNode(ISD::ADD, dl, MVT::i32, SPN, OffsetN); MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, OffsetN, MachinePointerInfo(),false, false, 0)); } } // Transform all store nodes into one single node because // all store nodes are independent of each other. if (!MemOpChains.empty()) Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0], MemOpChains.size()); // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. // The InFlag is necessary since all emitted instructions must be // stuck together. SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, RegsToPass[i].second, InFlag); InFlag = Chain.getValue(1); } // If the callee is a GlobalAddress node (quite common, every direct call is) // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. // Likewise ExternalSymbol -> TargetExternalSymbol. 
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32); else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); std::vector<EVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. SDValue Ops[] = { Chain, Callee, InFlag }; Chain = DAG.getNode(BFISD::CALL, dl, NodeTys, Ops, InFlag.getNode() ? 3 : 2); InFlag = Chain.getValue(1); Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true), DAG.getIntPtrConstant(0, true), InFlag); InFlag = Chain.getValue(1); // Assign locations to each value returned by this call. SmallVector<CCValAssign, 16> RVLocs; CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), DAG.getTarget(), RVLocs, *DAG.getContext()); RVInfo.AnalyzeCallResult(Ins, RetCC_Blackfin); // Copy all of the result registers out of their specified physreg. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &RV = RVLocs[i]; unsigned Reg = RV.getLocReg(); Chain = DAG.getCopyFromReg(Chain, dl, Reg, RVLocs[i].getLocVT(), InFlag); SDValue Val = Chain.getValue(0); InFlag = Chain.getValue(2); Chain = Chain.getValue(1); // Callee is responsible for extending any i16 return values. switch (RV.getLocInfo()) { case CCValAssign::SExt: Val = DAG.getNode(ISD::AssertSext, dl, RV.getLocVT(), Val, DAG.getValueType(RV.getValVT())); break; case CCValAssign::ZExt: Val = DAG.getNode(ISD::AssertZext, dl, RV.getLocVT(), Val, DAG.getValueType(RV.getValVT())); break; default: break; } // Truncate to valtype if (RV.getLocInfo() != CCValAssign::Full) Val = DAG.getNode(ISD::TRUNCATE, dl, RV.getValVT(), Val); InVals.push_back(Val); } return Chain; }
SDValue Cpu0TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, SDLoc DL, SelectionDAG &DAG) const { // CCValAssign - represent the assignment of // the return value to a location SmallVector<CCValAssign, 16> RVLocs; // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), getTargetMachine(), RVLocs, *DAG.getContext()); // Analyze return values. CCInfo.AnalyzeReturn(Outs, RetCC_Cpu0); SDValue Flag; SmallVector<SDValue, 4> RetOps(1, Chain); // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag); // Guarantee that all emitted copies are stuck together with flags. Flag = Chain.getValue(1); RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); } #if 1 // structure return begin. Without this, it will use $3 instead of $2 // as return register. The Cpu0 ABI for returning structs by value requires // that we copy the sret argument into $v0 for the return. We saved the // argument into a virtual register in the entry block, so now we copy the // value out and into $v0. if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { MachineFunction &MF = DAG.getMachineFunction(); Cpu0FunctionInfo *Cpu0FI = MF.getInfo<Cpu0FunctionInfo>(); unsigned Reg = Cpu0FI->getSRetReturnReg(); if (!Reg) llvm_unreachable("sret virtual register not created in the entry block"); SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy()); Chain = DAG.getCopyToReg(Chain, DL, Cpu0::V0, Val, Flag); Flag = Chain.getValue(1); RetOps.push_back(DAG.getRegister(Cpu0::V0, getPointerTy())); } #endif // structure return end RetOps[0] = Chain; // Update chain. // Add the flag if we have it. if (Flag.getNode()) RetOps.push_back(Flag); // Return on Cpu0 is always a "ret $lr" return DAG.getNode(Cpu0ISD::Ret, DL, MVT::Other, RetOps); }
/// LowerCall - functions arguments are copied from virtual regs to /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. /// TODO: isVarArg, isTailCall. SDValue MBlazeTargetLowering:: LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg, bool &isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { // MBlaze does not yet support tail call optimization isTailCall = false; // The MBlaze requires stack slots for arguments passed to var arg // functions even if they are passed in registers. bool needsRegArgSlots = isVarArg; MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo(); // Analyze operands of the call, assigning locations to each operand. SmallVector<CCValAssign, 16> ArgLocs; CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext()); CCInfo.AnalyzeCallOperands(Outs, CC_MBlaze); // Get a count of how many bytes are to be pushed on the stack. unsigned NumBytes = CCInfo.getNextStackOffset(); // Variable argument function calls require a minimum of 24-bytes of stack if (isVarArg && NumBytes < 24) NumBytes = 24; Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; SmallVector<SDValue, 8> MemOpChains; // Walk the register/memloc assignments, inserting copies/loads. for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; MVT RegVT = VA.getLocVT(); SDValue Arg = OutVals[i]; // Promote the value if needed. switch (VA.getLocInfo()) { default: llvm_unreachable("Unknown loc info!"); case CCValAssign::Full: break; case CCValAssign::SExt: Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); break; case CCValAssign::ZExt: Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); break; case CCValAssign::AExt: Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); break; } // Arguments that can be passed in registers must be kept in the // RegsToPass vector if (VA.isRegLoc()) { RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); } else { // Register can't get to this point... assert(VA.isMemLoc()); // Since we are already passing values on the stack we don't // need to worry about creating additional slots for the // values passed via registers. needsRegArgSlots = false; // Create the frame index object for this incoming parameter unsigned ArgSize = VA.getValVT().getSizeInBits()/8; unsigned StackLoc = VA.getLocMemOffset() + 4; int FI = MFI->CreateFixedObject(ArgSize, StackLoc, true); SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy()); // emit ISD::STORE which stores the // parameter value to a stack location MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo(), false, false, 0)); } } // If we need to reserve stack space for the arguments passed via registers // then create a fixed stack object at the beginning of the stack. if (needsRegArgSlots && TFI.hasReservedCallFrame(MF)) MFI->CreateFixedObject(28,0,true); // Transform all store nodes into one single node because all store // nodes are independent of each other. 
if (!MemOpChains.empty()) Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0], MemOpChains.size()); // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. // The InFlag is necessary since all emitted instructions must be // stuck together. SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, RegsToPass[i].second, InFlag); InFlag = Chain.getValue(1); } // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol // node so that legalize doesn't hack it. if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy(), 0, 0); else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 0); // MBlazeJmpLink = #chain, #target_address, #opt_in_flags... // = Chain, Callee, Reg#1, Reg#2, ... // // Returns a chain & a flag for retval copy to use. SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); SmallVector<SDValue, 8> Ops; Ops.push_back(Chain); Ops.push_back(Callee); // Add argument registers to the end of the list so that they are // known live into the call. for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Ops.push_back(DAG.getRegister(RegsToPass[i].first, RegsToPass[i].second.getValueType())); } if (InFlag.getNode()) Ops.push_back(InFlag); Chain = DAG.getNode(MBlazeISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size()); InFlag = Chain.getValue(1); // Create the CALLSEQ_END node. Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), DAG.getIntPtrConstant(0, true), InFlag); if (!Ins.empty()) InFlag = Chain.getValue(1); // Handle result values, copying them out of physregs into vregs that we // return. return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, InVals); }
SDValue AVM2TargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc DL = Op.getDebugLoc();

  switch (Op.getOpcode()) {
  default: {
    assert(0 && "Shouldn't custom lower this");
  }
  // XXX TODO isn't this just Promote?
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FPOW:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM: {
    SDValue L = Op.getOperand(0);
    SDValue R = Op.getOperand(1);

    assert(L.getValueType() == R.getValueType() &&
           L.getValueType() == MVT::f32);
    L = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, L);
    R = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, R);
    SDValue result = DAG.getNode(Op.getOpcode(), DL, MVT::f64, L, R);
    return DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, result,
                       DAG.getIntPtrConstant(0));
  }
  // XXX TODO isn't this just Promote?
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FSQRT:
  case ISD::FNEG:
  case ISD::FABS: {
    SDValue L = Op.getOperand(0);

    assert(L.getValueType() == MVT::f32);
    L = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, L);
    SDValue result = DAG.getNode(Op.getOpcode(), DL, MVT::f64, L);
    return DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, result,
                       DAG.getIntPtrConstant(0));
  }
  case ISD::SELECT_CC: {
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
    SDValue L = Op.getOperand(0);
    SDValue R = Op.getOperand(1);
    SDValue A = Op.getOperand(2);
    SDValue B = Op.getOperand(3);

    // Promote everything that is an f32.
    if (L.getValueType() == MVT::f32) {
      L = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, L);
      R = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, R);
    }
    if (A.getValueType() == MVT::f32) {
      A = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, A);
      B = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, B);
    }

    bool IsFloat = L.getValueType() == MVT::f64;
    bool RIsFloat = A.getValueType() == MVT::f64;

    SDValue result =
      DAG.getNode(RIsFloat ? AVM2ISD::FSL : AVM2ISD::SL, DL,
                  RIsFloat ? MVT::f64 : MVT::i32,
                  DAG.getNode(IsFloat ? AVM2ISD::FCNOP : AVM2ISD::CNOP, DL,
                              MVT::i32,
                              DAG.getTargetConstant((int)CC, MVT::i32), L, R),
                  A, B);

    if (Op.getValueType() == MVT::f32) {
      result = DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, result,
                           DAG.getIntPtrConstant(0));
    }
    return result;
  }
  // case ISD::BR_CC: { return LowerBR_CC(Op, DAG); }
  case ISD::BR_CC: {
    SDValue Chain = Op.getOperand(0);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
    SDValue L = Op.getOperand(2);
    SDValue R = Op.getOperand(3);
    SDValue Dest = Op.getOperand(4);

    // Promote f32 comparisons to f64, and turn i32 constant operands into
    // target constants, before building the compare/branch pair.
    if (L.getValueType() == MVT::f32) {
      L = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, L);
      R = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, R);
    } else if (L.getValueType() == MVT::i32 /*|| L.getValueType() == MVT::i64*/) {
      if (ConstantSDNode *node = dyn_cast<ConstantSDNode>(L)) {
        L = DAG.getTargetConstant((int)node->getSExtValue(), MVT::i32);
      }
      if (ConstantSDNode *node = dyn_cast<ConstantSDNode>(R)) {
        R = DAG.getTargetConstant((int)node->getSExtValue(), MVT::i32);
      }
    }

    int Opc;
    int Cmp;
    bool IsFloat = L.getValueType() == MVT::f64;
    if (IsFloat) {
      Opc = AVM2ISD::FCBR;
      Cmp = AVM2ISD::FCNOP;
    } else {
      Opc = AVM2ISD::CBR;
      Cmp = AVM2ISD::CNOP;
    }
    SDValue NOP = DAG.getNode(Cmp, DL, MVT::i32,
                              DAG.getTargetConstant(CC, MVT::i32), L, R);
    return DAG.getNode(Opc, DL, MVT::Other, Chain, NOP, Dest);
  }
  // XXX TODO can't this just be Legal?
  case ISD::ConstantFP: {
    union {
      double d;
      struct { unsigned a, b; } i;
    } u;
    APFloat apf(cast<ConstantFPSDNode>(Op)->getValueAPF());
    bool losesInfo = false;
    apf.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &losesInfo);
    u.d = apf.convertToDouble();
    SDValue result =
      DAG.getNode(AVM2ISD::FNOP, DL, MVT::f64,
                  DAG.getNode(AVM2ISD::F64, DL, MVT::f64,
                              DAG.getTargetConstant(u.i.a, MVT::i32),
                              DAG.getTargetConstant(u.i.b, MVT::i32)));
    if (Op.getValueType() == MVT::f32) {
      result = DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, result,
                           DAG.getIntPtrConstant(0));
    }
    return result;
  }
  case ISD::ConstantPool: {
    const Constant *C = cast<ConstantPoolSDNode>(Op)->getConstVal();
    SDValue CP =
      DAG.getTargetConstantPool(C, MVT::i32,
                                cast<ConstantPoolSDNode>(Op)->getAlignment());
    return CP;
  }
  case ISD::BRIND: {
    return LowerBRIND(Op, DAG);
  }
  case ISD::BlockAddress: {
    return LowerBlockAddress(Op, DAG);
  }
  case ISD::TRAMPOLINE: {
    return LowerTRAMPOLINE(Op, DAG);
  }
  case ISD::GlobalAddress: {
    const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
    if (GV->isDeclaration()) {
      // TODO ok? probably a leak
      std::string Str = "\2" + GV->getNameStr();
      const char *N = strdup(Str.c_str());
      return DAG.getNode(AVM2ISD::INOP, DL, MVT::i32,
                         DAG.getTargetExternalSymbol(N, MVT::i32));
    }
    EVT VT(MVT::i32);
    return DAG.getNode(AVM2ISD::INOP, DL, MVT::i32,
                       DAG.getTargetGlobalAddress(GV, DL, VT));
  }
  case ISD::ExternalSymbol: {
    const char *N = cast<ExternalSymbolSDNode>(Op)->getSymbol();
    const Module *M = DAG.getMachineFunction().getFunction()->getParent();
    const GlobalValue *GV = M->getGlobalVariable(N);
    if (GV && !GV->isDeclaration()) {
      EVT VT(MVT::i32);
      return DAG.getNode(AVM2ISD::INOP, DL, MVT::i32,
                         DAG.getTargetGlobalAddress(GV, DL, VT));
    }
    return DAG.getNode(AVM2ISD::INOP, DL, MVT::i32,
                       DAG.getTargetExternalSymbol(N, MVT::i32));
  }
  case ISD::VASTART: {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    SDValue Offset =
      DAG.getNode(ISD::ADD, DL, MVT::i32,
                  DAG.getRegister(AVM2::EBP, MVT::i32),
                  DAG.getConstant(DAG.getMachineFunction()
                                      .getInfo<AVM2MachineFunctionInfo>()
                                      ->getVarArgsFrameOffset(), MVT::i32));
    /* http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/SelectionDAGNodes.h?revision=46585&view=markup
       Create a new class, MemOperand, for describing memory references in the
       backend. Introduce a new SDNode type, MemOperandSDNode, for holding a
       MemOperand in the SelectionDAG IR, and add a MemOperand list to
       MachineInstr, and code to manage them. Remove the offset field from
       SrcValueSDNode; uses of SrcValueSDNode that were using it are all using
       MemOperandSDNode now. Also, begin updating some getLoad and getStore
       calls to use the PseudoSourceValue objects. Most of this was written by
       Florian Brander, some reorganization and updating to TOT by me. */
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                        MachinePointerInfo(SV), false, false, 0);
  }
  case ISD::VAARG: {
    SDNode *Node = Op.getNode();
    MVT::SimpleValueType VT = Node->getValueType(0).getSimpleVT().SimpleTy;
    SDValue InChain = Node->getOperand(0);
    SDValue VAListPtr = Node->getOperand(1);
    const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
    SDValue VAList = DAG.getLoad(getPointerTy(), DL, InChain, VAListPtr,
                                 MachinePointerInfo(SV), false, false, 0);
    // Increment the pointer, VAList, to the next vaarg.
    SDValue NextPtr =
      DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                  DAG.getConstant(Node->getValueType(0).getSizeInBits()/8,
                                  getPointerTy()));
    // Store the incremented VAList to the legalized pointer.
    InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                           MachinePointerInfo(SV), false, false, 0);
    // Load the actual argument out of the pointer VAList.
    return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
                       false, false, 0);
  }
  case ISD::DYNAMIC_STACKALLOC: {
    SDValue Chain = Op.getOperand(0);  // Legalize the chain.
    SDValue Size = Op.getOperand(1);   // Legalize the size.

    unsigned SPReg = AVM2::ESP;
    SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i32);
    SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size); // Value
    unsigned Align = getTargetMachine().getFrameLowering()->getStackAlignment();
    assert(!(Align & (Align - 1))); // must be a power of 2
    SDValue NewSPAligned = DAG.getNode(ISD::AND, DL, MVT::i32, NewSP,
                                       DAG.getIntPtrConstant(~(Align - 1)));
    Chain = DAG.getCopyToReg(SP.getValue(1), DL, SPReg, NewSPAligned); // Output chain

    std::vector<EVT> Tys;
    Tys.push_back(MVT::i32);
    Tys.push_back(MVT::Other);
    SDValue Ops[2] = { NewSPAligned, Chain };
    return DAG.getNode(ISD::MERGE_VALUES, DL, Tys, Ops, 2);
  }
  /* http://llvm.org/viewvc/llvm-project?view=rev&revision=78142
     Major calling convention code refactoring. Instead of awkwardly encoding
     calling-convention information with ISD::CALL, ISD::FORMAL_ARGUMENTS,
     ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering provides three virtual
     functions for targets to override: LowerFormalArguments, LowerCall, and
     LowerReturn, which replace the custom lowering done on the special nodes.
     They provide the same information, but in a more immediately usable
     format. This also reworks much of the target-independent tail call logic.
     The decision of whether or not to perform a tail call is now cleanly
     split between target-independent portions, and the target dependent
     portion in IsEligibleForTailCallOptimization. This also synchronizes all
     in-tree targets, to help enable future refactoring and feature work.

     setOperationAction(ISD::RET , MVT::Other, Custom);
     case ISD::RET: {
       SDValue Copy;
       switch (Op.getNumOperands()) {
       default:
         assert(0 && "Do not know how to return this many arguments!");
         abort();
       case 1:
         return DAG.getNode(AVM2ISD::RET_FLAG, DL, MVT::Other,
                            Op.getOperand(0), DAG.getConstant(0, MVT::i32));
       case 3: {
         unsigned ArgReg;
         switch (Op.getOperand(1).getValueType()) {
         default: assert(0 && "Unknown type to return!");
         case MVT::i32: ArgReg = AVM2::EAX; break;
         case MVT::f32: ArgReg = AVM2::SST0; break;
         case MVT::f64: ArgReg = AVM2::ST0; break;
         }
         Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
                                 SDValue());
         break;
       }
       case 5:
         Copy = DAG.getCopyToReg(Op.getOperand(0), AVM2::EDX, Op.getOperand(3),
                                 SDValue());
         Copy = DAG.getCopyToReg(Copy, AVM2::EAX, Op.getOperand(1),
                                 Copy.getValue(1));
         break;
       }
       return DAG.getNode(AVM2ISD::RET_FLAG, DL, MVT::Other, Copy,
                          Copy.getValue(1));
     } */
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:
    return LowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:
    return LowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_DISPATCHSETUP:
    return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG, SubTarget);
  // Return address. Currently unimplemented.
  case ISD::RETURNADDR:
    break;
  }
  return SDValue();
}
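// LowerOperation is only reached for node types that the AVM2TargetLowering
// constructor marks as Custom. The lines below sketch the setOperationAction
// calls implied by the switch above; they are inferred from the cases handled
// here, not copied from the actual AVM2 constructor, which may register more
// or different opcode/type pairs.
//
//   setOperationAction(ISD::FADD,               MVT::f32,   Custom);
//   setOperationAction(ISD::FSUB,               MVT::f32,   Custom);
//   setOperationAction(ISD::FMUL,               MVT::f32,   Custom);
//   setOperationAction(ISD::FDIV,               MVT::f32,   Custom);
//   setOperationAction(ISD::SELECT_CC,          MVT::i32,   Custom);
//   setOperationAction(ISD::BR_CC,              MVT::i32,   Custom);
//   setOperationAction(ISD::ConstantFP,         MVT::f64,   Custom);
//   setOperationAction(ISD::GlobalAddress,      MVT::i32,   Custom);
//   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
//   setOperationAction(ISD::VAARG,              MVT::Other, Custom);
//   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32,   Custom);
//   ... and similarly for the remaining opcodes handled by the switch.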