/// InstructionSelectBasicBlock - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
void IA64DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
  DEBUG(BB->dump());

  // Select target instructions for the DAG.
  DAG.setRoot(SelectRoot(DAG.getRoot()));
  DAG.RemoveDeadNodes();

  // Emit machine code to BB.
  ScheduleAndEmitDAG(DAG);
}
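// Sketch (assumed, following the usual DAG-ISel pass layout of this era; the
// factory name is illustrative, not quoted from the IA64 sources): the target
// machine creates the ISel pass through a factory, and SelectionDAGISel then
// invokes InstructionSelectBasicBlock once per basic block it has built a
// SelectionDAG for.
FunctionPass *llvm::createIA64DAGToDAGInstructionSelector(IA64TargetMachine &TM) {
  return new IA64DAGToDAGISel(TM);
}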
// After instruction selection, insert COPY_TO_REGCLASS nodes to help in
// choosing the proper register classes.
void BlackfinDAGToDAGISel::FixRegisterClasses(SelectionDAG &DAG) {
  const BlackfinInstrInfo &TII = getInstrInfo();
  const BlackfinRegisterInfo *TRI = getRegisterInfo();
  DAG.AssignTopologicalOrder();
  HandleSDNode Dummy(DAG.getRoot());

  for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin();
       NI != DAG.allnodes_end(); ++NI) {
    if (NI->use_empty() || !NI->isMachineOpcode())
      continue;
    const TargetInstrDesc &DefTID = TII.get(NI->getMachineOpcode());
    for (SDNode::use_iterator UI = NI->use_begin(); !UI.atEnd(); ++UI) {
      if (!UI->isMachineOpcode())
        continue;

      if (UI.getUse().getResNo() >= DefTID.getNumDefs())
        continue;
      const TargetRegisterClass *DefRC =
        DefTID.OpInfo[UI.getUse().getResNo()].getRegClass(TRI);

      const TargetInstrDesc &UseTID = TII.get(UI->getMachineOpcode());
      if (UseTID.getNumDefs()+UI.getOperandNo() >= UseTID.getNumOperands())
        continue;
      const TargetRegisterClass *UseRC =
        UseTID.OpInfo[UseTID.getNumDefs()+UI.getOperandNo()].getRegClass(TRI);
      if (!DefRC || !UseRC)
        continue;
      // We cannot copy CC <-> !(CC/D)
      if ((isCC(DefRC) && !isDCC(UseRC)) || (isCC(UseRC) && !isDCC(DefRC))) {
        SDNode *Copy =
          DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                             NI->getDebugLoc(), MVT::i32,
                             UI.getUse().get(),
                             DAG.getTargetConstant(BF::DRegClassID, MVT::i32));
        UpdateNodeOperand(DAG, *UI, UI.getOperandNo(), SDValue(Copy, 0));
      }
    }
  }
  DAG.setRoot(Dummy.getValue());
}
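// Sketch (assumed wiring, matching the "after instruction selection" comment
// above, not quoted from this file): the natural hook for this fixup is
// SelectionDAGISel's PostprocessISelDAG(), which runs once the whole DAG has
// been selected and exposes the current DAG through CurDAG.
void BlackfinDAGToDAGISel::PostprocessISelDAG() {
  FixRegisterClasses(*CurDAG);
}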
SDValue
NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl, SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const DataLayout *TD = getDataLayout();

  const Function *F = MF.getFunction();
  const AttrListPtr &PAL = F->getAttributes();

  SDValue Root = DAG.getRoot();
  std::vector<SDValue> OutChains;

  bool isKernel = llvm::isKernelFunction(*F);
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I) {
    theArgs.push_back(I);
    argTypes.push_back(I->getType());
  }
  assert(argTypes.size() == Ins.size() &&
         "Ins types and function types did not match");

  int idx = 0;
  for (unsigned i=0, e=Ins.size(); i!=e; ++i, ++idx) {
    Type *Ty = argTypes[i];
    EVT ObjectVT = getValueType(Ty);
    assert(ObjectVT == Ins[i].VT &&
           "Ins type did not match function type");

    // If the kernel argument is image*_t or sampler_t, convert it to
    // an i32 constant holding the parameter position. This can later be
    // matched in the AsmPrinter to output the correct mangled name.
    if (isImageOrSamplerVal(theArgs[i],
                            (theArgs[i]->getParent() ?
                             theArgs[i]->getParent()->getParent() : 0))) {
      assert(isKernel && "Only kernels can have image/sampler params");
      InVals.push_back(DAG.getConstant(i+1, MVT::i32));
      continue;
    }

    if (theArgs[i]->use_empty()) {
      // argument is dead
      InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT));
      continue;
    }

    // In the following cases, assign a node order of "idx+1" to newly created
    // nodes. The SDNodes for params have to appear in the same order as their
    // order of appearance in the original function. "idx+1" holds that order.
    if (PAL.getParamAttributes(i+1).hasAttribute(Attributes::ByVal) == false) {
      // A plain scalar.
      if (isABI || isKernel) {
        // If ABI, load from the param symbol
        SDValue Arg = getParamSymbol(DAG, idx);
        Value *srcValue = new Argument(PointerType::get(ObjectVT.getTypeForEVT(
            F->getContext()), llvm::ADDRESS_SPACE_PARAM));
        SDValue p = DAG.getLoad(ObjectVT, dl, Root, Arg,
                                MachinePointerInfo(srcValue),
                                false, false, false,
                                TD->getABITypeAlignment(ObjectVT.getTypeForEVT(
                                    F->getContext())));
        if (p.getNode())
          DAG.AssignOrdering(p.getNode(), idx+1);
        InVals.push_back(p);
      } else {
        // If no ABI, just move the param symbol
        SDValue Arg = getParamSymbol(DAG, idx, ObjectVT);
        SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
        if (p.getNode())
          DAG.AssignOrdering(p.getNode(), idx+1);
        InVals.push_back(p);
      }
      continue;
    }

    // Param has ByVal attribute
    if (isABI || isKernel) {
      // Return MoveParam(param symbol).
      // Ideally, the param symbol can be returned directly, but when the
      // SDNode builder decides to use it in a CopyToReg(), the machine
      // instruction fails because TargetExternalSymbol (not lowered) is
      // target dependent, and CopyToReg assumes the source is lowered.
      SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
      SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
      if (p.getNode())
        DAG.AssignOrdering(p.getNode(), idx+1);
      if (isKernel)
        InVals.push_back(p);
      else {
        SDValue p2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
                                 DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen,
                                                 MVT::i32),
                                 p);
        InVals.push_back(p2);
      }
    } else {
      // Have to move a set of param symbols to registers and
      // store them locally and return the local pointer in InVals
      const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]);
      assert(elemPtrType && "Byval parameter should be a pointer type");
      Type *elemType = elemPtrType->getElementType();
      // Compute the constituent parts
      SmallVector<EVT, 16> vtparts;
      SmallVector<uint64_t, 16> offsets;
      ComputeValueVTs(*this, elemType, vtparts, &offsets, 0);
      unsigned totalsize = 0;
      for (unsigned j=0, je=vtparts.size(); j!=je; ++j)
        totalsize += vtparts[j].getStoreSizeInBits();
      SDValue localcopy = DAG.getFrameIndex(MF.getFrameInfo()->
                                      CreateStackObject(totalsize/8, 16, false),
                                            getPointerTy());
      unsigned sizesofar = 0;
      std::vector<SDValue> theChains;
      for (unsigned j=0, je=vtparts.size(); j!=je; ++j) {
        unsigned numElems = 1;
        if (vtparts[j].isVector()) numElems = vtparts[j].getVectorNumElements();
        for (unsigned k=0, ke=numElems; k!=ke; ++k) {
          EVT tmpvt = vtparts[j];
          if (tmpvt.isVector()) tmpvt = tmpvt.getVectorElementType();
          SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt,
                                    getParamSymbol(DAG, idx, tmpvt));
          SDValue addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy,
                                     DAG.getConstant(sizesofar, getPointerTy()));
          theChains.push_back(DAG.getStore(Chain, dl, arg, addr,
                                           MachinePointerInfo(),
                                           false, false, 0));
          sizesofar += tmpvt.getStoreSizeInBits()/8;
          ++idx;
        }
      }
      --idx;
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &theChains[0], theChains.size());
      InVals.push_back(localcopy);
    }
  }

  // Clang will check an explicit vararg list and issue an error if there is
  // any. However, Clang will let code with an implicit vararg list like f()
  // pass. We treat this case as if the arg list is empty.
  //if (F.isVarArg()) {
  //  assert(0 && "VarArg not supported yet!");
  //}

  if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &OutChains[0], OutChains.size()));

  return Chain;
}
SDValue AVM2TargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     DebugLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AVM2MachineFunctionInfo *FuncInfo = MF.getInfo<AVM2MachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_AVM2_32);

  SDValue Root = DAG.getRoot();
  unsigned ArgOffset = 0;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[i];
    // FIXME: We ignore the register assignments of AnalyzeFormalArguments
    // because it doesn't know how to split a double into two i32 registers.
    EVT ObjectVT = VA.getValVT();
    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!Ins[i].Used) {  // Argument is dead.
        InVals.push_back(DAG.getNode(ISD::UNDEF, DL, ObjectVT));
      } else {
        int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset, true);
        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
        SDValue Load;
        if (ObjectVT == MVT::i32) {
          Load = DAG.getLoad(MVT::i32, DL, Root, FIPtr, MachinePointerInfo(),
                             false, false, 0);
        } else {
          ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
          unsigned Offset = 0; // LE
          FIPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, FIPtr,
                              DAG.getConstant(Offset, MVT::i32));
          Load = DAG.getExtLoad(LoadOp, DL, MVT::i32, Root, FIPtr,
                                MachinePointerInfo(), ObjectVT,
                                false, false, 0);
          Load = DAG.getNode(ISD::TRUNCATE, DL, ObjectVT, Load);
        }
        InVals.push_back(Load);
      }
      ArgOffset += 4;
      break;
    case MVT::f32:
      if (!Ins[i].Used) {  // Argument is dead.
        InVals.push_back(DAG.getNode(ISD::UNDEF, DL, ObjectVT));
      } else {
        int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset, true);
        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
        InVals.push_back(DAG.getLoad(ObjectVT, DL, Root, FIPtr,
                                     MachinePointerInfo(), false, false, 0));
      }
      ArgOffset += 4;
      break;
    case MVT::i64:
      if (!Ins[i].Used) {  // Argument is dead.
        InVals.push_back(DAG.getNode(ISD::UNDEF, DL, ObjectVT));
      } else {
        int FrameIdx = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset, true);
        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
        SDValue FIPtr4 = DAG.getNode(ISD::ADD, DL, MVT::i32, FIPtr,
                                     DAG.getConstant(4, MVT::i32));
        SDValue Lo = DAG.getLoad(MVT::i32, DL, Root, FIPtr,
                                 MachinePointerInfo(), false, false, 0);
        SDValue Hi = DAG.getLoad(MVT::i32, DL, Root, FIPtr4,
                                 MachinePointerInfo(), false, false, 0);
        InVals.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi));
      }
      ArgOffset += 8;
      break;
    case MVT::f64:
      if (!Ins[i].Used) {  // Argument is dead.
        InVals.push_back(DAG.getNode(ISD::UNDEF, DL, ObjectVT));
      } else {
        int FrameIdx = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset, true);
        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
        InVals.push_back(DAG.getLoad(ObjectVT, DL, Root, FIPtr,
                                     MachinePointerInfo(), false, false, 0));
      }
      ArgOffset += 8;
      break;
    }
  }

  if (isVarArg) {
    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);
  }

  return Chain;
}
/// LowerFormalArguments - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values.  TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue
SparcTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl, SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  static const unsigned ArgRegs[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs+6;
  unsigned ArgOffset = 68;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[i];
    // FIXME: We ignore the register assignments of AnalyzeFormalArguments
    // because it doesn't know how to split a double into two i32 registers.
    EVT ObjectVT = VA.getValVT();
    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!Ins[i].Used) {  // Argument is dead.
        if (CurArgReg < ArgRegEnd) ++CurArgReg;
        InVals.push_back(DAG.getUNDEF(ObjectVT));
      } else if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
        unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
        SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        if (ObjectVT != MVT::i32) {
          unsigned AssertOp = ISD::AssertSext;
          Arg = DAG.getNode(AssertOp, dl, MVT::i32, Arg,
                            DAG.getValueType(ObjectVT));
          Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
        }
        InVals.push_back(Arg);
      } else {
        int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
                                                            true, false);
        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
        SDValue Load;
        if (ObjectVT == MVT::i32) {
          Load = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0,
                             false, false, 0);
        } else {
          ISD::LoadExtType LoadOp = ISD::SEXTLOAD;

          // Sparc is big endian, so add an offset based on the ObjectVT.
          unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
          FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
                              DAG.getConstant(Offset, MVT::i32));
          Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
                                NULL, 0, ObjectVT, false, false, 0);
          Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
        }
        InVals.push_back(Load);
      }

      ArgOffset += 4;
      break;
    case MVT::f32:
      if (!Ins[i].Used) {  // Argument is dead.
        if (CurArgReg < ArgRegEnd) ++CurArgReg;
        InVals.push_back(DAG.getUNDEF(ObjectVT));
      } else if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
        // FP value is passed in an integer register.
        unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
        SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);

        Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg);
        InVals.push_back(Arg);
      } else {
        int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
                                                            true, false);
        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
        SDValue Load = DAG.getLoad(MVT::f32, dl, Chain, FIPtr, NULL, 0,
                                   false, false, 0);
        InVals.push_back(Load);
      }
      ArgOffset += 4;
      break;

    case MVT::i64:
    case MVT::f64:
      if (!Ins[i].Used) {  // Argument is dead.
        if (CurArgReg < ArgRegEnd) ++CurArgReg;
        if (CurArgReg < ArgRegEnd) ++CurArgReg;
        InVals.push_back(DAG.getUNDEF(ObjectVT));
      } else {
        SDValue HiVal;
        if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
          unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
          MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi);
          HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
        } else {
          int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
                                                              true, false);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0,
                              false, false, 0);
        }

        SDValue LoVal;
        if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
          unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
          MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo);
          LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, MVT::i32);
        } else {
          int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4,
                                                              true, false);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0,
                              false, false, 0);
        }

        // Compose the two halves together into an i64 unit.
        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);

        // If we want a double, do a bit convert.
        if (ObjectVT == MVT::f64)
          WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue);

        InVals.push_back(WholeValue);
      }
      ArgOffset += 8;
      break;
    }
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
                                                          true, false);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
                                       NULL, 0, false, false, 0));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &OutChains[0], OutChains.size());
    }
  }

  return Chain;
}
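// For context, the VarArgsFrameOffset remembered above is what the va_start
// lowering consumes. A minimal sketch of that counterpart (the usual 32-bit
// SPARC pattern: the offset is added to the frame pointer %i6; the
// getVarArgsFrameOffset() accessor is the assumed getter matching the setter
// used above, and this is not quoted from this file):
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  DebugLoc dl = Op.getDebugLoc();

  // vastart just stores the address of the first vararg slot into the
  // va_list; operand 0 is the chain, operand 1 the va_list pointer,
  // operand 2 the source Value used for alias information.
  SDValue Offset = DAG.getNode(ISD::ADD, dl, MVT::i32,
                               DAG.getRegister(SP::I6, MVT::i32),
                               DAG.getConstant(FuncInfo->getVarArgsFrameOffset(),
                                               MVT::i32));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, Offset, Op.getOperand(1),
                      SV, 0, false, false, 0);
}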
/// LowerArguments - V8 uses a very simple ABI, where all values are passed in
/// either one or two GPRs, including FP values.  TODO: we should pass FP values
/// in FP registers for fastcc functions.
void
SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &ArgValues,
                                    DebugLoc dl) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  static const unsigned ArgRegs[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs+6;
  unsigned ArgOffset = 68;

  SDValue Root = DAG.getRoot();
  std::vector<SDValue> OutChains;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT ObjectVT = getValueType(I->getType());

    switch (ObjectVT.getSimpleVT()) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (I->use_empty()) {                // Argument is dead.
        if (CurArgReg < ArgRegEnd) ++CurArgReg;
        ArgValues.push_back(DAG.getUNDEF(ObjectVT));
      } else if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
        unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
        SDValue Arg = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
        if (ObjectVT != MVT::i32) {
          unsigned AssertOp = ISD::AssertSext;
          Arg = DAG.getNode(AssertOp, dl, MVT::i32, Arg,
                            DAG.getValueType(ObjectVT));
          Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
        }
        ArgValues.push_back(Arg);
      } else {
        int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
        SDValue Load;
        if (ObjectVT == MVT::i32) {
          Load = DAG.getLoad(MVT::i32, dl, Root, FIPtr, NULL, 0);
        } else {
          ISD::LoadExtType LoadOp = ISD::SEXTLOAD;

          // Sparc is big endian, so add an offset based on the ObjectVT.
          unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
          FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
                              DAG.getConstant(Offset, MVT::i32));
          Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Root, FIPtr,
                                NULL, 0, ObjectVT);
          Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
        }
        ArgValues.push_back(Load);
      }

      ArgOffset += 4;
      break;
    case MVT::f32:
      if (I->use_empty()) {                // Argument is dead.
        if (CurArgReg < ArgRegEnd) ++CurArgReg;
        ArgValues.push_back(DAG.getUNDEF(ObjectVT));
      } else if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
        // FP value is passed in an integer register.
        unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
        SDValue Arg = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);

        Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg);
        ArgValues.push_back(Arg);
      } else {
        int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
        SDValue Load = DAG.getLoad(MVT::f32, dl, Root, FIPtr, NULL, 0);
        ArgValues.push_back(Load);
      }
      ArgOffset += 4;
      break;

    case MVT::i64:
    case MVT::f64:
      if (I->use_empty()) {                // Argument is dead.
        if (CurArgReg < ArgRegEnd) ++CurArgReg;
        if (CurArgReg < ArgRegEnd) ++CurArgReg;
        ArgValues.push_back(DAG.getUNDEF(ObjectVT));
      } else {
        SDValue HiVal;
        if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
          unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
          MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi);
          HiVal = DAG.getCopyFromReg(Root, dl, VRegHi, MVT::i32);
        } else {
          int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          HiVal = DAG.getLoad(MVT::i32, dl, Root, FIPtr, NULL, 0);
        }

        SDValue LoVal;
        if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
          unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
          MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo);
          LoVal = DAG.getCopyFromReg(Root, dl, VRegLo, MVT::i32);
        } else {
          int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Root, FIPtr, NULL, 0);
        }

        // Compose the two halves together into an i64 unit.
        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);

        // If we want a double, do a bit convert.
        if (ObjectVT == MVT::f64)
          WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue);

        ArgValues.push_back(WholeValue);
      }
      ArgOffset += 8;
      break;
    }
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (F.isVarArg()) {
    // Remember the vararg offset for the va_start implementation.
    VarArgsFrameOffset = ArgOffset;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, NULL, 0));
      ArgOffset += 4;
    }
  }

  if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &OutChains[0], OutChains.size()));
}
/// LowerFormalArguments - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values.  TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue
SparcTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl, SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    if (i == 0 && Ins[i].Flags.isSRet()) {
      // Get SRet from [%fp+64]
      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                                MachinePointerInfo(), false, false, 0);
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64);
        unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo()->
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                              MachinePointerInfo(), false, false, 0);
        } else {
          unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }
        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo()->CreateFixedObject(8, Offset, true);
        SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
        SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                                   MachinePointerInfo(), false, false, 0);
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo()->CreateFixedObject(4, Offset, true);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
      SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                                  MachinePointerInfo(), false, false, 0);
      int FI2 = MF.getFrameInfo()->CreateFixedObject(4, Offset+4, true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, getPointerTy());

      SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
                                  MachinePointerInfo(), false, false, 0);

      SDValue WholeValue =
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo()->CreateFixedObject(4, Offset, true);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                         MachinePointerInfo(), false, false, 0);
    } else {
      ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
      // Sparc is big endian, so add an offset based on the ObjectVT.
      unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8);
      FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
                          DAG.getConstant(Offset, MVT::i32));
      Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
                            MachinePointerInfo(), VA.getValVT(),
                            false, false, 0);
      Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load);
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction()->hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const unsigned ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
    const unsigned *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
                                       MachinePointerInfo(),
                                       false, false, 0));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &OutChains[0], OutChains.size());
    }
  }

  return Chain;
}
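// Sketch of the counterpart in LowerReturn (the usual Sparc pattern, assumed
// shape, not quoted from this file): a struct-returning function reads
// SRetReturnReg back and hands the pointer to the caller in %i0. Chain, Flag
// and dl are the locals a LowerReturn of this era already has, and SP::I0
// must also be added to the return's live-out registers as usual.
if (MF.getFunction()->hasStructRetAttr()) {
  SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
  unsigned Reg = SFI->getSRetReturnReg();
  assert(Reg && "SRetReturnReg was not set in LowerFormalArguments");
  SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
  Chain = DAG.getCopyToReg(Chain, dl, SP::I0, Val, Flag);
  Flag = Chain.getValue(1);
}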
std::vector<SDOperand>
IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  //
  // add beautiful description of IA64 stack frame format
  // here (from intel 24535803.pdf most likely)
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  GP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
  SP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
  RP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));

  MachineBasicBlock& BB = MF.front();

  unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
                         IA64::r36, IA64::r37, IA64::r38, IA64::r39};

  unsigned args_FP[] = {IA64::F8,  IA64::F9,  IA64::F10, IA64::F11,
                        IA64::F12, IA64::F13, IA64::F14, IA64::F15};

  unsigned argVreg[8];
  unsigned argPreg[8];
  unsigned argOpc[8];

  unsigned used_FPArgs = 0; // how many FP args have been used so far?

  unsigned ArgOffset = 0;
  int count = 0;

  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    SDOperand newroot, argt;
    if (count < 8) { // need to fix this logic? maybe.

      switch (getValueType(I->getType())) {
      default:
        assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n");
      case MVT::f32:
        // fixme? (well, will need to for weird FP structy stuff,
        // see intel ABI docs)
      case MVT::f64:
//XXX     BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
        // mark this reg as liveIn
        MF.getRegInfo().addLiveIn(args_FP[used_FPArgs]);
        // floating point args go into f8..f15 as-needed, the increment
        // is below..:
        argVreg[count] =
          MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::f64));
        // FP args go into f8..f15 as needed: (hence the ++)
        argPreg[count] = args_FP[used_FPArgs++];
        argOpc[count] = IA64::FMOV;
        argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), argVreg[count],
                                            MVT::f64);
        if (I->getType() == Type::FloatTy)
          argt = DAG.getNode(ISD::FP_ROUND, MVT::f32, argt,
                             DAG.getIntPtrConstant(0));
        break;
      case MVT::i1: // NOTE: as far as C abi stuff goes,
                    // bools are just boring old ints
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::i64:
//XXX     BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
        // mark this register as liveIn
        MF.getRegInfo().addLiveIn(args_int[count]);
        argVreg[count] =
          MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
        argPreg[count] = args_int[count];
        argOpc[count] = IA64::MOV;
        argt = newroot =
          DAG.getCopyFromReg(DAG.getRoot(), argVreg[count], MVT::i64);
        if (getValueType(I->getType()) != MVT::i64)
          argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),
                             newroot);
        break;
      }
    } else { // more than 8 args go into the frame

      // Create the frame index object for this incoming parameter...
      ArgOffset = 16 + 8 * (count - 8);
      int FI = MFI->CreateFixedObject(8, ArgOffset);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
      argt = newroot = DAG.getLoad(getValueType(I->getType()),
                                   DAG.getEntryNode(), FIN, NULL, 0);
    }
    ++count;
    DAG.setRoot(newroot.getValue(1));
    ArgValues.push_back(argt);
  }

  // Create a vreg to hold the output of (what will become)
  // the "alloc" instruction
  VirtGPR = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
  BuildMI(&BB, TII->get(IA64::PSEUDO_ALLOC), VirtGPR);
  // we create a PSEUDO_ALLOC (pseudo)instruction for now
/*
  BuildMI(&BB, IA64::IDEF, 0, IA64::r1);

  // hmm:
  BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
  BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
  // ..hmm.

  BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);

  // hmm:
  BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
  BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
  // ..hmm.
*/

  unsigned tempOffset = 0;

  // if this is a varargs function, we simply lower llvm.va_start by
  // pointing to the first entry
  if (F.isVarArg()) {
    tempOffset = 0;
    VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
  }

  // here we actually do the moving of args, and store them to the stack
  // too if this is a varargs function:
  for (int i = 0; i < count && i < 8; ++i) {
    BuildMI(&BB, TII->get(argOpc[i]), argVreg[i]).addReg(argPreg[i]);
    if (F.isVarArg()) {
      // if this is a varargs function, we copy the input registers to the stack
      int FI = MFI->CreateFixedObject(8, tempOffset);
      tempOffset += 8;
      //XXX: is it safe to use r22 like this?
      BuildMI(&BB, TII->get(IA64::MOV), IA64::r22).addFrameIndex(FI);
      // FIXME: we should use st8.spill here, one day
      BuildMI(&BB, TII->get(IA64::ST8), IA64::r22).addReg(argPreg[i]);
    }
  }

  // Finally, inform the code generator which regs we return values in.
  // (see the ISD::RET: case in the instruction selector)
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "i have no idea where to return this type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    MF.getRegInfo().addLiveOut(IA64::r8);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.getRegInfo().addLiveOut(IA64::F8);
    break;
  }

  return ArgValues;
}