static bool CC_Lanai32_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Handle fixed arguments with default CC.
  // Note: Both the default and fast CC handle VarArg the same and hence the
  // calling convention of the function is not considered here.
  if (ValNo < NumFixedArgs) {
    return CC_Lanai32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
  }

  // Promote i8/i16 args to i32.
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  // VarArgs get passed on the stack.
  unsigned Offset = State.AllocateStack(4, 4);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}

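// Usage sketch (illustrative, not verbatim from the Lanai sources): a
// CCAssignFn like CC_Lanai32_VarArg is handed to CCState when lowering a
// vararg call. NumFixedArgs is assumed to be a file-static set from the
// callee's function type before the analysis runs, so the ValNo <
// NumFixedArgs test above distinguishes named parameters from the variadic
// tail. The CCState constructor shown matches newer LLVM; older releases
// also took a TargetMachine argument.
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, /*IsVarArg=*/true, MF, ArgLocs,
//                  *DAG.getContext());
//   CCInfo.AnalyzeCallOperands(Outs, CC_Lanai32_VarArg);
//   unsigned ArgsSize = CCInfo.getNextStackOffset();
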
static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                CCValAssign::LocInfo &LocInfo,
                                ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const unsigned RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get the first register for the high half.
  if (unsigned Reg = State.AllocateReg(RegList, 6)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // No registers left; assign the whole thing to the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get the second register for the low half.
  if (unsigned Reg = State.AllocateReg(RegList, 6))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4, 4),
                                           LocVT, LocInfo));
  return true;
}

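// Consumer-side sketch (HiVReg, LoVReg, Chain and DL are assumed names from
// a LowerFormalArguments context; this is not the verbatim Sparc lowering):
// the two custom register locations added above arrive as i32 halves and are
// glued back into an f64. SPARC is big-endian, so the first location carries
// the high word, and ISD::BUILD_PAIR takes its operands low half first.
//
//   SDValue Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
//   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
//   SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
//   SDValue F64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Pair);
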
static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                CCValAssign::LocInfo &LocInfo,
                                ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const unsigned ArgRegs[] = {
    MBlaze::R5, MBlaze::R6, MBlaze::R7,
    MBlaze::R8, MBlaze::R9, MBlaze::R10
  };

  const unsigned NumArgRegs = array_lengthof(ArgRegs);
  unsigned Reg = State.AllocateReg(ArgRegs, NumArgRegs);
  if (!Reg)
    return false;

  // Even though the value goes in a register, reserve a matching stack slot
  // so register and stack allocation stay in lockstep (shadow space).
  unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
  State.AllocateStack(SizeInBytes, SizeInBytes);
  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));

  return true;
}

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                 CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(ArgFlags.isSRet());

  // Assign the SRet argument to offset 0 of the argument area. In the SPARC
  // ABI the hidden struct-return pointer lives at %sp+64, and the lowering
  // code applies that bias to this custom memory location.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 0, LocVT, LocInfo));
  return true;
}

static bool CC_X86_VectorCallAssignRegister(unsigned &ValNo, MVT &ValVT,
                                            MVT &LocVT,
                                            CCValAssign::LocInfo &LocInfo,
                                            ISD::ArgFlagsTy &ArgFlags,
                                            CCState &State) {
  ArrayRef<MCPhysReg> RegList = CC_X86_VectorCallGetSSEs(ValVT);
  bool Is64bit = static_cast<const X86Subtarget &>(
                     State.getMachineFunction().getSubtarget())
                     .is64Bit();

  for (auto Reg : RegList) {
    // If the register is not marked as allocated - assign to it.
    if (!State.isAllocated(Reg)) {
      unsigned AssignedReg = State.AllocateReg(Reg);
      assert(AssignedReg == Reg && "Expecting a valid register allocation");
      State.addLoc(
          CCValAssign::getReg(ValNo, ValVT, AssignedReg, LocVT, LocInfo));
      return true;
    }
    // If the register is marked as shadow allocated - assign to it.
    if (Is64bit && State.IsShadowAllocatedReg(Reg)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return true;
    }
  }

  llvm_unreachable("Clang should ensure that hva marked vectors will have "
                   "an available register.");
  return false; // Not reached.
}

bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                          CCValAssign::LocInfo &LocInfo,
                          ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // On the second pass, go through the HVAs only.
  if (ArgFlags.isSecArgPass()) {
    if (ArgFlags.isHva())
      return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
                                             ArgFlags, State);
    return true;
  }

  // Process only vector types as defined by vectorcall spec:
  // "A vector type is either a floating point type, for example,
  // a float or double, or an SIMD vector type, for example, __m128 or __m256".
  if (!(ValVT.isFloatingPoint() ||
        (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
    return false;
  }

  if (ArgFlags.isHva())
    return true; // If this is an HVA - Stop the search.

  // Assign XMM register.
  if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // In case we did not find an available XMM register for a vector -
  // pass it indirectly.
  // It is similar to CCPassIndirect, with the addition of inreg.
  if (!ValVT.isFloatingPoint()) {
    LocVT = MVT::i32;
    LocInfo = CCValAssign::Indirect;
    ArgFlags.setInReg();
  }

  return false; // No register was assigned - Continue the search.
}

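// Driver sketch for the two-pass scheme above (hedged: this assumes the
// CCState::AnalyzeArgumentsSecondPass helper that the vectorcall
// implementation relies on; it re-runs the callback with isSecArgPass() set
// on every argument, so only the HVA branch at the top of the function
// fires and the HVAs are placed into whatever XMM registers remain).
//
//   CCInfo.AnalyzeCallOperands(Outs, CC_X86_32_VectorCall);
//   if (CallConv == CallingConv::X86_VectorCall)
//     CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86_32_VectorCall);
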
bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // List of GPR registers that are available for storing values in the
  // regcall calling convention.
  static const MCPhysReg RegList[] = {X86::EAX, X86::ECX, X86::EDX, X86::EDI,
                                      X86::ESI};

  // Collect all the registers that are still available for allocation.
  SmallVector<unsigned, 5> AvailableRegs;
  for (auto Reg : RegList) {
    if (!State.isAllocated(Reg))
      AvailableRegs.push_back(Reg);
  }

  const size_t RequiredGprsUponSplit = 2;
  if (AvailableRegs.size() < RequiredGprsUponSplit)
    return false; // Not enough free registers - continue the search.

  // Allocate the available registers.
  for (unsigned I = 0; I < RequiredGprsUponSplit; I++) {
    // Mark the register as allocated.
    unsigned Reg = State.AllocateReg(AvailableRegs[I]);

    // Since we previously made sure that two registers are available,
    // we expect that a real register number will be returned.
    assert(Reg && "Expecting a register will be available");

    // Assign the value to the allocated register.
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  }

  // Successfully allocated the registers - stop scanning the next rules.
  return true;
}

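// Producer-side sketch (Arg, DL and DAG are assumed names from a LowerCall
// context): the i64 value whose two halves the routine above placed in GPRs
// is split with ISD::EXTRACT_ELEMENT, whose constant operand picks the low
// (0) or high (1) 32-bit half of a 64-bit value.
//
//   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Arg,
//                            DAG.getIntPtrConstant(0, DL));
//   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Arg,
//                            DAG.getIntPtrConstant(1, DL));
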
bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                          CCValAssign::LocInfo &LocInfo,
                          ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // On the second pass, go through the HVAs only.
  if (ArgFlags.isSecArgPass()) {
    if (ArgFlags.isHva())
      return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
                                             ArgFlags, State);
    return true;
  }

  // Process only vector types as defined by vectorcall spec:
  // "A vector type is either a floating-point type, for example,
  // a float or double, or an SIMD vector type, for example, __m128 or __m256".
  if (!(ValVT.isFloatingPoint() ||
        (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
    // If R9 was already assigned, we are past the fourth argument; because
    // this is not an HVA / vector type, we need to allocate a shadow XMM
    // register.
    if (State.isAllocated(X86::R9)) {
      // Assign shadow XMM register.
      (void)State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT));
    }
    return false;
  }

  if (!ArgFlags.isHva() || ArgFlags.isHvaStart()) {
    // Assign shadow GPR register.
    (void)State.AllocateReg(CC_X86_64_VectorCallGetGPRs());

    // Assign XMM register - (shadow for HVA and non-shadow for non-HVA).
    if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
      // In the vectorcall calling convention, additional shadow stack can be
      // created on top of the basic 32 bytes of win64. This can happen when
      // the fifth or sixth argument is a vector type or HVA. In that case,
      // a shadow stack of 8 bytes is allocated for each such argument.
      if (Reg == X86::XMM4 || Reg == X86::XMM5)
        State.AllocateStack(8, 8);

      if (!ArgFlags.isHva()) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return true; // Allocated a register - Stop the search.
      }
    }
  }

  // If this is an HVA - Stop the search,
  // otherwise continue the search.
  return ArgFlags.isHva();
}

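// Worked example, following the rules implemented above (assuming Win64
// vectorcall): for
//   void __vectorcall f(__m128 a, __m128 b, __m128 c, __m128 d, __m128 e);
// a..d take XMM0..XMM3 while RCX, RDX, R8 and R9 are shadow-allocated; e is
// then assigned XMM4, which additionally reserves 8 bytes of shadow stack on
// top of the basic 32 bytes of the win64 convention.
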
static void AnalyzeRetResult(CCState &State,
                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
  State.AnalyzeReturn(Outs, RetCC_MSP430);
}

static void AnalyzeRetResult(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) {
  State.AnalyzeCallResult(Ins, RetCC_MSP430);
}

template <typename ArgT>
static void AnalyzeArguments(CCState &State,
                             SmallVectorImpl<CCValAssign> &ArgLocs,
                             const SmallVectorImpl<ArgT> &Args) {
  static const MCPhysReg RegList[] = {
    MSP430::R15, MSP430::R14, MSP430::R13, MSP430::R12
  };
  static const unsigned NbRegs = array_lengthof(RegList);

  if (State.isVarArg()) {
    AnalyzeVarArgs(State, Args);
    return;
  }

  SmallVector<unsigned, 4> ArgsParts;
  ParseFunctionArgs(Args, ArgsParts);

  unsigned RegsLeft = NbRegs;
  bool UseStack = false;
  unsigned ValNo = 0;

  for (unsigned i = 0, e = ArgsParts.size(); i != e; i++) {
    MVT ArgVT = Args[ValNo].VT;
    ISD::ArgFlagsTy ArgFlags = Args[ValNo].Flags;
    MVT LocVT = ArgVT;
    CCValAssign::LocInfo LocInfo = CCValAssign::Full;

    // Promote i8 to i16.
    if (LocVT == MVT::i8) {
      LocVT = MVT::i16;
      if (ArgFlags.isSExt())
        LocInfo = CCValAssign::SExt;
      else if (ArgFlags.isZExt())
        LocInfo = CCValAssign::ZExt;
      else
        LocInfo = CCValAssign::AExt;
    }

    // Handle byval arguments.
    if (ArgFlags.isByVal()) {
      State.HandleByVal(ValNo++, ArgVT, LocVT, LocInfo, 2, 2, ArgFlags);
      continue;
    }

    unsigned Parts = ArgsParts[i];
    if (!UseStack && Parts <= RegsLeft) {
      unsigned FirstVal = ValNo;
      for (unsigned j = 0; j < Parts; j++) {
        unsigned Reg = State.AllocateReg(RegList);
        State.addLoc(CCValAssign::getReg(ValNo++, ArgVT, Reg, LocVT, LocInfo));
        RegsLeft--;
      }
      // Reverse the order of the pieces to agree with the "big endian" format
      // required in the calling convention ABI.
      SmallVectorImpl<CCValAssign>::iterator B = ArgLocs.begin() + FirstVal;
      std::reverse(B, B + Parts);
    } else {
      UseStack = true;
      for (unsigned j = 0; j < Parts; j++)
        CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, State);
    }
  }
}

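// ParseFunctionArgs is referenced above but not shown in this excerpt. A
// minimal sketch of what it must compute, inferred from how its output is
// consumed: one entry per original IR argument, counting how many
// register-sized parts the argument was split into (parts of the same
// argument share an OrigArgIndex).
template <typename ArgT>
static void ParseFunctionArgs(const SmallVectorImpl<ArgT> &Args,
                              SmallVectorImpl<unsigned> &Out) {
  unsigned CurrentArgIndex = ~0u;
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    if (!Out.empty() && Args[i].OrigArgIndex == CurrentArgIndex)
      ++Out.back(); // Another part of the current argument.
    else {
      Out.push_back(1); // First part of a new argument.
      CurrentArgIndex = Args[i].OrigArgIndex;
    }
  }
}
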
static void AnalyzeVarArgs(CCState &State,
                           const SmallVectorImpl<ISD::InputArg> &Ins) {
  State.AnalyzeFormalArguments(Ins, CC_MSP430_AssignStack);
}

static void AnalyzeVarArgs(CCState &State,
                           const SmallVectorImpl<ISD::OutputArg> &Outs) {
  State.AnalyzeCallOperands(Outs, CC_MSP430_AssignStack);
}

static bool CC_MipsO32(unsigned ValNo, EVT ValVT, EVT LocVT,
                       CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                       CCState &State) {
  static const unsigned IntRegsSize = 4, FloatRegsSize = 2;

  static const unsigned IntRegs[] = {
    Mips::A0, Mips::A1, Mips::A2, Mips::A3
  };
  static const unsigned F32Regs[] = { Mips::F12, Mips::F14 };
  static const unsigned F64Regs[] = { Mips::D6, Mips::D7 };

  unsigned Reg = 0;
  unsigned UnallocIntReg = State.getFirstUnallocated(IntRegs, IntRegsSize);
  // Integer registers have been used if the first unallocated one is not A0.
  // (Indexing IntRegs with UnallocIntReg would read out of bounds when all
  // four registers are already taken, so compare the index instead.)
  bool IntRegUsed = (UnallocIntReg != 0);

  // Promote i8 and i16.
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  if (ValVT == MVT::i32 || (ValVT == MVT::f32 && IntRegUsed)) {
    Reg = State.AllocateReg(IntRegs, IntRegsSize);
    IntRegUsed = true;
    LocVT = MVT::i32;
  }

  if (ValVT.isFloatingPoint() && !IntRegUsed) {
    if (ValVT == MVT::f32)
      Reg = State.AllocateReg(F32Regs, FloatRegsSize);
    else
      Reg = State.AllocateReg(F64Regs, FloatRegsSize);
  }

  if (ValVT == MVT::f64 && IntRegUsed) {
    if (UnallocIntReg != IntRegsSize) {
      // If we hit register A3 as the first not allocated, we must
      // mark it as allocated (shadow) and use the stack instead.
      if (IntRegs[UnallocIntReg] != Mips::A3)
        Reg = Mips::A2;
      // Mark the remaining integer registers as allocated.
      for (; UnallocIntReg < IntRegsSize; ++UnallocIntReg)
        State.AllocateReg(IntRegs[UnallocIntReg]);
    }
    LocVT = MVT::i32;
  }

  if (!Reg) {
    unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
    unsigned Offset = State.AllocateStack(SizeInBytes, SizeInBytes);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  } else
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));

  return false; // CC must always match.
}

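// Usage sketch (illustrative, mirroring the Lanai sketch earlier but on the
// formal-argument side): CC_MipsO32 is a hand-written CCAssignFn handed
// directly to CCState rather than being generated from TableGen.
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32);
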