void AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            unsigned DestReg, int FrameIdx,
                                            const TargetRegisterClass *RC,
                                            const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOLoad,
                              MFI.getObjectSize(FrameIdx), Align);

  unsigned LoadOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4: LoadOp = AArch64::LS32_LDR; break;
    case 8: LoadOp = AArch64::LS64_LDR; break;
    default: llvm_unreachable("Unknown size for regclass");
    }
  } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) ||
             RC->hasType(MVT::f128)) {
    switch (RC->getSize()) {
    case 4: LoadOp = AArch64::LSFP32_LDR; break;
    case 8: LoadOp = AArch64::LSFP64_LDR; break;
    case 16: LoadOp = AArch64::LSFP128_LDR; break;
    default: llvm_unreachable("Unknown size for regclass");
    }
  } else {
    // The spill of D tuples is implemented by Q tuples.
    if (RC == &AArch64::QPairRegClass)
      LoadOp = AArch64::LD1x2_16B;
    else if (RC == &AArch64::QTripleRegClass)
      LoadOp = AArch64::LD1x3_16B;
    else if (RC == &AArch64::QQuadRegClass)
      LoadOp = AArch64::LD1x4_16B;
    else
      llvm_unreachable("Unknown reg class");

    MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg);
    // Vector load has different operands from other load instructions.
    NewMI.addFrameIndex(FrameIdx)
         .addMemOperand(MMO);
    return;
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg);
  NewMI.addFrameIndex(FrameIdx)
       .addImm(0)
       .addMemOperand(MMO);
}
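For reference, a hedged illustration of the two operand layouts this function produces; the register and frame-index numbers are hypothetical:

  LSFP128_LDR %Q0, <fi#2>, 0    ; scalar/FP path: frame index plus immediate offset
  LD1x2_16B   %Q0_Q1, <fi#2>    ; vector path: frame index only, no offset operand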
void IA64InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                    SmallVectorImpl<MachineOperand> &Addr,
                                    const TargetRegisterClass *RC,
                                    SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = 0;
  if (RC == IA64::FPRegisterClass) {
    Opc = IA64::LDF8;
  } else if (RC == IA64::GRRegisterClass) {
    Opc = IA64::LD8;
  } else if (RC == IA64::PRRegisterClass) {
    Opc = IA64::LD1;
  } else {
    assert(0 && "sorry, I don't know how to load this sort of reg\n");
  }

  MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
    MachineOperand &MO = Addr[i];
    if (MO.isReg())
      MIB.addReg(MO.getReg());
    else if (MO.isImm())
      MIB.addImm(MO.getImm());
    else
      MIB.addFrameIndex(MO.getIndex());
  }
  NewMIs.push_back(MIB);
}
void SparcInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                     SmallVectorImpl<MachineOperand> &Addr,
                                     const TargetRegisterClass *RC,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = 0;
  if (RC == SP::IntRegsRegisterClass)
    Opc = SP::LDri;
  else if (RC == SP::FPRegsRegisterClass)
    Opc = SP::LDFri;
  else if (RC == SP::DFPRegsRegisterClass)
    Opc = SP::LDDFri;
  else
    assert(0 && "Can't load this register");

  MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
    MachineOperand &MO = Addr[i];
    if (MO.isReg())
      MIB.addReg(MO.getReg());
    else if (MO.isImm())
      MIB.addImm(MO.getImm());
    else {
      assert(MO.isFI());
      MIB.addFrameIndex(MO.getIndex());
    }
  }
  NewMIs.push_back(MIB);
}
void PTXInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MII,
                                        unsigned DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC,
                                        const TargetRegisterInfo *TRI) const {
  MachineInstr &MI = *MII;
  DebugLoc DL = MI.getDebugLoc();
  DEBUG(dbgs() << "loadRegFromStackSlot: " << MI);

  int OpCode;

  // Select the appropriate opcode based on the register class.
  if (RC == PTX::RegI16RegisterClass) {
    OpCode = PTX::STACKLOADI16;
  } else if (RC == PTX::RegI32RegisterClass) {
    OpCode = PTX::STACKLOADI32;
  } else if (RC == PTX::RegI64RegisterClass) {
    OpCode = PTX::STACKLOADI64;
  } else if (RC == PTX::RegF32RegisterClass) {
    OpCode = PTX::STACKLOADF32;
  } else if (RC == PTX::RegF64RegisterClass) {
    OpCode = PTX::STACKLOADF64;
  } else {
    llvm_unreachable("Unknown PTX register class!");
  }

  // Build the load instruction (really a mov).
  MachineInstrBuilder MIB = BuildMI(MBB, MII, DL, get(OpCode));
  MIB.addReg(DestReg);
  MIB.addFrameIndex(FrameIdx);
  AddDefaultPredicate(MIB);
}
void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                    SmallVectorImpl<MachineOperand> &Addr,
                                    const TargetRegisterClass *RC,
                                    SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc;
  if (RC == Mips::CPURegsRegisterClass)
    Opc = Mips::LW;
  else if (RC == Mips::FGR32RegisterClass)
    Opc = Mips::LWC1;
  else if (RC == Mips::AFGR32RegisterClass)
    Opc = Mips::LWC1A;
  else if (RC == Mips::AFGR64RegisterClass)
    Opc = Mips::LDC1;
  else
    assert(0 && "Can't load this register");

  MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
    MachineOperand &MO = Addr[i];
    if (MO.isReg())
      MIB.addReg(MO.getReg());
    else if (MO.isImm())
      MIB.addImm(MO.getImm());
    else
      MIB.addFrameIndex(MO.getIndex());
  }
  NewMIs.push_back(MIB);
}
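The loadRegFromAddr implementations above (IA64, Sparc, Mips) all walk the same generic Addr operand list: a base (register or frame index) followed by a displacement. As a hedged sketch of a caller, with the variable names and the zero offset purely illustrative:

  SmallVector<MachineOperand, 2> Addr;
  Addr.push_back(MachineOperand::CreateFI(FrameIdx)); // base: a stack slot
  Addr.push_back(MachineOperand::CreateImm(0));       // displacement
  SmallVector<MachineInstr*, 4> NewMIs;
  TII->loadRegFromAddr(MF, DestReg, Addr, RC, NewMIs);
  // The new instructions are returned unattached; insert them explicitly.
  for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
    MBB->insert(InsertPos, NewMIs[i]);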
void Thumb2InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DestReg, int FI,
                                           const TargetRegisterClass *RC,
                                           const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
      RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
      RC == &ARM::GPRnopcRegClass) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg,
                             &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    if (TargetRegisterInfo::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}
void EpiphanyInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MBBI,
                                             unsigned DestReg, int FrameIdx,
                                             const TargetRegisterClass *RC,
                                             const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOLoad,
                              MFI.getObjectSize(FrameIdx), Align);

  unsigned LoadOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4: LoadOp = Epiphany::LS32_LDR; break;
    //case 8: LoadOp = Epiphany::LS64_LDR; break;
    default: llvm_unreachable("Unknown size for regclass");
    }
  } else {
    assert((RC->hasType(MVT::f32) || RC->hasType(MVT::f64)) &&
           "Expected integer or floating type for load");
    switch (RC->getSize()) {
    case 4: LoadOp = Epiphany::LSFP32_LDR; break;
    //case 8: LoadOp = Epiphany::LSFP64_LDR; break;
    default: llvm_unreachable("Unknown size for regclass");
    }
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg);
  NewMI.addFrameIndex(FrameIdx)
       .addImm(0)
       .addMemOperand(MMO);
}
void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr,
                                               const MachineInstrBuilder &MIB,
                                               MachineMemOperand *MMO) {
  if (const GlobalValue *GV = Addr.getGlobalValue())
    MIB.addGlobalAddress(GV, Addr.getOffset());
  else
    MIB.addImm(Addr.getOffset());

  if (Addr.isRegBase())
    MIB.addReg(Addr.getReg());
  else
    MIB.addFrameIndex(Addr.getFI());

  // Set the alignment operand (this is rewritten in SetP2AlignOperands).
  // TODO: Disable SetP2AlignOperands for FastISel and just do it here.
  MIB.addImm(0);

  MIB.addMemOperand(MMO);
}
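As a hedged sketch of a caller: the opcode, register class, memory-operand parameters, and the precomputed Addr are assumptions for illustration, not taken from the source above.

  // Hypothetical FastISel load that routes its operands through the helper.
  unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
  MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
      MachinePointerInfo(), MachineMemOperand::MOLoad, /*Size=*/4, /*Align=*/4);
  auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                     TII.get(WebAssembly::LOAD_I32), ResultReg);
  addLoadStoreOperands(Addr, MIB, MMO);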
void Thumb2InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          unsigned SrcReg, bool isKill, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));

  if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
      RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
      RC == &ARM::GPRnopcRegClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2STRi12))
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addFrameIndex(FI)
                       .addImm(0)
                       .addMemOperand(MMO));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its source registers to be in rGPR. Not a problem
    // for gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    MachineRegisterInfo *MRI = &MF.getRegInfo();
    MRI->constrainRegClass(SrcReg, &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO);
    AddDefaultPred(MIB);
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}
void AArch64FrameLowering::emitFrameMemOps(bool isPrologue,
                                           MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           const std::vector<CalleeSavedInfo> &CSI,
                                           const TargetRegisterInfo *TRI,
                                           const LoadStoreMethod PossClasses[],
                                           unsigned NumClasses) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  // A certain amount of implicit contract is present here. The actual stack
  // offsets haven't been allocated officially yet, so for strictly correct
  // code we rely on the fact that the elements of CSI are allocated in order
  // starting at SP, purely as dictated by size and alignment. In practice,
  // since this function handles the only accesses to those slots, it's not
  // quite so important.
  //
  // We have also ordered the callee-saved register list in AArch64CallingConv
  // so that the above scheme puts registers in order: in particular we want
  // &X30 to be &X29+8 for an ABI-correct frame record (PCS 5.2.2).
  for (unsigned i = 0, e = CSI.size(); i < e; ++i) {
    unsigned Reg = CSI[i].getReg();

    // First we need to find out which register class the register belongs to
    // so that we can use the correct load/store instructions.
    unsigned ClassIdx;
    for (ClassIdx = 0; ClassIdx < NumClasses; ++ClassIdx) {
      if (PossClasses[ClassIdx].RegClass->contains(Reg))
        break;
    }
    assert(ClassIdx != NumClasses &&
           "Asked to store register in unexpected class");
    const TargetRegisterClass &TheClass = *PossClasses[ClassIdx].RegClass;

    // Now we need to decide whether it's possible to emit a paired
    // instruction: for this we want the next register to be in the same
    // class.
    MachineInstrBuilder NewMI;
    bool Pair = false;
    if (i + 1 < CSI.size() && TheClass.contains(CSI[i+1].getReg())) {
      Pair = true;
      unsigned StLow = 0, StHigh = 0;
      if (isPrologue) {
        // Most of these registers will be live-in to the MBB and killed by
        // our store, though there are exceptions (see
        // determinePrologueDeath).
        StLow = getKillRegState(determinePrologueDeath(MBB, CSI[i+1].getReg()));
        StHigh = getKillRegState(determinePrologueDeath(MBB, CSI[i].getReg()));
      } else {
        StLow = RegState::Define;
        StHigh = RegState::Define;
      }

      NewMI = BuildMI(MBB, MBBI, DL, TII.get(PossClasses[ClassIdx].PairOpcode))
                  .addReg(CSI[i+1].getReg(), StLow)
                  .addReg(CSI[i].getReg(), StHigh);

      // If it's a paired op, we've consumed two registers.
      ++i;
    } else {
      unsigned State;
      if (isPrologue) {
        State = getKillRegState(determinePrologueDeath(MBB, CSI[i].getReg()));
      } else {
        State = RegState::Define;
      }

      NewMI = BuildMI(MBB, MBBI, DL,
                      TII.get(PossClasses[ClassIdx].SingleOpcode))
                  .addReg(CSI[i].getReg(), State);
    }

    // Note that the FrameIdx refers to the second register in a pair: it
    // will be allocated the smaller numeric address and so is the one an
    // LDP/STP address must use.
    int FrameIdx = CSI[i].getFrameIdx();
    MachineMemOperand::MemOperandFlags Flags;
    Flags = isPrologue ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                                Flags,
                                Pair ? TheClass.getSize() * 2
                                     : TheClass.getSize(),
                                MFI.getObjectAlignment(FrameIdx));

    NewMI.addFrameIndex(FrameIdx)
         .addImm(0) // address-register offset
         .addMemOperand(MMO);

    if (isPrologue)
      NewMI.setMIFlags(MachineInstr::FrameSetup);

    // For aesthetic reasons, during an epilogue we want to emit complementary
    // operations to the prologue, but in the opposite order. So we still
    // iterate through the CalleeSavedInfo list in order, but we put the
    // instructions successively earlier in the MBB.
    if (!isPrologue)
      --MBBI;
  }
}
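For context, a hedged sketch of the kind of PossClasses table a prologue emitter might pass. The field names (RegClass, PairOpcode, SingleOpcode) come from the uses above; the specific opcodes and the array_lengthof call are assumptions:

  static const LoadStoreMethod PossClasses[] = {
    {&AArch64::GPR64RegClass,    // RegClass
     AArch64::LSPair64_STR,      // PairOpcode
     AArch64::LS64_STR},         // SingleOpcode
    {&AArch64::FPR64RegClass,
     AArch64::LSFPPair64_STR,
     AArch64::LSFP64_STR},
  };
  emitFrameMemOps(/* isPrologue = */ true, MBB, MBBI, CSI, TRI,
                  PossClasses, llvm::array_lengthof(PossClasses));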
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding.
void InstrEmitter::AddOperand(MachineInstrBuilder &MIB, SDValue Op,
                              unsigned IIOpNum, const MCInstrDesc *II,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                              bool IsDebug, bool IsClone, bool IsCloned) {
  if (Op.isMachineOpcode()) {
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    MIB.addImm(C->getSExtValue());
  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
    MIB.addFPImm(F->getConstantFPValue());
  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    unsigned VReg = R->getReg();
    MVT OpVT = Op.getSimpleValueType();
    const TargetRegisterClass *OpRC =
        TLI->isTypeLegal(OpVT) ? TLI->getRegClassFor(OpVT) : nullptr;
    const TargetRegisterClass *IIRC =
        II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
           : nullptr;

    if (OpRC && IIRC && OpRC != IIRC &&
        TargetRegisterInfo::isVirtualRegister(VReg)) {
      unsigned NewVReg = MRI->createVirtualRegister(IIRC);
      BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
              TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
      VReg = NewVReg;
    }
    // Turn additional physreg operands into implicit uses on non-variadic
    // instructions. This is used by call and return instructions passing
    // arguments in registers.
    bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
    MIB.addReg(VReg, getImplRegState(Imp));
  } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
    MIB.addRegMask(RM->getRegMask());
  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
    MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
                         TGA->getTargetFlags());
  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
    MIB.addMBB(BBNode->getBasicBlock());
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
    MIB.addFrameIndex(FI->getIndex());
  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
    MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
    int Offset = CP->getOffset();
    unsigned Align = CP->getAlignment();
    Type *Type = CP->getType();
    // MachineConstantPool wants an explicit alignment.
    if (Align == 0) {
      Align = MF->getDataLayout().getPrefTypeAlignment(Type);
      if (Align == 0) {
        // Alignment of vector types. FIXME!
        Align = MF->getDataLayout().getTypeAllocSize(Type);
      }
    }

    unsigned Idx;
    MachineConstantPool *MCP = MF->getConstantPool();
    if (CP->isMachineConstantPoolEntry())
      Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Align);
    else
      Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Align);
    MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
    MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
  } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
    MIB.addSym(SymNode->getMCSymbol());
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
    MIB.addBlockAddress(BA->getBlockAddress(), BA->getOffset(),
                        BA->getTargetFlags());
  } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
    MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
  } else {
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Glue &&
           "Chain and glue operands should occur at end of operand list!");
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  }
}
void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           unsigned SrcReg, bool isKill,
                                           int FrameIdx,
                                           const TargetRegisterClass *RC,
                                           const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                              MachineMemOperand::MOStore,
                              MFI.getObjectSize(FrameIdx), Align);

  unsigned StoreOp = 0;
  if (RC->hasType(MVT::i64) || RC->hasType(MVT::i32)) {
    switch (RC->getSize()) {
    case 4: StoreOp = AArch64::LS32_STR; break;
    case 8: StoreOp = AArch64::LS64_STR; break;
    default: llvm_unreachable("Unknown size for regclass");
    }
  } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) ||
             RC->hasType(MVT::f128)) {
    switch (RC->getSize()) {
    case 4: StoreOp = AArch64::LSFP32_STR; break;
    case 8: StoreOp = AArch64::LSFP64_STR; break;
    case 16: StoreOp = AArch64::LSFP128_STR; break;
    default: llvm_unreachable("Unknown size for regclass");
    }
  } else {
    // Super-register classes with more than one sub-register are spilled
    // with ST1 variants.
    if (AArch64::DPairRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x2_8B;
    else if (AArch64::DTripleRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x3_8B;
    else if (AArch64::DQuadRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x4_8B;
    else if (AArch64::QPairRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x2_16B;
    else if (AArch64::QTripleRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x3_16B;
    else if (AArch64::QQuadRegClass.hasSubClassEq(RC))
      StoreOp = AArch64::ST1x4_16B;
    else
      llvm_unreachable("Unknown reg class");

    MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
    // Vector store has different operands from other store instructions.
    NewMI.addFrameIndex(FrameIdx)
         .addReg(SrcReg, getKillRegState(isKill))
         .addMemOperand(MMO);
    return;
  }

  MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp));
  NewMI.addReg(SrcReg, getKillRegState(isKill))
       .addFrameIndex(FrameIdx)
       .addImm(0)
       .addMemOperand(MMO);
}