/// Emit callee-save spills at the start of the prologue.
///
/// Stores each callee-saved register (pair) computed by
/// computeCalleeSaveRegisterPairs() to its fixed stack slot, using STP/STR
/// (paired/unpaired, GPR/FPR) with an SP-relative scaled offset.  Iterates the
/// pair list in reverse so the instructions are emitted in ascending offset
/// order.  Returns true to signal the target handled the spills itself.
///
/// NOTE(review): unlike restoreCalleeSavedRegisters() below, DL is left as the
/// default (unknown) debug location — presumably intentional for FrameSetup
/// code, but worth confirming.
bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;
    // Issue sequence of spills for cs regs. The first spill may be converted
    // to a pre-decrement store later by emitPrologue if the callee-save stack
    // area allocation can't be combined with the local stack area allocation.
    // For example:
    //    stp     x22, x21, [sp, #0]     // addImm(+0)
    //    stp     x20, x19, [sp, #16]    // addImm(+2)
    //    stp     fp, lr, [sp, #32]      // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in epilog.
    if (RPI.IsGPR)
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
    else
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
    DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    // The spilled registers must be live into the prologue block.
    MBB.addLiveIn(Reg1);
    if (RPI.isPaired()) {
      MBB.addLiveIn(Reg2);
      // STP operand order: second register first.  A memoperand is attached
      // for each slot of the pair (FrameIdx + 1 for Reg2, FrameIdx for Reg1).
      MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOStore, 8, 8));
    }
    MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8], where factor*8 is implicit
        .setMIFlag(MachineInstr::FrameSetup);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOStore, 8, 8));
  }
  return true;
}
/// Emit callee-save restores in the epilogue.
///
/// Mirror image of spillCalleeSavedRegisters(): reloads each callee-saved
/// register (pair) from its fixed stack slot with LDP/LDR and SP-relative
/// scaled offsets, iterating the pair list in forward order so the restores
/// come out in descending offset order.  Returns true to signal the target
/// handled the restores itself.
bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  // Use the debug location of the return/terminator we are inserting before,
  // if there is one.
  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.begin(), RPIE = RegPairs.end(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    // Issue sequence of restores for cs regs. The last restore may be converted
    // to a post-increment load later by emitEpilogue if the callee-save stack
    // area allocation can't be combined with the local stack area allocation.
    // For example:
    //    ldp     fp, lr, [sp, #32]       // addImm(+4)
    //    ldp     x20, x19, [sp, #16]     // addImm(+2)
    //    ldp     x22, x21, [sp, #0]      // addImm(+0)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;
    if (RPI.IsGPR)
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
    else
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
    DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    if (RPI.isPaired()) {
      // LDP operand order: second register first.  One memoperand per slot of
      // the pair (FrameIdx + 1 for Reg2, FrameIdx for Reg1).
      MIB.addReg(Reg2, getDefRegState(true));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOLoad, 8, 8));
    }
    MIB.addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8] where the factor*8 is implicit
        .setMIFlag(MachineInstr::FrameDestroy);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOLoad, 8, 8));
  }
  return true;
}
/// Expand the pseudo LOAD_STACK_GUARD instruction into real ARM instructions.
///
/// Selects the expansion based on subtarget/codegen properties:
///  - no MOVT support: literal-pool loads (PIC or absolute variants);
///  - non-PIC: movw/movt of the guard's address, then a load;
///  - PIC, direct symbol: pc-relative address materialization, then a load;
///  - PIC, indirect (GOT) symbol: handled inline below with a GOT load
///    followed by a load of the guard value itself.
void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMSubtarget &Subtarget = MF.getSubtarget<ARMSubtarget>();
  const TargetMachine &TM = MF.getTarget();
  if (!Subtarget.useMovt(MF)) {
    if (TM.isPositionIndependent())
      expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_pcrel, ARM::LDRi12);
    else
      expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_abs, ARM::LDRi12);
    return;
  }
  if (!TM.isPositionIndependent()) {
    expandLoadStackGuardBase(MI, ARM::MOVi32imm, ARM::LDRi12);
    return;
  }
  // The guard's GlobalValue is recovered from the pseudo's memoperand.
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  if (!Subtarget.isGVIndirectSymbol(GV)) {
    expandLoadStackGuardBase(MI, ARM::MOV_ga_pcrel, ARM::LDRi12);
    return;
  }
  // Indirect symbol under PIC: first load the guard's address from the GOT
  // (MOV_ga_pcrel_ldr), then load the guard value through that address.
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  MachineInstrBuilder MIB;
  MIB = BuildMI(MBB, MI, DL, get(ARM::MOV_ga_pcrel_ldr), Reg)
            .addGlobalAddress(GV, 0, ARMII::MO_NONLAZY);
  // The GOT entry is a constant pointer: mark the load dereferenceable and
  // invariant so later passes may hoist/CSE it.
  auto Flags = MachineMemOperand::MOLoad |
               MachineMemOperand::MODereferenceable |
               MachineMemOperand::MOInvariant;
  MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
      MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4);
  MIB.addMemOperand(MMO);
  // Second load: the guard value itself, reusing the pseudo's memoperands.
  BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg)
      .addReg(Reg, RegState::Kill)
      .addImm(0)
      .cloneMemRefs(*MI)
      .add(predOps(ARMCC::AL));
}
void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr, const MachineInstrBuilder &MIB, MachineMemOperand *MMO) { if (const GlobalValue *GV = Addr.getGlobalValue()) MIB.addGlobalAddress(GV, Addr.getOffset()); else MIB.addImm(Addr.getOffset()); if (Addr.isRegBase()) MIB.addReg(Addr.getReg()); else MIB.addFrameIndex(Addr.getFI()); // Set the alignment operand (this is rewritten in SetP2AlignOperands). // TODO: Disable SetP2AlignOperands for FastISel and just do it here. MIB.addImm(0); MIB.addMemOperand(MMO); }
/// Expand the pseudo LOAD_STACK_GUARD instruction (relocation-model-parameter
/// variant).
///
/// Same decision tree as the TargetMachine-query variant above, but driven by
/// the explicit Reloc::Model argument: literal-pool loads when MOVT is
/// unavailable, movw/movt for non-PIC, pc-relative materialization for direct
/// PIC symbols, and an inline GOT-load sequence for indirect PIC symbols.
void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
                                        Reloc::Model RM) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMSubtarget &Subtarget = MF.getSubtarget<ARMSubtarget>();
  if (!Subtarget.useMovt(MF)) {
    if (RM == Reloc::PIC_)
      expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_pcrel, ARM::LDRi12, RM);
    else
      expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_abs, ARM::LDRi12, RM);
    return;
  }
  if (RM != Reloc::PIC_) {
    expandLoadStackGuardBase(MI, ARM::MOVi32imm, ARM::LDRi12, RM);
    return;
  }
  // The guard's GlobalValue is recovered from the pseudo's memoperand.
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  if (!Subtarget.GVIsIndirectSymbol(GV, RM)) {
    expandLoadStackGuardBase(MI, ARM::MOV_ga_pcrel, ARM::LDRi12, RM);
    return;
  }
  // Indirect symbol under PIC: load the guard's address from the GOT, then
  // load the guard value through that address.
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  MachineInstrBuilder MIB;
  MIB = BuildMI(MBB, MI, DL, get(ARM::MOV_ga_pcrel_ldr), Reg)
            .addGlobalAddress(GV, 0, ARMII::MO_NONLAZY);
  // GOT entry is constant: mark the load invariant so it may be hoisted.
  unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
  MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
      MachinePointerInfo::getGOT(*MBB.getParent()), Flag, 4, 4);
  MIB.addMemOperand(MMO);
  // Second load: the guard value itself, reusing the pseudo's memoperands.
  MIB = BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg);
  MIB.addReg(Reg, RegState::Kill).addImm(0);
  MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  AddDefaultPred(MIB);
}
/// GlobalISel selection of a G_GLOBAL_VALUE-style instruction (MIB) into a
/// target address-materialization sequence.
///
/// Handles, in order: PIC (with optional GOT indirection), ROPI read-only
/// globals, RWPI writable globals (SB-relative), then static code for ELF and
/// MachO.  Returns false for unsupported cases (TLS, non-ELF ROPI/RWPI,
/// unknown object formats).
bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

  bool UseMovt = STI.useMovt(MF);

  unsigned Size = TM.getPointerSize(0);
  unsigned Alignment = 4;

  // Helper that rewrites an LDRi12 MIB into a load from a constant pool
  // entry for GV (an SB-relative entry when IsSBREL is set).
  auto addOpsForConstantPoolLoad = [&MF, Alignment,
                                    Size](MachineInstrBuilder &MIB,
                                          const GlobalValue *GV, bool IsSBREL) {
    assert(MIB->getOpcode() == ARM::LDRi12 && "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
  };

  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);

    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (Indirect ? ARM::MOV_ga_pcrel_ldr : ARM::MOV_ga_pcrel)
            : (Indirect ? ARM::LDRLIT_ga_pcrel_ldr : ARM::LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);

    // Indirect (_ldr) opcodes perform an extra load through the GOT.
    if (Indirect)
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
          TM.getProgramPointerSize(), Alignment));

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    // ROPI read-only data is addressed pc-relatively.
    unsigned Opc = UseMovt ? ARM::MOV_ga_pcrel : ARM::LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
    // RWPI writable data is addressed relative to the static base (SB).
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(ARM::MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(), TII.get(ARM::LDRi12),
                          Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
    MIB->setDesc(TII.get(ARM::ADDrr));
    MIB->RemoveOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(ARM::MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(ARM::LDRi12));
      MIB->RemoveOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(ARM::MOVi32imm));
    else
      MIB->setDesc(TII.get(ARM::LDRLIT_ga_abs));
  } else {
    LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}
unsigned Disassembler::decodeInstruction(unsigned Address, MachineBasicBlock *Block) { // Disassemble instruction const MCDisassembler *DA = MC->getMCDisassembler(); uint64_t InstSize; MCInst *Inst = new MCInst(); StringRef Bytes; if (!(DA->getInstruction(*Inst, InstSize, *CurSectionMemory, Address, nulls(), nulls()))) { printError("Unknown instruction encountered, instruction decode failed!"); return 1; // Instructions[Address] = NULL; // Block->push_back(NULL); // TODO: Replace with default size for each target. // return 1; // outs() << format("%8" PRIx64 ":\t", SectAddr + Index); // Dism->rawBytesToString(StringRef(Bytes.data() + Index, Size)); // outs() << " unkn\n"; } Instructions[Address] = Inst; // Recover Instruction information const MCInstrInfo *MII = MC->getMCInstrInfo(); MCInstrDesc *MCID = new MCInstrDesc(MII->get(Inst->getOpcode())); MCID->Size = InstSize; // Check if the instruction can load to program counter and mark it as a Ret // FIXME: Better analysis would be to see if the PC value references memory // sent as a parameter or set locally in the function, but that would need to // happen after decompilation. In either case, this is definitely a BB // terminator or branch! if (MCID->mayLoad() && MCID->mayAffectControlFlow(*Inst, *MC->getMCRegisterInfo())) { MCID->Flags |= (1 << MCID::Return); MCID->Flags |= (1 << MCID::Terminator); } // Recover MachineInstr representation DebugLoc *Location = setDebugLoc(Address); MachineInstrBuilder MIB = BuildMI(Block, *Location, *MCID); unsigned int numDefs = MCID->getNumDefs(); for (unsigned int i = 0; i < Inst->getNumOperands(); i++) { MCOperand MCO = Inst->getOperand(i); // FIXME: This hack is a workaround for the assert in MachineInstr.cpp:653, // where OpNo >= MCID->getNumOperands()... 
if (i >= MCID->getNumOperands() && !(MCID->isVariadic())) break; if (MCO.isReg()) { unsigned flags = 0; // Defs always start at the beginning of the operands list, // unfortunately BuildMI doesn't set default define flags so we have // to do it manually here. // NOTE: This should always be true, but might not be if operands list // is not populated correctly by the MC Backend for the target. if (i < numDefs) { flags |= RegState::Define; } // NOTE: No need to worry about imp defs and uses, as these are already // specificed in the MCID attached to the MachineInst object. MIB.addReg(MCO.getReg(), flags); continue; } if (MCO.isImm()) { MIB.addImm(MCO.getImm()); continue; } //else if (MCO.isFPImm()) MIB.addFPImm(MCO.getFPImm()); if (MCO.isExpr()) { MCOperandInfo MCOpInfo = MCID->OpInfo[i]; switch (MCOpInfo.OperandType) { case MCOI::OPERAND_MEMORY: case MCOI::OPERAND_PCREL: case MCOI::OPERAND_UNKNOWN: default: printError("Unknown how to handle this Expression at this time."); } } printError("Unknown how to handle Operand!"); } // NOTE: I tried MCOpInfo here, and it appearst o be NULL // ... at least for ARM. unsigned flags = 0; if (MCID->mayLoad()) flags |= MachineMemOperand::MOLoad; if (MCID->mayStore()) flags |= MachineMemOperand::MOStore; if (flags != 0) { // Constant* cInt = ConstantInt::get(Type::getInt64Ty(ctx), MCO.getImm()); // Value *Val = ConstantExpr::getIntToPtr(cInt, // PointerType::getUnqual(Type::getInt32Ty(ctx))); // FIXME: note size of 4 is known to be bad for // some targets //Copy & paste set getImm to zero MachineMemOperand* MMO = new MachineMemOperand( MachinePointerInfo(), flags, 4, 0); //MCO.getImm() MIB.addMemOperand(MMO); //outs() << "Name: " << MII->getName(Inst->getOpcode()) << " Flags: " << flags << "\n"; } // Note: I don't know why they decided instruction size needed to be 64 bits, // but the following conversion shouldn't be an issue. return ((unsigned)InstSize); }
/// Emit callee-save restores in the epilogue (post-increment variant).
///
/// Reloads each callee-saved register (pair) from its fixed stack slot.  The
/// final restore uses a post-increment LDP/LDR that also de-allocates the
/// whole callee-save area by bumping SP.  Returns true to signal the target
/// handled the restores itself.
bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  // Use the debug location of the return/terminator we are inserting before,
  // if there is one.
  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.begin(), RPIE = RegPairs.end(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;

    // Issue sequence of non-sp increment and sp-pi restores for cs regs. Only
    // the last load is sp-pi post-increment and de-allocates the stack:
    // For example:
    //    ldp     fp, lr, [sp, #32]       // addImm(+4)
    //    ldp     x20, x19, [sp, #16]     // addImm(+2)
    //    ldp     x22, x21, [sp], #48     // addImm(+6)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;
    // The last pair in the list de-allocates the stack via post-increment.
    bool BumpSP = RPII == std::prev(RegPairs.end());
    if (RPI.IsGPR) {
      if (BumpSP)
        LdrOpc = RPI.isPaired() ? AArch64::LDPXpost : AArch64::LDRXpost;
      else
        LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
    } else {
      if (BumpSP)
        LdrOpc = RPI.isPaired() ? AArch64::LDPDpost : AArch64::LDRDpost;
      else
        LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
    }
    DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    const int Offset = RPI.Offset;
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    // Post-increment forms also write SP, so it gets an extra def operand.
    if (BumpSP)
      MIB.addReg(AArch64::SP, RegState::Define);

    if (RPI.isPaired()) {
      MIB.addReg(Reg2, getDefRegState(true))
          .addReg(Reg1, getDefRegState(true))
          .addReg(AArch64::SP)
          .addImm(Offset) // [sp], #offset * 8  or [sp, #offset * 8]
                          // where the factor * 8 is implicit
          .setMIFlag(MachineInstr::FrameDestroy);
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOLoad, 8, 8));
    } else {
      MIB.addReg(Reg1, getDefRegState(true))
          .addReg(AArch64::SP)
          // Unpaired post-dec LDR takes an unscaled byte offset, so scale by
          // 8 here; the indexed form's offset stays implicitly scaled.
          .addImm(BumpSP ? Offset * 8 : Offset) // post-dec version is unscaled
          .setMIFlag(MachineInstr::FrameDestroy);
    }
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOLoad, 8, 8));
  }
  return true;
}
/// Emit callee-save spills in the prologue (pre-increment variant).
///
/// Stores each callee-saved register (pair) to its fixed stack slot.  The
/// first spill uses a pre-increment STP/STR that also allocates the whole
/// callee-save area by decrementing SP.  Returns true to signal the target
/// handled the spills itself.
bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;

    // Issue sequence of non-sp increment and pi sp spills for cs regs. The
    // first spill is a pre-increment that allocates the stack.
    // For example:
    //    stp     x22, x21, [sp, #-48]!   // addImm(-6)
    //    stp     x20, x19, [sp, #16]     // addImm(+2)
    //    stp     fp, lr, [sp, #32]       // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in epilog.
    bool BumpSP = RPII == RegPairs.rbegin();
    if (RPI.IsGPR) {
      // For first spill use pre-increment store.
      if (BumpSP)
        StrOpc = RPI.isPaired() ? AArch64::STPXpre : AArch64::STRXpre;
      else
        StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
    } else {
      // For first spill use pre-increment store.
      if (BumpSP)
        StrOpc = RPI.isPaired() ? AArch64::STPDpre : AArch64::STRDpre;
      else
        StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
    }
    DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    // The SP-allocating first store writes downward, hence the negated offset.
    const int Offset = BumpSP ? -RPI.Offset : RPI.Offset;
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    // Pre-increment forms also write SP, so it gets an extra def operand.
    if (BumpSP)
      MIB.addReg(AArch64::SP, RegState::Define);

    if (RPI.isPaired()) {
      MBB.addLiveIn(Reg1);
      MBB.addLiveIn(Reg2);
      MIB.addReg(Reg2, getPrologueDeath(MF, Reg2))
          .addReg(Reg1, getPrologueDeath(MF, Reg1))
          .addReg(AArch64::SP)
          .addImm(Offset) // [sp, #offset * 8], where factor * 8 is implicit
          .setMIFlag(MachineInstr::FrameSetup);
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOStore, 8, 8));
    } else {
      MBB.addLiveIn(Reg1);
      MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
          .addReg(AArch64::SP)
          // Unpaired pre-inc STR takes an unscaled byte offset, so scale by 8
          // here; the indexed form's offset stays implicitly scaled.
          .addImm(BumpSP ? Offset * 8 : Offset) // pre-inc version is unscaled
          .setMIFlag(MachineInstr::FrameSetup);
    }
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOStore, 8, 8));
  }
  return true;
}