unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
                                        MachineBasicBlock::iterator I,
                                        int SPAdj) {
  MachineInstr &MI = *I;
  const MachineFunction &MF = *MI.getParent()->getParent();
  // Consider all allocatable registers in the register class initially
  BitVector Candidates = TRI->getAllocatableSet(MF, RC);

  // Exclude all the registers being used by the instruction.
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
        !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
        Candidates.reset(*AI);
  }

  // Try to find a register that's unused if there is one, as then we won't
  // have to spill.
  BitVector Available = getRegsAvailable(RC);
  Available &= Candidates;
  if (Available.any())
    Candidates = Available;

  // Find the register whose use is furthest away.
  MachineBasicBlock::iterator UseMI;
  unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);

  // If we found an unused register there is no reason to spill it.
  if (!isRegUsed(SReg)) {
    DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
    return SReg;
  }

  ScavengedInfo &Scavenged = spill(SReg, *RC, SPAdj, I, UseMI);
  Scavenged.Restore = &*std::prev(UseMI);

  DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg)
               << "\n");

  return SReg;
}
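// Hypothetical caller sketch (not part of this file): a target's
// eliminateFrameIndex typically asks the scavenger for a scratch register when
// a frame-index offset does not fit in the instruction's immediate field. The
// target and register-class names below are illustrative assumptions, not
// taken from this source.
//
//   void MyTargetRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
//                                                  int SPAdj, unsigned FIOperandNum,
//                                                  RegScavenger *RS) const {
//     ...
//     // Any register of the class will do; if none is free, the scavenger
//     // spills one before II and restores it before its next use.
//     unsigned ScratchReg = RS->scavengeRegister(&MyTarget::GPRRegClass, II, SPAdj);
//     // Materialize the out-of-range offset into ScratchReg, then rewrite
//     // the frame-index operand to use it.
//     ...
//   }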
unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
                                        MachineBasicBlock::iterator I,
                                        int SPAdj) {
  // Consider all allocatable registers in the register class initially
  BitVector Candidates =
    TRI->getAllocatableSet(*I->getParent()->getParent(), RC);

  // Exclude all the registers being used by the instruction.
  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = I->getOperand(i);
    if (MO.isReg() && MO.getReg() != 0 &&
        !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      Candidates.reset(MO.getReg());
  }

  // Try to find a register that's unused if there is one, as then we won't
  // have to spill. Search explicitly rather than masking out based on
  // RegsAvailable, as RegsAvailable does not take aliases into account.
  // That's what getRegsAvailable() is for.
  BitVector Available = getRegsAvailable(RC);
  Available &= Candidates;
  if (Available.any())
    Candidates = Available;

  // Find the register whose use is furthest away.
  MachineBasicBlock::iterator UseMI;
  unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);

  // If we found an unused register there is no reason to spill it.
  if (!isAliasUsed(SReg)) {
    DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
    return SReg;
  }

  assert(ScavengedReg == 0 &&
         "Scavenger slot is live, unable to scavenge another register!");

  // Avoid infinite regress
  ScavengedReg = SReg;

  // If the target knows how to save/restore the register, let it do so;
  // otherwise, use the emergency stack spill slot.
  if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) {
    // Spill the scavenged register before I.
    assert(ScavengingFrameIndex >= 0 &&
           "Cannot scavenge register without an emergency spill slot!");
    TII->storeRegToStackSlot(*MBB, I, SReg, true, ScavengingFrameIndex, RC, TRI);
    MachineBasicBlock::iterator II = prior(I);

    unsigned FIOperandNum = getFrameIndexOperandNum(II);
    TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);

    // Restore the scavenged register before its use (or first terminator).
    TII->loadRegFromStackSlot(*MBB, UseMI, SReg, ScavengingFrameIndex, RC, TRI);
    II = prior(UseMI);

    FIOperandNum = getFrameIndexOperandNum(II);
    TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
  }

  ScavengeRestore = prior(UseMI);

  // Doing this here leads to infinite regress.
  // ScavengedReg = SReg;

  DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg)
               << "\n");

  return SReg;
}
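// The next function builds, for every interesting stack slot, a live interval
// from the LIFETIME_START/LIFETIME_END machine markers plus the per-block
// liveness (BlockLiveness) computed earlier. Those markers are lowered from
// the IR intrinsics llvm.lifetime.start/llvm.lifetime.end on allocas; a
// hypothetical input producing one such marker pair (not taken from this
// file) looks like:
//
//   %buf = alloca [64 x i8]
//   %p = bitcast [64 x i8]* %buf to i8*
//   call void @llvm.lifetime.start(i64 64, i8* %p)
//   ...
//   call void @llvm.lifetime.end(i64 64, i8* %p)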
void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
  SmallVector<SlotIndex, 16> Starts;
  SmallVector<SlotIndex, 16> Finishes;

  // For each block, find which slots are active within this block
  // and update the live intervals.
  for (MachineFunction::iterator MBB = MF->begin(), MBBe = MF->end();
       MBB != MBBe; ++MBB) {
    Starts.clear();
    Starts.resize(NumSlots);
    Finishes.clear();
    Finishes.resize(NumSlots);

    // Create the interval for the basic blocks with lifetime markers in them.
    for (SmallVector<MachineInstr*, 8>::iterator it = Markers.begin(),
         e = Markers.end(); it != e; ++it) {
      MachineInstr *MI = *it;
      if (MI->getParent() != MBB)
        continue;

      assert((MI->getOpcode() == TargetOpcode::LIFETIME_START ||
              MI->getOpcode() == TargetOpcode::LIFETIME_END) &&
             "Invalid Lifetime marker");

      bool IsStart = MI->getOpcode() == TargetOpcode::LIFETIME_START;
      MachineOperand &Mo = MI->getOperand(0);
      int Slot = Mo.getIndex();
      assert(Slot >= 0 && "Invalid slot");

      SlotIndex ThisIndex = Indexes->getInstructionIndex(MI);

      if (IsStart) {
        if (!Starts[Slot].isValid() || Starts[Slot] > ThisIndex)
          Starts[Slot] = ThisIndex;
      } else {
        if (!Finishes[Slot].isValid() || Finishes[Slot] < ThisIndex)
          Finishes[Slot] = ThisIndex;
      }
    }

    // Create the interval of the blocks that we previously found to be 'alive'.
    BitVector Alive = BlockLiveness[MBB].LiveIn;
    Alive |= BlockLiveness[MBB].LiveOut;

    if (Alive.any()) {
      for (int pos = Alive.find_first(); pos != -1;
           pos = Alive.find_next(pos)) {
        if (!Starts[pos].isValid())
          Starts[pos] = Indexes->getMBBStartIdx(MBB);
        if (!Finishes[pos].isValid())
          Finishes[pos] = Indexes->getMBBEndIdx(MBB);
      }
    }

    for (unsigned i = 0; i < NumSlots; ++i) {
      assert(Starts[i].isValid() == Finishes[i].isValid() && "Unmatched range");
      if (!Starts[i].isValid())
        continue;

      assert(Starts[i] && Finishes[i] && "Invalid interval");
      VNInfo *ValNum = Intervals[i]->getValNumInfo(0);
      SlotIndex S = Starts[i];
      SlotIndex F = Finishes[i];
      if (S < F) {
        // We have a single consecutive region.
        Intervals[i]->addRange(LiveRange(S, F, ValNum));
      } else {
        // We have two non consecutive regions. This happens when
        // LIFETIME_START appears after the LIFETIME_END marker.
        SlotIndex NewStart = Indexes->getMBBStartIdx(MBB);
        SlotIndex NewFin = Indexes->getMBBEndIdx(MBB);
        Intervals[i]->addRange(LiveRange(NewStart, F, ValNum));
        Intervals[i]->addRange(LiveRange(S, NewFin, ValNum));
      }
    }
  }
}
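// emitPopSpecialFixUp (below) rewrites a Thumb1 epilogue where LR was spilled
// but cannot simply be popped into PC, for example because an SP adjustment
// for ArgRegsSaveSize still has to be emitted after the pop. The rewritten
// epilogue is then roughly of the following shape (illustrative only, not
// emitted verbatim; rN stands for whichever pop-friendly register is free):
//
//   tPOP {rN}        ; reload the saved LR value into a low register
//   add sp, #imm     ; the deferred SP update
//   mov lr, rN       ; move the reloaded value into LR
//   bx lr            ; return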
bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB,
                                              bool DoIt) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const ThumbRegisterInfo *RegInfo =
      static_cast<const ThumbRegisterInfo *>(STI.getRegisterInfo());

  // If MBBI is a return instruction, we may be able to directly restore
  // LR in the PC.
  // This is possible if we do not need to emit any SP update.
  // Otherwise, we need a temporary register to pop the value
  // and copy that value into LR.
  auto MBBI = MBB.getFirstTerminator();
  if (!ArgRegsSaveSize && MBBI != MBB.end() &&
      MBBI->getOpcode() == ARM::tBX_RET) {
    if (!DoIt)
      return true;
    MachineInstrBuilder MIB =
        AddDefaultPred(
            BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII.get(ARM::tPOP_RET)))
            .addReg(ARM::PC, RegState::Define);
    MIB.copyImplicitOps(&*MBBI);
    // erase the old tBX_RET instruction
    MBB.erase(MBBI);
    return true;
  }

  // Look for a temporary register to use.
  // First, compute the liveness information.
  LivePhysRegs UsedRegs(STI.getRegisterInfo());
  UsedRegs.addLiveOuts(&MBB, /*AddPristines*/ true);
  // The semantics of pristines changed recently and now,
  // the callee-saved registers that are touched in the function
  // are not part of the pristines set anymore.
  // Add those callee-saved now.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    UsedRegs.addReg(CSRegs[i]);

  DebugLoc dl = DebugLoc();
  if (MBBI != MBB.end()) {
    dl = MBBI->getDebugLoc();
    auto InstUpToMBBI = MBB.end();
    // The post-decrement is on purpose here.
    // We want to have the liveness right before MBBI.
    while (InstUpToMBBI-- != MBBI)
      UsedRegs.stepBackward(*InstUpToMBBI);
  }

  // Look for a register that can be directly used in the POP.
  unsigned PopReg = 0;
  // And some temporary register, just in case.
  unsigned TemporaryReg = 0;
  BitVector PopFriendly =
      TRI->getAllocatableSet(MF, TRI->getRegClass(ARM::tGPRRegClassID));
  assert(PopFriendly.any() && "No allocatable pop-friendly register?!");
  // Rebuild the GPRs from the high registers because they are removed
  // from the GPR reg class for thumb1.
  BitVector GPRsNoLRSP =
      TRI->getAllocatableSet(MF, TRI->getRegClass(ARM::hGPRRegClassID));
  GPRsNoLRSP |= PopFriendly;
  GPRsNoLRSP.reset(ARM::LR);
  GPRsNoLRSP.reset(ARM::SP);
  GPRsNoLRSP.reset(ARM::PC);
  for (int Register = GPRsNoLRSP.find_first(); Register != -1;
       Register = GPRsNoLRSP.find_next(Register)) {
    if (!UsedRegs.contains(Register)) {
      // Remember the first pop-friendly register and exit.
      if (PopFriendly.test(Register)) {
        PopReg = Register;
        TemporaryReg = 0;
        break;
      }
      // Otherwise, remember that the register will be available to
      // save a pop-friendly register.
      TemporaryReg = Register;
    }
  }

  if (!DoIt && !PopReg && !TemporaryReg)
    return false;

  assert((PopReg || TemporaryReg) && "Cannot get LR");

  if (TemporaryReg) {
    assert(!PopReg && "Unnecessary MOV is about to be inserted");
    PopReg = PopFriendly.find_first();
    AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
                       .addReg(TemporaryReg, RegState::Define)
                       .addReg(PopReg, RegState::Kill));
  }

  assert(PopReg && "Do not know how to get LR");
  AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)))
      .addReg(PopReg, RegState::Define);

  emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, ArgRegsSaveSize);

  if (!TemporaryReg && MBBI != MBB.end() &&
      MBBI->getOpcode() == ARM::tBX_RET) {
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX))
            .addReg(PopReg, RegState::Kill);
    AddDefaultPred(MIB);
    MIB.copyImplicitOps(&*MBBI);
    // erase the old tBX_RET instruction
    MBB.erase(MBBI);
    return true;
  }

  AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
                     .addReg(ARM::LR, RegState::Define)
                     .addReg(PopReg, RegState::Kill));

  if (TemporaryReg) {
    AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
                       .addReg(PopReg, RegState::Define)
                       .addReg(TemporaryReg, RegState::Kill));
  }

  return true;
}
bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB,
                                              bool DoIt) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const ThumbRegisterInfo *RegInfo =
      static_cast<const ThumbRegisterInfo *>(STI.getRegisterInfo());

  // If MBBI is a return instruction, or is a tPOP followed by a return
  // instruction in the successor BB, we may be able to directly restore
  // LR in the PC.
  // This is only possible with v5T ops (v4T can't change the Thumb bit via
  // a POP PC instruction), and only if we do not need to emit any SP update.
  // Otherwise, we need a temporary register to pop the value
  // and copy that value into LR.
  auto MBBI = MBB.getFirstTerminator();
  bool CanRestoreDirectly = STI.hasV5TOps() && !ArgRegsSaveSize;
  if (CanRestoreDirectly) {
    if (MBBI != MBB.end() && MBBI->getOpcode() != ARM::tB)
      CanRestoreDirectly = (MBBI->getOpcode() == ARM::tBX_RET ||
                            MBBI->getOpcode() == ARM::tPOP_RET);
    else {
      auto MBBI_prev = MBBI;
      MBBI_prev--;
      assert(MBBI_prev->getOpcode() == ARM::tPOP);
      assert(MBB.succ_size() == 1);
      if ((*MBB.succ_begin())->begin()->getOpcode() == ARM::tBX_RET)
        MBBI = MBBI_prev; // Replace the final tPOP with a tPOP_RET.
      else
        CanRestoreDirectly = false;
    }
  }

  if (CanRestoreDirectly) {
    if (!DoIt || MBBI->getOpcode() == ARM::tPOP_RET)
      return true;
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII.get(ARM::tPOP_RET))
            .add(predOps(ARMCC::AL));
    // Copy implicit ops and popped registers, if any.
    for (auto MO: MBBI->operands())
      if (MO.isReg() && (MO.isImplicit() || MO.isDef()))
        MIB.add(MO);
    MIB.addReg(ARM::PC, RegState::Define);
    // Erase the old instruction (tBX_RET or tPOP).
    MBB.erase(MBBI);
    return true;
  }

  // Look for a temporary register to use.
  // First, compute the liveness information.
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  LivePhysRegs UsedRegs(TRI);
  UsedRegs.addLiveOuts(MBB);
  // The semantics of pristines changed recently and now,
  // the callee-saved registers that are touched in the function
  // are not part of the pristines set anymore.
  // Add those callee-saved now.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    UsedRegs.addReg(CSRegs[i]);

  DebugLoc dl = DebugLoc();
  if (MBBI != MBB.end()) {
    dl = MBBI->getDebugLoc();
    auto InstUpToMBBI = MBB.end();
    while (InstUpToMBBI != MBBI)
      // The pre-decrement is on purpose here.
      // We want to have the liveness right before MBBI.
      UsedRegs.stepBackward(*--InstUpToMBBI);
  }

  // Look for a register that can be directly used in the POP.
  unsigned PopReg = 0;
  // And some temporary register, just in case.
  unsigned TemporaryReg = 0;
  BitVector PopFriendly =
      TRI.getAllocatableSet(MF, TRI.getRegClass(ARM::tGPRRegClassID));
  assert(PopFriendly.any() && "No allocatable pop-friendly register?!");
  // Rebuild the GPRs from the high registers because they are removed
  // from the GPR reg class for thumb1.
  BitVector GPRsNoLRSP =
      TRI.getAllocatableSet(MF, TRI.getRegClass(ARM::hGPRRegClassID));
  GPRsNoLRSP |= PopFriendly;
  GPRsNoLRSP.reset(ARM::LR);
  GPRsNoLRSP.reset(ARM::SP);
  GPRsNoLRSP.reset(ARM::PC);
  findTemporariesForLR(GPRsNoLRSP, PopFriendly, UsedRegs, PopReg, TemporaryReg);

  // If we couldn't find a pop-friendly register, restore LR before popping the
  // other callee-saved registers, so we can use one of them as a temporary.
  bool UseLDRSP = false;
  if (!PopReg && MBBI != MBB.begin()) {
    auto PrevMBBI = MBBI;
    PrevMBBI--;
    if (PrevMBBI->getOpcode() == ARM::tPOP) {
      MBBI = PrevMBBI;
      UsedRegs.stepBackward(*MBBI);
      findTemporariesForLR(GPRsNoLRSP, PopFriendly, UsedRegs, PopReg,
                           TemporaryReg);
      UseLDRSP = true;
    }
  }

  if (!DoIt && !PopReg && !TemporaryReg)
    return false;

  assert((PopReg || TemporaryReg) && "Cannot get LR");

  if (UseLDRSP) {
    assert(PopReg && "Do not know how to get LR");
    // Load the LR via LDR tmp, [SP, #off]
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRspi))
        .addReg(PopReg, RegState::Define)
        .addReg(ARM::SP)
        .addImm(MBBI->getNumExplicitOperands() - 2)
        .add(predOps(ARMCC::AL));
    // Move from the temporary register to the LR.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
        .addReg(ARM::LR, RegState::Define)
        .addReg(PopReg, RegState::Kill)
        .add(predOps(ARMCC::AL));
    // Advance past the pop instruction.
    MBBI++;
    // Increment the SP.
    emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, ArgRegsSaveSize + 4);
    return true;
  }

  if (TemporaryReg) {
    assert(!PopReg && "Unnecessary MOV is about to be inserted");
    PopReg = PopFriendly.find_first();
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
        .addReg(TemporaryReg, RegState::Define)
        .addReg(PopReg, RegState::Kill)
        .add(predOps(ARMCC::AL));
  }

  if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPOP_RET) {
    // We couldn't use the direct restoration above, so
    // perform the opposite conversion: tPOP_RET to tPOP.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII.get(ARM::tPOP))
            .add(predOps(ARMCC::AL));
    bool Popped = false;
    for (auto MO: MBBI->operands())
      if (MO.isReg() && (MO.isImplicit() || MO.isDef()) &&
          MO.getReg() != ARM::PC) {
        MIB.add(MO);
        if (!MO.isImplicit())
          Popped = true;
      }
    // Is there anything left to pop?
    if (!Popped)
      MBB.erase(MIB.getInstr());
    // Erase the old instruction.
    MBB.erase(MBBI);
    MBBI = BuildMI(MBB, MBB.end(), dl, TII.get(ARM::tBX_RET))
               .add(predOps(ARMCC::AL));
  }

  assert(PopReg && "Do not know how to get LR");
  BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP))
      .add(predOps(ARMCC::AL))
      .addReg(PopReg, RegState::Define);

  emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, ArgRegsSaveSize);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
      .addReg(ARM::LR, RegState::Define)
      .addReg(PopReg, RegState::Kill)
      .add(predOps(ARMCC::AL));

  if (TemporaryReg)
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr))
        .addReg(PopReg, RegState::Define)
        .addReg(TemporaryReg, RegState::Kill)
        .add(predOps(ARMCC::AL));

  return true;
}
unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
                                        MachineBasicBlock::iterator I,
                                        int SPAdj) {
  // Consider all allocatable registers in the register class initially
  BitVector Candidates =
    TRI->getAllocatableSet(*I->getParent()->getParent(), RC);

  // Exclude all the registers being used by the instruction.
  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = I->getOperand(i);
    if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
        !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      Candidates.reset(MO.getReg());
  }

  // Try to find a register that's unused if there is one, as then we won't
  // have to spill.
  BitVector Available = getRegsAvailable(RC);
  Available &= Candidates;
  if (Available.any())
    Candidates = Available;

  // Find the register whose use is furthest away.
  MachineBasicBlock::iterator UseMI;
  unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);

  // If we found an unused register there is no reason to spill it.
  if (!isRegUsed(SReg)) {
    DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
    return SReg;
  }

  // Find an available scavenging slot.
  unsigned SI;
  for (SI = 0; SI < Scavenged.size(); ++SI)
    if (Scavenged[SI].Reg == 0)
      break;

  if (SI == Scavenged.size()) {
    // We need to scavenge a register but have no spill slot, the target
    // must know how to do it (if not, we'll assert below).
    Scavenged.push_back(ScavengedInfo());
  }

  // Avoid infinite regress
  Scavenged[SI].Reg = SReg;

  // If the target knows how to save/restore the register, let it do so;
  // otherwise, use the emergency stack spill slot.
  if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) {
    // Spill the scavenged register before I.
    assert(Scavenged[SI].FrameIndex >= 0 &&
           "Cannot scavenge register without an emergency spill slot!");
    TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex,
                             RC, TRI);
    MachineBasicBlock::iterator II = std::prev(I);

    unsigned FIOperandNum = getFrameIndexOperandNum(II);
    TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);

    // Restore the scavenged register before its use (or first terminator).
    TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,
                              RC, TRI);
    II = std::prev(UseMI);

    FIOperandNum = getFrameIndexOperandNum(II);
    TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
  }

  Scavenged[SI].Restore = std::prev(UseMI);

  // Doing this here leads to infinite regress.
  // Scavenged[SI].Reg = SReg;

  DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg)
               << "\n");

  return SReg;
}
unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
                                        MachineBasicBlock::iterator I,
                                        int SPAdj) {
  MachineInstr &MI = *I;
  const MachineFunction &MF = *MI.getParent()->getParent();
  // Consider all allocatable registers in the register class initially
  BitVector Candidates = TRI->getAllocatableSet(MF, RC);

  // Exclude all the registers being used by the instruction.
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) &&
        !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
        Candidates.reset(*AI);
  }

  // Try to find a register that's unused if there is one, as then we won't
  // have to spill.
  BitVector Available = getRegsAvailable(RC);
  Available &= Candidates;
  if (Available.any())
    Candidates = Available;

  // Find the register whose use is furthest away.
  MachineBasicBlock::iterator UseMI;
  unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);

  // If we found an unused register there is no reason to spill it.
  if (!isRegUsed(SReg)) {
    DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
    return SReg;
  }

  // Find an available scavenging slot with size and alignment matching
  // the requirements of the class RC.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned NeedSize = RC->getSize();
  unsigned NeedAlign = RC->getAlignment();

  unsigned SI = Scavenged.size(), Diff = UINT_MAX;
  int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();
  for (unsigned I = 0; I < Scavenged.size(); ++I) {
    if (Scavenged[I].Reg != 0)
      continue;
    // Verify that this slot is valid for this register.
    int FI = Scavenged[I].FrameIndex;
    if (FI < FIB || FI >= FIE)
      continue;
    unsigned S = MFI.getObjectSize(FI);
    unsigned A = MFI.getObjectAlignment(FI);
    if (NeedSize > S || NeedAlign > A)
      continue;
    // Avoid wasting slots with large size and/or large alignment. Pick one
    // that is the best fit for this register class (in street metric).
    // Picking a larger slot than necessary could happen if a slot for a
    // larger register is reserved before a slot for a smaller one. When
    // trying to spill a smaller register, the large slot would be found
    // first, thus making it impossible to spill the larger register later.
    unsigned D = (S-NeedSize) + (A-NeedAlign);
    if (D < Diff) {
      SI = I;
      Diff = D;
    }
  }

  if (SI == Scavenged.size()) {
    // We need to scavenge a register but have no spill slot, the target
    // must know how to do it (if not, we'll assert below).
    Scavenged.push_back(ScavengedInfo(FIE));
  }

  // Avoid infinite regress
  Scavenged[SI].Reg = SReg;

  // If the target knows how to save/restore the register, let it do so;
  // otherwise, use the emergency stack spill slot.
  if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) {
    // Spill the scavenged register before I.
    int FI = Scavenged[SI].FrameIndex;
    if (FI < FIB || FI >= FIE) {
      std::string Msg = std::string("Error while trying to spill ") +
          TRI->getName(SReg) + " from class " + TRI->getRegClassName(RC) +
          ": Cannot scavenge register without an emergency spill slot!";
      report_fatal_error(Msg.c_str());
    }
    TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex,
                             RC, TRI);
    MachineBasicBlock::iterator II = std::prev(I);

    unsigned FIOperandNum = getFrameIndexOperandNum(*II);
    TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);

    // Restore the scavenged register before its use (or first terminator).
    TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,
                              RC, TRI);
    II = std::prev(UseMI);

    FIOperandNum = getFrameIndexOperandNum(*II);
    TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this);
  }

  Scavenged[SI].Restore = &*std::prev(UseMI);

  // Doing this here leads to infinite regress.
  // Scavenged[SI].Reg = SReg;

  DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg)
               << "\n");

  return SReg;
}
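// The emergency spill slot consulted above (ScavengingFrameIndex /
// Scavenged[SI].FrameIndex) has to be reserved by the target while the frame
// is still being laid out. A minimal sketch of how a target's frame lowering
// might reserve it, assuming the hypothetical target and helper names shown
// here (illustrative only, not from this file):
//
//   void MyTargetFrameLowering::processFunctionBeforeFrameFinalized(
//       MachineFunction &MF, RegScavenger *RS) const {
//     MachineFrameInfo &MFI = MF.getFrameInfo();
//     const TargetRegisterClass &RC = MyTarget::GPRRegClass;
//     if (RS && frameMayRequireScavenging(MF))   // hypothetical heuristic
//       RS->addScavengingFrameIndex(
//           MFI.CreateStackObject(RC.getSize(), RC.getAlignment(), false));
//   }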