/// The liverange splitting logic sometimes produces bundles of copies when
/// subregisters are involved. Expand these into a sequence of copy instructions
/// after processing the last in the bundle. Does not update LiveIntervals
/// which we shouldn't need for this instruction anymore.
void VirtRegRewriter::expandCopyBundle(MachineInstr &MI) const {
  if (!MI.isCopy())
    return;

  if (MI.isBundledWithPred() && !MI.isBundledWithSucc()) {
    // Only do this when the complete bundle is made out of COPYs.
    MachineBasicBlock &MBB = *MI.getParent();
    for (MachineBasicBlock::reverse_instr_iterator I =
             std::next(MI.getReverseIterator()),
             E = MBB.instr_rend();
         I != E && I->isBundledWithSucc(); ++I) {
      if (!I->isCopy())
        return;
    }

    for (MachineBasicBlock::reverse_instr_iterator I = MI.getReverseIterator();
         I->isBundledWithPred();) {
      // Advance the iterator before unbundling so the walk is undisturbed.
      MachineInstr &BundledMI = *I;
      ++I;
      BundledMI.unbundleFromPred();
      if (Indexes)
        Indexes->insertMachineInstrInMaps(BundledMI);
    }
  }
}
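The shape of this transformation can be modeled in isolation. Below is a minimal standalone sketch, with a hypothetical `Instr` struct and `expandCopyBundle` free function standing in for `MachineInstr` and the member above: first verify that every instruction in the bundle is a COPY (bailing out otherwise), then walk the bundle again and clear each instruction's bundle flag.

#include <cassert>
#include <vector>

struct Instr {
  bool IsCopy = true;
  bool BundledWithPred = false; // Set on every instruction after the head.
};

// Clears the bundle flags only if the whole bundle consists of COPYs,
// mirroring the two loops above.
bool expandCopyBundle(std::vector<Instr> &Bundle) {
  for (const Instr &I : Bundle)
    if (!I.IsCopy)
      return false; // Mixed bundle: leave it untouched.
  for (Instr &I : Bundle)
    I.BundledWithPred = false; // Each copy becomes a standalone instruction.
  return true;
}

int main() {
  std::vector<Instr> Bundle(3);
  Bundle[1].BundledWithPred = Bundle[2].BundledWithPred = true;
  assert(expandCopyBundle(Bundle) && !Bundle[1].BundledWithPred);
}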
bool Filler::findDelayInstr(MachineBasicBlock &MBB,
                            MachineBasicBlock::instr_iterator Slot,
                            MachineBasicBlock::instr_iterator &Filler) {
  SmallSet<unsigned, 32> RegDefs;
  SmallSet<unsigned, 32> RegUses;
  insertDefsUses(Slot, RegDefs, RegUses);

  bool SawLoad = false;
  bool SawStore = false;

  for (MachineBasicBlock::reverse_instr_iterator I = ++Slot.getReverse();
       I != MBB.instr_rend(); ++I) {
    // Skip debug values.
    if (I->isDebugValue())
      continue;

    // Convert to forward iterator.
    MachineBasicBlock::instr_iterator FI = I.getReverse();

    if (I->hasUnmodeledSideEffects() || I->isInlineAsm() || I->isLabel() ||
        FI == LastFiller || I->isPseudo())
      break;

    if (delayHasHazard(FI, SawLoad, SawStore, RegDefs, RegUses)) {
      insertDefsUses(FI, RegDefs, RegUses);
      continue;
    }

    Filler = FI;
    return true;
  }
  return false;
}
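The core of this scan is the interplay between the def/use sets and the backward walk: every candidate that is rejected stays where it is, so its defs and uses must be folded into the sets before considering anything earlier. A self-contained sketch of just that interplay, with hypothetical `Instr`, `hasHazard`, and `findDelayInstr` stand-ins for the backend's types (the load/store ordering tracked by SawLoad/SawStore is deliberately left out):

#include <set>
#include <vector>

struct Instr {
  std::set<unsigned> Defs, Uses; // Registers written / read.
};

// True if executing C immediately before the already-scanned instructions
// would break them: C writes a register they read or write, or C reads a
// register they write.
static bool hasHazard(const Instr &C, const std::set<unsigned> &RegDefs,
                      const std::set<unsigned> &RegUses) {
  for (unsigned R : C.Defs)
    if (RegDefs.count(R) || RegUses.count(R))
      return true;
  for (unsigned R : C.Uses)
    if (RegDefs.count(R))
      return true;
  return false;
}

// Prior holds the instructions before the slot, in program order.
// Returns the index of a safe filler, or -1 if only a NOP will do.
static int findDelayInstr(const std::vector<Instr> &Prior, const Instr &Slot) {
  std::set<unsigned> RegDefs(Slot.Defs), RegUses(Slot.Uses);
  for (int I = static_cast<int>(Prior.size()) - 1; I >= 0; --I) {
    if (!hasHazard(Prior[I], RegDefs, RegUses))
      return I;
    // Rejected candidates stay in place; account for their defs and uses.
    RegDefs.insert(Prior[I].Defs.begin(), Prior[I].Defs.end());
    RegUses.insert(Prior[I].Uses.begin(), Prior[I].Uses.end());
  }
  return -1;
}

int main() {
  // The slot instruction reads r3; the instruction right before it writes
  // r3 (hazard), but the one before that touches only r5/r6 (safe).
  std::vector<Instr> Prior = {{{5}, {6}}, {{3}, {4}}};
  Instr Slot = {{}, {3}};
  return findDelayInstr(Prior, Slot) == 0 ? 0 : 1;
}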
// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
// There are one or two delay slots per delayed instruction.
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;
  LastFiller = MBB.instr_end();

  for (MachineBasicBlock::instr_iterator I = MBB.instr_begin();
       I != MBB.instr_end(); ++I) {
    if (I->getDesc().hasDelaySlot()) {
      MachineBasicBlock::instr_iterator InstrWithSlot = I;
      MachineBasicBlock::instr_iterator J = I;

      // Treat RET specially as it is the only instruction generated with 2
      // delay slots, while all others are generated with 1 delay slot.
      if (I->getOpcode() == Lanai::RET) {
        // RET is generated as part of epilogue generation and hence we know
        // what the two instructions preceding it are and that it is safe to
        // insert RET above them.
        MachineBasicBlock::reverse_instr_iterator RI = ++I.getReverse();
        assert(RI->getOpcode() == Lanai::LDW_RI && RI->getOperand(0).isReg() &&
               RI->getOperand(0).getReg() == Lanai::FP &&
               RI->getOperand(1).isReg() &&
               RI->getOperand(1).getReg() == Lanai::FP &&
               RI->getOperand(2).isImm() && RI->getOperand(2).getImm() == -8);
        ++RI;
        assert(RI->getOpcode() == Lanai::ADD_I_LO &&
               RI->getOperand(0).isReg() &&
               RI->getOperand(0).getReg() == Lanai::SP &&
               RI->getOperand(1).isReg() &&
               RI->getOperand(1).getReg() == Lanai::FP);
        MachineBasicBlock::instr_iterator FI = RI.getReverse();
        MBB.splice(std::next(I), &MBB, FI, I);
        FilledSlots += 2;
      } else {
        if (!NopDelaySlotFiller && findDelayInstr(MBB, I, J)) {
          MBB.splice(std::next(I), &MBB, J);
        } else {
          BuildMI(MBB, std::next(I), DebugLoc(), TII->get(Lanai::NOP));
        }
        ++FilledSlots;
      }
      Changed = true;

      // Record the filler instruction that filled the delay slot.
      // The instruction after it will be visited in the next iteration.
      LastFiller = ++I;

      // Bundle the delay slot filler to InstrWithSlot so that the machine
      // verifier doesn't expect this instruction to be a terminator.
      MIBundleBuilder(MBB, InstrWithSlot, std::next(LastFiller));
    }
  }
  return Changed;
}
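The RET case boils down to a single splice: the two epilogue restores that the asserts check for are moved from just before RET to just after it, filling both delay slots while preserving their relative order. A standalone illustration with `std::list<std::string>` standing in for the machine basic block (the instruction strings are only mnemonic):

#include <iostream>
#include <list>
#include <string>

int main() {
  // Epilogue shape the asserts above check for: ADD_I_LO, then LDW_RI,
  // then RET (operands abbreviated).
  std::list<std::string> MBB = {"add.i.lo sp, fp, ...", "ldw -8[fp], fp",
                                "ret"};
  auto Ret = std::prev(MBB.end());
  auto FI = std::prev(Ret, 2); // First of the two epilogue instructions.

  // Mirrors MBB.splice(std::next(I), &MBB, FI, I): move both restores
  // into the two delay slots after RET.
  MBB.splice(std::next(Ret), MBB, FI, Ret);

  for (const std::string &I : MBB)
    std::cout << I << '\n'; // ret / add.i.lo ... / ldw ...
}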
/// The liverange splitting logic sometimes produces bundles of copies when
/// subregisters are involved. Expand these into a sequence of copy instructions
/// after processing the last in the bundle. Does not update LiveIntervals
/// which we shouldn't need for this instruction anymore.
void VirtRegRewriter::expandCopyBundle(MachineInstr &MI) const {
  if (!MI.isCopy())
    return;

  if (MI.isBundledWithPred() && !MI.isBundledWithSucc()) {
    SmallVector<MachineInstr *, 2> MIs({&MI});

    // Only do this when the complete bundle is made out of COPYs.
    MachineBasicBlock &MBB = *MI.getParent();
    for (MachineBasicBlock::reverse_instr_iterator I =
             std::next(MI.getReverseIterator()),
             E = MBB.instr_rend();
         I != E && I->isBundledWithSucc(); ++I) {
      if (!I->isCopy())
        return;
      MIs.push_back(&*I);
    }
    MachineInstr *FirstMI = MIs.back();

    auto anyRegsAlias = [](const MachineInstr *Dst,
                           ArrayRef<MachineInstr *> Srcs,
                           const TargetRegisterInfo *TRI) {
      for (const MachineInstr *Src : Srcs)
        if (Src != Dst)
          if (TRI->regsOverlap(Dst->getOperand(0).getReg(),
                               Src->getOperand(1).getReg()))
            return true;
      return false;
    };

    // If any of the destination registers in the bundle of copies alias any of
    // the source registers, try to schedule the instructions to avoid any
    // clobbering.
    for (int E = MIs.size(), PrevE = E; E > 1; PrevE = E) {
      for (int I = E; I--; )
        if (!anyRegsAlias(MIs[I], makeArrayRef(MIs).take_front(E), TRI)) {
          if (I + 1 != E)
            std::swap(MIs[I], MIs[E - 1]);
          --E;
        }
      if (PrevE == E) {
        MF->getFunction().getContext().emitError(
            "register rewriting failed: cycle in copy bundle");
        break;
      }
    }

    MachineInstr *BundleStart = FirstMI;
    for (MachineInstr *BundledMI : llvm::reverse(MIs)) {
      // If instruction is in the middle of the bundle, move it before the
      // bundle starts, otherwise, just unbundle it. When we get to the last
      // instruction, the bundle will have been completely undone.
      if (BundledMI != BundleStart) {
        BundledMI->removeFromBundle();
        MBB.insert(FirstMI, BundledMI);
      } else if (BundledMI->isBundledWithSucc()) {
        BundledMI->unbundleFromSucc();
        BundleStart = &*std::next(BundledMI->getIterator());
      }
      if (Indexes && BundledMI != FirstMI)
        Indexes->insertMachineInstrInMaps(*BundledMI);
    }
  }
}
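The scheduling loop is an instance of sequentializing a parallel copy: repeatedly pick a copy whose destination is not read by any copy that still has to execute, emit it next, and recurse on the rest; if a full pass makes no progress, the remaining copies form a cycle that cannot be serialized without a temporary, which is why the function emits an error. A self-contained sketch of that idea, with a hypothetical `Copy` struct in place of `MachineInstr` and exact register equality in place of `regsOverlap`:

#include <cassert>
#include <utility>
#include <vector>

struct Copy {
  unsigned Dst, Src; // Models a COPY: Dst = getOperand(0), Src = getOperand(1).
};

// Reorders Copies into a safe execution order: a copy is emitted only when
// no still-pending copy reads the register it overwrites.
// Returns false if the pending copies form a cycle.
bool scheduleCopies(std::vector<Copy> &Copies) {
  const int N = Copies.size();
  for (int P = 0; P < N; ++P) {
    int Pick = -1;
    for (int I = P; I < N && Pick < 0; ++I) {
      bool Clobbers = false;
      for (int J = P; J < N; ++J)
        if (J != I && Copies[I].Dst == Copies[J].Src)
          Clobbers = true; // Executing I now would destroy a pending source.
      if (!Clobbers)
        Pick = I;
    }
    if (Pick < 0)
      return false; // Every pending dst is still a pending src: a cycle.
    std::swap(Copies[P], Copies[Pick]); // Safe to execute next.
  }
  return true;
}

int main() {
  // r1 <- r0 and r2 <- r1 in parallel: r2 <- r1 must run first, before r1
  // is overwritten.
  std::vector<Copy> Copies = {{1, 0}, {2, 1}};
  assert(scheduleCopies(Copies) && Copies[0].Dst == 2);

  // r0 <- r1 and r1 <- r0 form a swap cycle: no order works.
  std::vector<Copy> Cycle = {{0, 1}, {1, 0}};
  assert(!scheduleCopies(Cycle));
}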