unsigned LembergInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const { // Shouldn't be a fall through. assert(TBB && "InsertBranch must not be told to insert a fallthrough"); assert((Cond.size() == 2 || Cond.size() == 0) && "Lemberg branch conditions can only have two components!"); if (FBB == 0) { // One way branch. if (Cond.empty()) { // Unconditional branch? BuildMI(&MBB, DL, get(Lemberg::JUMP)).addMBB(TBB); } else if (Cond[1].getImm() == LembergCC::FALSE || Cond[1].getImm() == LembergCC::TRUE) { BuildMI(&MBB, DL, get(Lemberg::JUMPpred)) .addOperand(Cond[1]).addOperand(Cond[0]).addMBB(TBB); } else { unsigned Opcode; switch (Cond[1].getImm()) { case LembergCC::EQZ: Opcode = Lemberg::JUMPeqz; break; case LembergCC::NEZ: Opcode = Lemberg::JUMPnez; break; case LembergCC::LTZ: Opcode = Lemberg::JUMPltz; break; case LembergCC::GEZ: Opcode = Lemberg::JUMPgez; break; case LembergCC::GTZ: Opcode = Lemberg::JUMPgtz; break; case LembergCC::LEZ: Opcode = Lemberg::JUMPlez; break; } MachineInstr *MI = BuildMI(&MBB, DL, get(Opcode)).addOperand(Cond[0]).addMBB(TBB); MI->addRegisterDead(Lemberg::C3, &RI); } return 1; } else { // Two-way conditional branch. 
if (Cond[1].getImm() == LembergCC::FALSE || Cond[1].getImm() == LembergCC::TRUE) { BuildMI(&MBB, DL, get(Lemberg::JUMPpred)) .addOperand(Cond[1]).addOperand(Cond[0]).addMBB(TBB); } else { unsigned Opcode; switch (Cond[1].getImm()) { case LembergCC::EQZ: Opcode = Lemberg::JUMPeqz; break; case LembergCC::NEZ: Opcode = Lemberg::JUMPnez; break; case LembergCC::LTZ: Opcode = Lemberg::JUMPltz; break; case LembergCC::GEZ: Opcode = Lemberg::JUMPgez; break; case LembergCC::GTZ: Opcode = Lemberg::JUMPgtz; break; case LembergCC::LEZ: Opcode = Lemberg::JUMPlez; break; } MachineInstr *MI = BuildMI(&MBB, DL, get(Opcode)).addOperand(Cond[0]).addMBB(TBB); MI->addRegisterDead(Lemberg::C3, &RI); } BuildMI(&MBB, DL, get(Lemberg::JUMP)).addMBB(FBB); return 2; } }
void SplitEditor::deleteRematVictims() { SmallVector<MachineInstr*, 8> Dead; for (LiveRangeEdit::iterator I = Edit->begin(), E = Edit->end(); I != E; ++I){ LiveInterval *LI = *I; for (LiveInterval::const_iterator LII = LI->begin(), LIE = LI->end(); LII != LIE; ++LII) { // Dead defs end at the store slot. if (LII->end != LII->valno->def.getNextSlot()) continue; MachineInstr *MI = LIS.getInstructionFromIndex(LII->valno->def); assert(MI && "Missing instruction for dead def"); MI->addRegisterDead(LI->reg, &TRI); if (!MI->allDefsAreDead()) continue; DEBUG(dbgs() << "All defs dead: " << *MI); Dead.push_back(MI); } } if (Dead.empty()) return; Edit->eliminateDeadDefs(Dead, LIS, VRM, TII); }
bool LiveRangeEdit::foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead) { MachineInstr *DefMI = 0, *UseMI = 0; // Check that there is a single def and a single use. for (MachineRegisterInfo::reg_nodbg_iterator I = MRI.reg_nodbg_begin(LI->reg), E = MRI.reg_nodbg_end(); I != E; ++I) { MachineOperand &MO = I.getOperand(); MachineInstr *MI = MO.getParent(); if (MO.isDef()) { if (DefMI && DefMI != MI) return false; if (!MI->canFoldAsLoad()) return false; DefMI = MI; } else if (!MO.isUndef()) { if (UseMI && UseMI != MI) return false; // FIXME: Targets don't know how to fold subreg uses. if (MO.getSubReg()) return false; UseMI = MI; } } if (!DefMI || !UseMI) return false; // Since we're moving the DefMI load, make sure we're not extending any live // ranges. if (!allUsesAvailableAt(DefMI, LIS.getInstructionIndex(DefMI), LIS.getInstructionIndex(UseMI))) return false; // We also need to make sure it is safe to move the load. // Assume there are stores between DefMI and UseMI. bool SawStore = true; if (!DefMI->isSafeToMove(&TII, 0, SawStore)) return false; DEBUG(dbgs() << "Try to fold single def: " << *DefMI << " into single use: " << *UseMI); SmallVector<unsigned, 8> Ops; if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second) return false; MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI); if (!FoldMI) return false; DEBUG(dbgs() << " folded: " << *FoldMI); LIS.ReplaceMachineInstrInMaps(UseMI, FoldMI); UseMI->eraseFromParent(); DefMI->addRegisterDead(LI->reg, 0); Dead.push_back(DefMI); ++NumDCEFoldedLoads; return true; }
/// foldAsLoad - Fold LI's single defining load into its single use.
/// NOTE(review): near-duplicate of the two-argument foldAsLoad overload in
/// this file; this variant takes its analyses as explicit parameters,
/// queries canFoldAsLoad() through the instruction descriptor, and omits the
/// allUsesAvailableAt/isSafeToMove safety checks — presumably an older
/// revision; confirm which one is current before modifying either.
bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
                               SmallVectorImpl<MachineInstr*> &Dead,
                               MachineRegisterInfo &MRI, LiveIntervals &LIS,
                               const TargetInstrInfo &TII) {
  MachineInstr *DefMI = 0, *UseMI = 0;

  // Check that there is a single def and a single use.
  for (MachineRegisterInfo::reg_nodbg_iterator I = MRI.reg_nodbg_begin(LI->reg),
       E = MRI.reg_nodbg_end(); I != E; ++I) {
    MachineOperand &MO = I.getOperand();
    MachineInstr *MI = MO.getParent();
    if (MO.isDef()) {
      // More than one defining instruction disqualifies the fold.
      if (DefMI && DefMI != MI)
        return false;
      if (!MI->getDesc().canFoldAsLoad())
        return false;
      DefMI = MI;
    } else if (!MO.isUndef()) {
      // More than one using instruction disqualifies the fold.
      if (UseMI && UseMI != MI)
        return false;
      // FIXME: Targets don't know how to fold subreg uses.
      if (MO.getSubReg())
        return false;
      UseMI = MI;
    }
  }
  if (!DefMI || !UseMI)
    return false;

  DEBUG(dbgs() << "Try to fold single def: " << *DefMI
               << " into single use: " << *UseMI);

  SmallVector<unsigned, 8> Ops;
  // A use that also writes the register cannot be folded.
  if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
    return false;

  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI);
  if (!FoldMI)
    return false;
  DEBUG(dbgs() << " folded: " << *FoldMI);
  // Replace the use with the folded instruction; the load's def is now dead
  // and queued for dead-code elimination by the caller.
  LIS.ReplaceMachineInstrInMaps(UseMI, FoldMI);
  UseMI->eraseFromParent();
  DefMI->addRegisterDead(LI->reg, 0);
  Dead.push_back(DefMI);
  ++NumDCEFoldedLoads;
  return true;
}
/// computeDeadValues - Walk all value numbers of LI, mark dead defs on their
/// defining instructions, and remove dead PHI values.
///
/// \param dead  if non-null, instructions whose defs are all dead are
///              appended here for later elimination by the caller.
/// \returns true if a dead PHI value was removed (the interval may now be
///          separable into multiple connected components).
bool LiveIntervals::computeDeadValues(LiveInterval &LI,
                                      SmallVectorImpl<MachineInstr*> *dead) {
  bool PHIRemoved = false;
  for (auto VNI : LI.valnos) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LiveRange::iterator I = LI.FindSegmentContaining(Def);
    assert(I != LI.end() && "Missing segment for VNI");

    // Is the register live before? Otherwise we may have to add a read-undef
    // flag for subregister defs.
    if (MRI->tracksSubRegLiveness()) {
      if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
        MachineInstr *MI = getInstructionFromIndex(Def);
        MI->addRegisterDefReadUndef(LI.reg);
      }
    }

    // A value whose segment ends at the dead slot of its def is never read.
    if (I->end != Def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      LI.removeSegment(I);
      DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
      PHIRemoved = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(Def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(LI.reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }
  return PHIRemoved;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible, /// and trim the live ranges after. void InlineSpiller::reMaterializeAll() { // analyzeSiblingValues has already tested all relevant defining instructions. if (!Edit->anyRematerializable(AA)) return; UsedValues.clear(); // Try to remat before all uses of snippets. bool anyRemat = false; for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) { unsigned Reg = RegsToSpill[i]; LiveInterval &LI = LIS.getInterval(Reg); for (MachineRegisterInfo::use_nodbg_iterator RI = MRI.use_nodbg_begin(Reg); MachineInstr *MI = RI.skipBundle();) anyRemat |= reMaterializeFor(LI, MI); } if (!anyRemat) return; // Remove any values that were completely rematted. for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) { unsigned Reg = RegsToSpill[i]; LiveInterval &LI = LIS.getInterval(Reg); for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end(); I != E; ++I) { VNInfo *VNI = *I; if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI)) continue; MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def); MI->addRegisterDead(Reg, &TRI); if (!MI->allDefsAreDead()) continue; DEBUG(dbgs() << "All defs dead: " << *MI); DeadDefs.push_back(MI); } } // Eliminate dead code after remat. Note that some snippet copies may be // deleted here. if (DeadDefs.empty()) return; DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n"); Edit->eliminateDeadDefs(DeadDefs, RegsToSpill); // Get rid of deleted and empty intervals. unsigned ResultPos = 0; for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) { unsigned Reg = RegsToSpill[i]; if (!LIS.hasInterval(Reg)) continue; LiveInterval &LI = LIS.getInterval(Reg); if (LI.empty()) { Edit->eraseVirtReg(Reg); continue; } RegsToSpill[ResultPos++] = Reg; } RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end()); DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n"); }
/// rewrite - Replace every virtual-register operand in the function with its
/// assigned physical register, fixing up sub-register indices and
/// kill/dead/undef flags, then delete identity copies that result.
void VirtRegRewriter::rewrite() {
  bool NoSubRegLiveness = !MRI->subRegLivenessEnabled();
  SmallVector<unsigned, 8> SuperDeads;
  SmallVector<unsigned, 8> SuperDefs;
  SmallVector<unsigned, 8> SuperKills;

  for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
       MBBI != MBBE; ++MBBI) {
    DEBUG(MBBI->print(dbgs(), Indexes));
    for (MachineBasicBlock::instr_iterator MII = MBBI->instr_begin(),
         MIE = MBBI->instr_end(); MII != MIE;) {
      // Advance MII before rewriting so MI can be safely erased below.
      MachineInstr *MI = &*MII;
      ++MII;

      for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
           MOE = MI->operands_end(); MOI != MOE; ++MOI) {
        MachineOperand &MO = *MOI;

        // Make sure MRI knows about registers clobbered by regmasks.
        if (MO.isRegMask())
          MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());

        if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
          continue;
        unsigned VirtReg = MO.getReg();
        unsigned PhysReg = VRM->getPhys(VirtReg);
        assert(PhysReg != VirtRegMap::NO_PHYS_REG &&
               "Instruction uses unmapped VirtReg");
        assert(!MRI->isReserved(PhysReg) && "Reserved register assignment");

        // Preserve semantics of sub-register operands.
        unsigned SubReg = MO.getSubReg();
        if (SubReg != 0) {
          if (NoSubRegLiveness) {
            // A virtual register kill refers to the whole register, so we may
            // have to add <imp-use,kill> operands for the super-register. A
            // partial redef always kills and redefines the super-register.
            if (MO.readsReg() && (MO.isDef() || MO.isKill()))
              SuperKills.push_back(PhysReg);

            if (MO.isDef()) {
              // Also add implicit defs for the super-register.
              if (MO.isDead())
                SuperDeads.push_back(PhysReg);
              else
                SuperDefs.push_back(PhysReg);
            }
          } else {
            if (MO.isUse()) {
              if (readsUndefSubreg(MO))
                // We need to add an <undef> flag if the subregister is
                // completely undefined (and we are not adding super-register
                // defs).
                MO.setIsUndef(true);
            } else if (!MO.isDead()) {
              assert(MO.isDef());
            }
          }

          // The <def,undef> flag only makes sense for sub-register defs, and
          // we are substituting a full physreg. An <imp-use,kill> operand
          // from the SuperKills list will represent the partial read of the
          // super-register.
          if (MO.isDef())
            MO.setIsUndef(false);

          // PhysReg operands cannot have subregister indexes.
          PhysReg = TRI->getSubReg(PhysReg, SubReg);
          assert(PhysReg && "Invalid SubReg for physical register");
          MO.setSubReg(0);
        }

        // Rewrite. Note we could have used MachineOperand::substPhysReg(), but
        // we need the inlining here.
        MO.setReg(PhysReg);
      }

      // Add any missing super-register kills after rewriting the whole
      // instruction.
      while (!SuperKills.empty())
        MI->addRegisterKilled(SuperKills.pop_back_val(), TRI, true);
      while (!SuperDeads.empty())
        MI->addRegisterDead(SuperDeads.pop_back_val(), TRI, true);
      while (!SuperDefs.empty())
        MI->addRegisterDefined(SuperDefs.pop_back_val(), TRI);

      DEBUG(dbgs() << "> " << *MI);

      // Finally, remove any identity copies.
      if (MI->isIdentityCopy()) {
        ++NumIdCopies;
        DEBUG(dbgs() << "Deleting identity copy.\n");
        if (Indexes)
          Indexes->removeMachineInstrFromMaps(*MI);
        // It's safe to erase MI because MII has already been incremented.
        MI->eraseFromParent();
      }
    }
  }
}
/// rewrite - Replace every virtual-register operand with its assigned
/// physical register, fix sub-register operands and kill/dead flags, delete
/// identity copies, and record which physical registers are actually used.
/// NOTE(review): a second revision of VirtRegRewriter::rewrite also appears
/// in this file; confirm which is current before modifying.
void VirtRegRewriter::rewrite() {
  SmallVector<unsigned, 8> SuperDeads;
  SmallVector<unsigned, 8> SuperDefs;
  SmallVector<unsigned, 8> SuperKills;
  SmallPtrSet<const MachineInstr *, 4> NoReturnInsts;

  // Here we have a SparseSet to hold which PhysRegs are actually encountered
  // in the MF we are about to iterate over so that later when we call
  // setPhysRegUsed, we are only doing it for physRegs that were actually found
  // in the program and not for all of the possible physRegs for the given
  // target architecture. If the target has a lot of physRegs, then for a small
  // program there will be a significant compile time reduction here.
  PhysRegs.clear();
  PhysRegs.setUniverse(TRI->getNumRegs());

  // The function with uwtable should guarantee that the stack unwinder
  // can unwind the stack to the previous frame. Thus, we can't apply the
  // noreturn optimization if the caller function has uwtable attribute.
  bool HasUWTable = MF->getFunction()->hasFnAttribute(Attribute::UWTable);

  for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
       MBBI != MBBE; ++MBBI) {
    DEBUG(MBBI->print(dbgs(), Indexes));
    bool IsExitBB = MBBI->succ_empty();
    for (MachineBasicBlock::instr_iterator MII = MBBI->instr_begin(),
         MIE = MBBI->instr_end(); MII != MIE;) {
      // Advance MII before rewriting so MI can be safely erased below.
      MachineInstr *MI = MII;
      ++MII;

      // Check if this instruction is a call to a noreturn function. If this
      // is a call to noreturn function and we don't need the stack unwinding
      // functionality (i.e. this function does not have uwtable attribute and
      // the callee function has the nounwind attribute), then we can ignore
      // the definitions set by this instruction.
      if (!HasUWTable && IsExitBB && MI->isCall()) {
        for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
             MOE = MI->operands_end(); MOI != MOE; ++MOI) {
          MachineOperand &MO = *MOI;
          if (!MO.isGlobal())
            continue;
          const Function *Func = dyn_cast<Function>(MO.getGlobal());
          if (!Func || !Func->hasFnAttribute(Attribute::NoReturn) ||
              // We need to keep correct unwind information
              // even if the function will not return, since the
              // runtime may need it.
              !Func->hasFnAttribute(Attribute::NoUnwind))
            continue;
          NoReturnInsts.insert(MI);
          break;
        }
      }

      for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
           MOE = MI->operands_end(); MOI != MOE; ++MOI) {
        MachineOperand &MO = *MOI;

        // Make sure MRI knows about registers clobbered by regmasks.
        if (MO.isRegMask())
          MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());

        // If we encounter a VirtReg or PhysReg then get at the PhysReg and add
        // it to the physreg bitset. Later we use only the PhysRegs that were
        // actually encountered in the MF to populate the MRI's used physregs.
        if (MO.isReg() && MO.getReg())
          PhysRegs.insert(
              TargetRegisterInfo::isVirtualRegister(MO.getReg()) ?
              VRM->getPhys(MO.getReg()) :
              MO.getReg());

        if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
          continue;
        unsigned VirtReg = MO.getReg();
        unsigned PhysReg = VRM->getPhys(VirtReg);
        assert(PhysReg != VirtRegMap::NO_PHYS_REG &&
               "Instruction uses unmapped VirtReg");
        assert(!MRI->isReserved(PhysReg) && "Reserved register assignment");

        // Preserve semantics of sub-register operands.
        if (MO.getSubReg()) {
          // A virtual register kill refers to the whole register, so we may
          // have to add <imp-use,kill> operands for the super-register. A
          // partial redef always kills and redefines the super-register.
          if (MO.readsReg() && (MO.isDef() || MO.isKill()))
            SuperKills.push_back(PhysReg);

          if (MO.isDef()) {
            // The <def,undef> flag only makes sense for sub-register defs, and
            // we are substituting a full physreg. An <imp-use,kill> operand
            // from the SuperKills list will represent the partial read of the
            // super-register.
            MO.setIsUndef(false);

            // Also add implicit defs for the super-register.
            if (MO.isDead())
              SuperDeads.push_back(PhysReg);
            else
              SuperDefs.push_back(PhysReg);
          }

          // PhysReg operands cannot have subregister indexes.
          PhysReg = TRI->getSubReg(PhysReg, MO.getSubReg());
          assert(PhysReg && "Invalid SubReg for physical register");
          MO.setSubReg(0);
        }

        // Rewrite. Note we could have used MachineOperand::substPhysReg(), but
        // we need the inlining here.
        MO.setReg(PhysReg);
      }

      // Add any missing super-register kills after rewriting the whole
      // instruction.
      while (!SuperKills.empty())
        MI->addRegisterKilled(SuperKills.pop_back_val(), TRI, true);
      while (!SuperDeads.empty())
        MI->addRegisterDead(SuperDeads.pop_back_val(), TRI, true);
      while (!SuperDefs.empty())
        MI->addRegisterDefined(SuperDefs.pop_back_val(), TRI);

      DEBUG(dbgs() << "> " << *MI);

      // Finally, remove any identity copies.
      if (MI->isIdentityCopy()) {
        ++NumIdCopies;
        if (MI->getNumOperands() == 2) {
          DEBUG(dbgs() << "Deleting identity copy.\n");
          if (Indexes)
            Indexes->removeMachineInstrFromMaps(MI);
          // It's safe to erase MI because MII has already been incremented.
          MI->eraseFromParent();
        } else {
          // Transform identity copy to a KILL to deal with subregisters.
          MI->setDesc(TII->get(TargetOpcode::KILL));
          DEBUG(dbgs() << "Identity copy: " << *MI);
        }
      }
    }
  }

  // Tell MRI about physical registers in use.
  if (NoReturnInsts.empty()) {
    for (SparseSet<unsigned>::iterator
         RegI = PhysRegs.begin(), E = PhysRegs.end(); RegI != E; ++RegI)
      if (!MRI->reg_nodbg_empty(*RegI))
        MRI->setPhysRegUsed(*RegI);
  } else {
    for (SparseSet<unsigned>::iterator
         I = PhysRegs.begin(), E = PhysRegs.end(); I != E; ++I) {
      unsigned Reg = *I;
      if (MRI->reg_nodbg_empty(Reg))
        continue;
      // Check if this register has a use that will impact the rest of the
      // code. Uses in debug and noreturn instructions do not impact the
      // generated code.
      for (MachineInstr &It : MRI->reg_nodbg_instructions(Reg)) {
        if (!NoReturnInsts.count(&It)) {
          MRI->setPhysRegUsed(Reg);
          break;
        }
      }
    }
  }
}
/// HandlePhysRegKill - Add kill/dead markers for physical register Reg (and
/// its sub-registers) at the last reference before MI redefines or kills it.
/// \returns false if Reg has no recorded def or use, true otherwise.
bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
  MachineInstr *LastDef = PhysRegDef[Reg];
  MachineInstr *LastUse = PhysRegUse[Reg];
  if (!LastDef && !LastUse)
    return false;

  MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
  unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];

  // The whole register is used.
  // AL =
  // AH =
  //
  //    = AX
  //    = AL, AX<imp-use, kill>
  // AX =
  //
  // Or whole register is defined, but not used at all.
  // AX<dead> =
  // ...
  // AX =
  //
  // Or whole register is defined, but only partly used.
  // AX<dead> = AL<imp-def>
  //    = AL<kill>
  // AX =
  MachineInstr *LastPartDef = nullptr;
  unsigned LastPartDefDist = 0;
  SmallSet<unsigned, 8> PartUses;
  for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
    unsigned SubReg = *SubRegs;
    MachineInstr *Def = PhysRegDef[SubReg];
    if (Def && Def != LastDef) {
      // There was a def of this sub-register in between. This is a partial
      // def, keep track of the last one.
      unsigned Dist = DistanceMap[Def];
      if (Dist > LastPartDefDist) {
        LastPartDefDist = Dist;
        LastPartDef = Def;
      }
      continue;
    }
    if (MachineInstr *Use = PhysRegUse[SubReg]) {
      // Record the sub-register (and its own sub-registers) as partially
      // used, and track the latest reference overall.
      for (MCSubRegIterator SS(SubReg, TRI, /*IncludeSelf=*/true); SS.isValid();
           ++SS)
        PartUses.insert(*SS);
      unsigned Dist = DistanceMap[Use];
      if (Dist > LastRefOrPartRefDist) {
        LastRefOrPartRefDist = Dist;
        LastRefOrPartRef = Use;
      }
    }
  }

  if (!PhysRegUse[Reg]) {
    // Partial uses. Mark register def dead and add implicit def of
    // sub-registers which are used.
    // EAX<dead>  = op  AL<imp-def>
    // That is, EAX def is dead but AL def extends pass it.
    PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
    for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
      unsigned SubReg = *SubRegs;
      if (!PartUses.count(SubReg))
        continue;
      bool NeedDef = true;
      if (PhysRegDef[Reg] == PhysRegDef[SubReg]) {
        MachineOperand *MO = PhysRegDef[Reg]->findRegisterDefOperand(SubReg);
        if (MO) {
          NeedDef = false;
          assert(!MO->isDead());
        }
      }
      if (NeedDef)
        PhysRegDef[Reg]->addOperand(MachineOperand::CreateReg(SubReg,
                                                 true/*IsDef*/, true/*IsImp*/));
      MachineInstr *LastSubRef = FindLastRefOrPartRef(SubReg);
      if (LastSubRef)
        LastSubRef->addRegisterKilled(SubReg, TRI, true);
      else {
        LastRefOrPartRef->addRegisterKilled(SubReg, TRI, true);
        for (MCSubRegIterator SS(SubReg, TRI, /*IncludeSelf=*/true);
             SS.isValid(); ++SS)
          PhysRegUse[*SS] = LastRefOrPartRef;
      }
      // This sub-register (and everything below it) is handled; drop it from
      // the pending partial-use set.
      for (MCSubRegIterator SS(SubReg, TRI); SS.isValid(); ++SS)
        PartUses.erase(*SS);
    }
  } else if (LastRefOrPartRef == PhysRegDef[Reg] && LastRefOrPartRef != MI) {
    if (LastPartDef)
      // The last partial def kills the register.
      LastPartDef->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
                                                true/*IsImp*/, true/*IsKill*/));
    else {
      MachineOperand *MO =
        LastRefOrPartRef->findRegisterDefOperand(Reg, false, TRI);
      bool NeedEC = MO->isEarlyClobber() && MO->getReg() != Reg;
      // If the last reference is the last def, then it's not used at all.
      // That is, unless we are currently processing the last reference itself.
      LastRefOrPartRef->addRegisterDead(Reg, TRI, true);
      if (NeedEC) {
        // If we are adding a subreg def and the superreg def is marked early
        // clobber, add an early clobber marker to the subreg def.
        MO = LastRefOrPartRef->findRegisterDefOperand(Reg);
        if (MO)
          MO->setIsEarlyClobber();
      }
    }
  } else
    LastRefOrPartRef->addRegisterKilled(Reg, TRI, true);
  return true;
}
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
///
/// \param dead  if non-null, instructions whose defs are all dead are
///              appended here for the caller to eliminate.
/// \returns true if a dead PHI was removed, meaning the interval may now be
///          separable into multiple connected components.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg) &&
         "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveRangeQuery LRQ(*li, Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                   << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;
    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI);
      assert(li->getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
/// HandlePhysRegKill - Add a kill or dead marker for physical register Reg at
/// its last reference before MI. NOTE(review): an MCSubRegIterator-based
/// revision of this function also appears in this file; this variant uses the
/// older getSubRegisters() array API — confirm which is current.
/// \returns false if Reg has no recorded def or use, true otherwise.
bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
  if (!PhysRegUse[Reg] && !PhysRegDef[Reg])
    return false;

  MachineInstr *LastRefOrPartRef =
    PhysRegUse[Reg] ? PhysRegUse[Reg] : PhysRegDef[Reg];
  unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
  // The whole register is used.
  // AL =
  // AH =
  //
  //    = AX
  //    = AL, AX<imp-use, kill>
  // AX =
  //
  // Or whole register is defined, but not used at all.
  // AX<dead> =
  // ...
  // AX =
  //
  // Or whole register is defined, but only partly used.
  // AX<dead> = AL<imp-def>
  //    = AL<kill>
  // AX =
  SmallSet<unsigned, 8> PartUses;
  for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
       unsigned SubReg = *SubRegs; ++SubRegs) {
    if (MachineInstr *Use = PhysRegUse[SubReg]) {
      // Record the sub-register (and its sub-registers) as partially used,
      // and track the latest reference overall.
      PartUses.insert(SubReg);
      for (const unsigned *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
        PartUses.insert(*SS);
      unsigned Dist = DistanceMap[Use];
      if (Dist > LastRefOrPartRefDist) {
        LastRefOrPartRefDist = Dist;
        LastRefOrPartRef = Use;
      }
    }
  }

  if (LastRefOrPartRef == PhysRegDef[Reg] && LastRefOrPartRef != MI)
    // If the last reference is the last def, then it's not used at all.
    // That is, unless we are currently processing the last reference itself.
    LastRefOrPartRef->addRegisterDead(Reg, TRI, true);
  /* Partial uses. Mark register def dead and add implicit def of
     sub-registers which are used.
     FIXME: LiveIntervalAnalysis can't handle this yet!
     EAX<dead>  = op  AL<imp-def>
     That is, EAX def is dead but AL def extends pass it.
     Enable this after live interval analysis is fixed to improve codegen!
  else if (!PhysRegUse[Reg]) {
    PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
    for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
         unsigned SubReg = *SubRegs; ++SubRegs) {
      if (PartUses.count(SubReg)) {
        PhysRegDef[Reg]->addOperand(MachineOperand::CreateReg(SubReg,
                                                              true, true));
        LastRefOrPartRef->addRegisterKilled(SubReg, TRI, true);
        for (const unsigned *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
          PartUses.erase(*SS);
      }
    }
  } */
  else
    LastRefOrPartRef->addRegisterKilled(Reg, TRI, true);
  return true;
}
/// EmitMachineNode - Generate machine code for a target-specific node and
/// needed dependencies.
///
void InstrEmitter::
EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, unsigned> &VRBaseMap) {
  unsigned Opc = Node->getMachineOpcode();

  // Handle subreg insert/extract specially
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::INSERT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG) {
    EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  // Handle COPY_TO_REGCLASS specially.
  if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
    EmitCopyToRegClassNode(Node, VRBaseMap);
    return;
  }

  // Handle REG_SEQUENCE specially.
  if (Opc == TargetOpcode::REG_SEQUENCE) {
    EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  if (Opc == TargetOpcode::IMPLICIT_DEF)
    // We want a unique VR for each IMPLICIT_DEF use.
    return;

  const TargetInstrDesc &II = TII->get(Opc);
  unsigned NumResults = CountResults(Node);
  unsigned NodeOperands = CountOperands(Node);
  bool HasPhysRegOuts = NumResults > II.getNumDefs() && II.getImplicitDefs()!=0;
#ifndef NDEBUG
  unsigned NumMIOperands = NodeOperands + NumResults;
  if (II.isVariadic())
    assert(NumMIOperands >= II.getNumOperands() &&
           "Too few operands for a variadic node!");
  else
    assert(NumMIOperands >= II.getNumOperands() &&
           NumMIOperands <= II.getNumOperands()+II.getNumImplicitDefs() &&
           "#operands for dag node doesn't match .td file!");
#endif

  // Create the new machine instruction.
  MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), II);

  // The MachineInstr constructor adds implicit-def operands. Scan through
  // these to determine which are dead.
  if (MI->getNumOperands() != 0 &&
      Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
    // First, collect all used registers.
    SmallVector<unsigned, 8> UsedRegs;
    for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser())
      if (F->getOpcode() == ISD::CopyFromReg)
        UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
      else {
        // Collect declared implicit uses.
        const TargetInstrDesc &TID = TII->get(F->getMachineOpcode());
        UsedRegs.append(TID.getImplicitUses(),
                        TID.getImplicitUses() + TID.getNumImplicitUses());
        // In addition to declared implicit uses, we must also check for
        // direct RegisterSDNode operands.
        for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
          if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
            unsigned Reg = R->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              UsedRegs.push_back(Reg);
          }
      }
    // Then mark unused registers as dead.
    MI->setPhysRegsDeadExcept(UsedRegs, *TRI);
  }

  // Add result register values for things that are defined by this
  // instruction.
  if (NumResults)
    CreateVirtualRegisters(Node, MI, II, IsClone, IsCloned, VRBaseMap);

  // Emit all of the actual operands of this instruction, adding them to the
  // instruction as appropriate.
  bool HasOptPRefs = II.getNumDefs() > NumResults;
  assert((!HasOptPRefs || !HasPhysRegOuts) &&
         "Unable to cope with optional defs and phys regs defs!");
  unsigned NumSkip = HasOptPRefs ? II.getNumDefs() - NumResults : 0;
  for (unsigned i = NumSkip; i != NodeOperands; ++i)
    AddOperand(MI, Node->getOperand(i), i-NumSkip+II.getNumDefs(), &II,
               VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);

  // Transfer all of the memory reference descriptions of this instruction.
  MI->setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
                 cast<MachineSDNode>(Node)->memoperands_end());

  // Insert the instruction into position in the block. This needs to
  // happen before any custom inserter hook is called so that the
  // hook knows where in the block to insert the replacement code.
  MBB->insert(InsertPos, MI);

  // Additional results must be physical register defs.
  if (HasPhysRegOuts) {
    for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
      unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
      if (Node->hasAnyUseOfValue(i))
        EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
      // If there are no uses, mark the register as dead now, so that
      // MachineLICM/Sink can see that it's dead. Don't do this if the
      // node has a Glue value, for the benefit of targets still using
      // Glue for values in physregs.
      else if (Node->getValueType(Node->getNumValues()-1) != MVT::Glue)
        MI->addRegisterDead(Reg, TRI);
    }
  }

  // If the instruction has implicit defs and the node doesn't, mark the
  // implicit def as dead. If the node has any glue outputs, we don't do this
  // because we don't know what implicit defs are being used by glued nodes.
  if (Node->getValueType(Node->getNumValues()-1) != MVT::Glue)
    if (const unsigned *IDList = II.getImplicitDefs()) {
      for (unsigned i = NumResults, e = II.getNumDefs()+II.getNumImplicitDefs();
           i != e; ++i)
        MI->addRegisterDead(IDList[i-II.getNumDefs()], TRI);
    }
}
/// rewrite - Replace every virtual-register operand in the current function
/// with the physical register assigned by the register allocator, adding the
/// implicit super-register operands required to keep sub-register semantics
/// correct, erasing (or KILL-ifying) identity copies, and finally recording
/// which physical registers are in use.
///
/// \param Indexes  optional slot-index maps; when non-null, erased
///                 instructions are also removed from these maps.
void VirtRegMap::rewrite(SlotIndexes *Indexes) {
  DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
               << "********** Function: "
               << MF->getFunction()->getName() << '\n');
  DEBUG(dump());

  // Per-instruction scratch lists: super-registers that need an extra
  // implicit operand once all explicit operands of MI have been rewritten.
  SmallVector<unsigned, 8> SuperDeads;
  SmallVector<unsigned, 8> SuperDefs;
  SmallVector<unsigned, 8> SuperKills;

  for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
       MBBI != MBBE; ++MBBI) {
    DEBUG(MBBI->print(dbgs(), Indexes));
    for (MachineBasicBlock::iterator MII = MBBI->begin(), MIE = MBBI->end();
         MII != MIE;) {
      MachineInstr *MI = MII;
      // Advance MII before mutating MI so that erasing MI below does not
      // invalidate the iteration.
      ++MII;

      for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
           MOE = MI->operands_end(); MOI != MOE; ++MOI) {
        MachineOperand &MO = *MOI;
        // Only virtual-register operands need rewriting.
        if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
          continue;
        unsigned VirtReg = MO.getReg();
        unsigned PhysReg = getPhys(VirtReg);
        assert(PhysReg != NO_PHYS_REG && "Instruction uses unmapped VirtReg");

        // Preserve semantics of sub-register operands.
        if (MO.getSubReg()) {
          // A virtual register kill refers to the whole register, so we may
          // have to add <imp-use,kill> operands for the super-register.  A
          // partial redef always kills and redefines the super-register.
          if (MO.readsReg() && (MO.isDef() || MO.isKill()))
            SuperKills.push_back(PhysReg);

          if (MO.isDef()) {
            // The <def,undef> flag only makes sense for sub-register defs, and
            // we are substituting a full physreg.  An <imp-use,kill> operand
            // from the SuperKills list will represent the partial read of the
            // super-register.
            MO.setIsUndef(false);

            // Also add implicit defs for the super-register.
            if (MO.isDead())
              SuperDeads.push_back(PhysReg);
            else
              SuperDefs.push_back(PhysReg);
          }

          // PhysReg operands cannot have subregister indexes.
          PhysReg = TRI->getSubReg(PhysReg, MO.getSubReg());
          assert(PhysReg && "Invalid SubReg for physical register");
          MO.setSubReg(0);
        }

        // Rewrite. Note we could have used MachineOperand::substPhysReg(), but
        // we need the inlining here.
        MO.setReg(PhysReg);
      }

      // Add any missing super-register kills after rewriting the whole
      // instruction.
      while (!SuperKills.empty())
        MI->addRegisterKilled(SuperKills.pop_back_val(), TRI, true);

      while (!SuperDeads.empty())
        MI->addRegisterDead(SuperDeads.pop_back_val(), TRI, true);

      while (!SuperDefs.empty())
        MI->addRegisterDefined(SuperDefs.pop_back_val(), TRI);

      DEBUG(dbgs() << "> " << *MI);

      // Finally, remove any identity copies.
      if (MI->isIdentityCopy()) {
        ++NumIdCopies;
        if (MI->getNumOperands() == 2) {
          // A plain two-operand identity copy carries no extra information
          // and can be dropped entirely.
          DEBUG(dbgs() << "Deleting identity copy.\n");
          RemoveMachineInstrFromMaps(MI);
          if (Indexes)
            Indexes->removeMachineInstrFromMaps(MI);
          // It's safe to erase MI because MII has already been incremented.
          MI->eraseFromParent();
        } else {
          // Transform identity copy to a KILL to deal with subregisters.
          MI->setDesc(TII->get(TargetOpcode::KILL));
          DEBUG(dbgs() << "Identity copy: " << *MI);
        }
      }
    }
  }

  // Tell MRI about physical registers in use.
  for (unsigned Reg = 1, RegE = TRI->getNumRegs(); Reg != RegE; ++Reg)
    if (!MRI->reg_nodbg_empty(Reg))
      MRI->setPhysRegUsed(Reg);
}
/// EmitNode - Generate machine code for a node and needed dependencies.
///
/// Machine opcodes are translated into a MachineInstr (with special handling
/// for subreg/regclass pseudo-opcodes); the remaining target-independent
/// opcodes handled here are CopyToReg, CopyFromReg, and INLINEASM.
///
/// \param Node      the SelectionDAG node to emit.
/// \param IsClone/IsCloned  cloning flags forwarded to the register helpers.
/// \param VRBaseMap maps emitted SDValues to their virtual registers.
/// \param EM        block remapping passed to custom insertion hooks, which
///                  may replace the current MBB.
void InstrEmitter::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
                            DenseMap<SDValue, unsigned> &VRBaseMap,
                            DenseMap<MachineBasicBlock*,
                                     MachineBasicBlock*> *EM) {
  // If machine instruction
  if (Node->isMachineOpcode()) {
    unsigned Opc = Node->getMachineOpcode();

    // Handle subreg insert/extract specially
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG) {
      EmitSubregNode(Node, VRBaseMap);
      return;
    }

    // Handle COPY_TO_REGCLASS specially.
    if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
      EmitCopyToRegClassNode(Node, VRBaseMap);
      return;
    }

    if (Opc == TargetOpcode::IMPLICIT_DEF)
      // We want a unique VR for each IMPLICIT_DEF use.
      return;

    const TargetInstrDesc &II = TII->get(Opc);
    unsigned NumResults = CountResults(Node);
    unsigned NodeOperands = CountOperands(Node);
    // Results beyond the declared defs must come from implicit physreg defs.
    bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
                          II.getImplicitDefs() != 0;
#ifndef NDEBUG
    unsigned NumMIOperands = NodeOperands + NumResults;
    assert((II.getNumOperands() == NumMIOperands ||
            HasPhysRegOuts || II.isVariadic()) &&
           "#operands for dag node doesn't match .td file!");
#endif

    // Create the new machine instruction.
    MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), II);

    // Add result register values for things that are defined by this
    // instruction.
    if (NumResults)
      CreateVirtualRegisters(Node, MI, II, IsClone, IsCloned, VRBaseMap);

    // Emit all of the actual operands of this instruction, adding them to the
    // instruction as appropriate.
    bool HasOptPRefs = II.getNumDefs() > NumResults;
    assert((!HasOptPRefs || !HasPhysRegOuts) &&
           "Unable to cope with optional defs and phys regs defs!");
    // Skip over optional def operands that have no corresponding DAG result.
    unsigned NumSkip = HasOptPRefs ? II.getNumDefs() - NumResults : 0;
    for (unsigned i = NumSkip; i != NodeOperands; ++i)
      AddOperand(MI, Node->getOperand(i), i-NumSkip+II.getNumDefs(), &II,
                 VRBaseMap);

    // Transfer all of the memory reference descriptions of this instruction.
    MI->setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
                   cast<MachineSDNode>(Node)->memoperands_end());

    if (II.usesCustomInsertionHook()) {
      // Insert this instruction into the basic block using a target
      // specific inserter which may returns a new basic block.
      // NOTE: the hook may replace MBB, so reset the insertion point after.
      MBB = TLI->EmitInstrWithCustomInserter(MI, MBB, EM);
      InsertPos = MBB->end();
    } else {
      MBB->insert(InsertPos, MI);
    }

    // Additional results must be physical register defs.
    if (HasPhysRegOuts) {
      for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
        unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
        if (Node->hasAnyUseOfValue(i))
          EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
        // If there are no uses, mark the register as dead now, so that
        // MachineLICM/Sink can see that it's dead. Don't do this if the
        // node has a Flag value, for the benefit of targets still using
        // Flag for values in physregs.
        else if (Node->getValueType(Node->getNumValues()-1) != MVT::Flag)
          MI->addRegisterDead(Reg, TRI);
      }
    }
    return;
  }

  // Not a machine opcode: handle the few target-independent nodes that
  // survive scheduling.
  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    Node->dump();
#endif
    llvm_unreachable("This target-independent node should have been selected!");
    break;
  case ISD::EntryToken:
    llvm_unreachable("EntryToken should have been excluded from the schedule!");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor: // fall thru
    // Pure value/chain glue nodes emit no code.
    break;
  case ISD::CopyToReg: {
    // Source may be a direct physreg node or a value already mapped to a VR.
    unsigned SrcReg;
    SDValue SrcVal = Node->getOperand(2);
    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
      SrcReg = R->getReg();
    else
      SrcReg = getVR(SrcVal, VRBaseMap);

    unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
      break;

    const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
    // Get the register classes of the src/dst.
    if (TargetRegisterInfo::isVirtualRegister(SrcReg))
      SrcTRC = MRI->getRegClass(SrcReg);
    else
      SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());

    if (TargetRegisterInfo::isVirtualRegister(DestReg))
      DstTRC = MRI->getRegClass(DestReg);
    else
      DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
                                            Node->getOperand(1).getValueType());

    bool Emitted = TII->copyRegToReg(*MBB, InsertPos, DestReg, SrcReg,
                                     DstTRC, SrcTRC);
    assert(Emitted && "Unable to issue a copy instruction!\n");
    (void) Emitted;
    break;
  }
  case ISD::CopyFromReg: {
    unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
    break;
  }
  case ISD::INLINEASM: {
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
      --NumOps;  // Ignore the flag operand.

    // Create the inline asm machine instruction.
    MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
                               TII->get(TargetOpcode::INLINEASM));

    // Add the asm string as an external symbol operand.
    const char *AsmStr =
      cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
    MI->addOperand(MachineOperand::CreateES(AsmStr));

    // Add all of the operand registers to the instruction.  Operands come in
    // groups: a flags immediate (encoding kind + count) followed by NumVals
    // value operands.
    for (unsigned i = 2; i != NumOps;) {
      unsigned Flags =
        cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
      unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

      MI->addOperand(MachineOperand::CreateImm(Flags));
      ++i;  // Skip the ID value.

      // The low three bits of Flags select the operand kind.
      switch (Flags & 7) {
      default: llvm_unreachable("Bad flags!");
      case 2:   // Def of register.
        for (; NumVals; --NumVals, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          MI->addOperand(MachineOperand::CreateReg(Reg, true));
        }
        break;
      case 6:   // Def of earlyclobber register.
        for (; NumVals; --NumVals, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          MI->addOperand(MachineOperand::CreateReg(Reg, true, false, false,
                                                   false, false, true));
        }
        break;
      case 1:  // Use of register.
      case 3:  // Immediate.
      case 4:  // Addressing mode.
        // The addressing mode has been selected, just add all of the
        // operands to the machine instruction.  (Deliberate fallthrough:
        // uses, immediates and addressing modes are all appended verbatim.)
        for (; NumVals; --NumVals, ++i)
          AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
        break;
      }
    }
    MBB->insert(InsertPos, MI);
    break;
  }
  }
}