/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
    ++NumSnippets;
  }
}
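// Illustrative sketch (not verbatim LLVM code): how the RegsToSpill and
// SnippetCopies sets gathered above are consumed by the spill driver of the
// same era. reMaterializeAll() and spillAll() appear later in this collection;
// the real entry point's name, signature, and extra guards may differ.
void InlineSpiller::spillSketch() {          // hypothetical wrapper name
  collectRegsToSpill();                      // main reg plus sibling snippets
  reMaterializeAll();                        // prefer remat over stack reloads
  if (!RegsToSpill.empty())                  // remat may have emptied the list
    spillAll();                              // stack-slot spills around uses
}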
void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[],
                                          MachineRegisterInfo &MRI) {
  assert(LIV[0] && "LIV[0] must be set");
  LiveInterval &LI = *LIV[0];

  // Rewrite instructions.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
       RE = MRI.reg_end(); RI != RE;) {
    MachineOperand &MO = *RI;
    MachineInstr *MI = RI->getParent();
    ++RI;
    // DBG_VALUE instructions don't have slot indexes, so get the index of the
    // instruction before them.
    // Normally, DBG_VALUE instructions are removed before this function is
    // called, but it is not a requirement.
    SlotIndex Idx;
    if (MI->isDebugValue())
      Idx = LIS.getSlotIndexes()->getIndexBefore(MI);
    else
      Idx = LIS.getInstructionIndex(MI);
    LiveQueryResult LRQ = LI.Query(Idx);
    const VNInfo *VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
    // In the case of an <undef> use that isn't tied to any def, VNI will be
    // NULL. If the use is tied to a def, VNI will be the defined value.
    if (!VNI)
      continue;
    MO.setReg(LIV[getEqClass(VNI)]->reg);
  }

  // Move runs to new intervals.
  LiveInterval::iterator J = LI.begin(), E = LI.end();
  while (J != E && EqClass[J->valno->id] == 0)
    ++J;
  for (LiveInterval::iterator I = J; I != E; ++I) {
    if (unsigned eq = EqClass[I->valno->id]) {
      assert((LIV[eq]->empty() || LIV[eq]->expiredAt(I->start)) &&
             "New intervals should be empty");
      LIV[eq]->segments.push_back(*I);
    } else
      *J++ = *I;
  }
  LI.segments.erase(J, E);

  // Transfer VNInfos to their new owners and renumber them.
  unsigned j = 0, e = LI.getNumValNums();
  while (j != e && EqClass[j] == 0)
    ++j;
  for (unsigned i = j; i != e; ++i) {
    VNInfo *VNI = LI.getValNumInfo(i);
    if (unsigned eq = EqClass[i]) {
      VNI->id = LIV[eq]->getNumValNums();
      LIV[eq]->valnos.push_back(VNI);
    } else {
      VNI->id = j;
      LI.valnos[j++] = VNI;
    }
  }
  LI.valnos.resize(j);
}
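// Illustrative sketch (hypothetical caller, not verbatim LLVM): how Distribute()
// is typically driven. A ConnectedVNInfoEqClasses object first classifies the
// value numbers of an interval into connected components; if more than one
// exists, the caller makes one fresh interval per extra component and lets
// Distribute() move operands, segments, and VNInfos. The Classify() call and
// the interval-creation helper are assumptions and vary between revisions.
void splitSeparateComponentsSketch(LiveIntervals &LIS, LiveRangeEdit &Edit,
                                   MachineRegisterInfo &MRI, LiveInterval &LI) {
  ConnectedVNInfoEqClasses ConEQ(LIS);
  unsigned NumComp = ConEQ.Classify(&LI);     // count connected components
  if (NumComp <= 1)
    return;                                   // already a single component

  SmallVector<LiveInterval*, 8> Dups(1, &LI); // component 0 stays in LI
  for (unsigned i = 1; i != NumComp; ++i)
    Dups.push_back(&Edit.create());           // hypothetical: new interval for
                                              // the same original register
  ConEQ.Distribute(&Dups[0], MRI);
}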
/// rewrite - after all the new live ranges have been created, rewrite
/// instructions using curli to use the new intervals.
void SplitEditor::rewrite() {
  assert(!openli_ && "Previous LI not closed before rewrite");
  const LiveInterval *curli = sa_.getCurLI();
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(curli->reg),
       RE = mri_.reg_end(); RI != RE;) {
    MachineOperand &MO = RI.getOperand();
    MachineInstr *MI = MO.getParent();
    ++RI;
    if (MI->isDebugValue()) {
      DEBUG(dbgs() << "Zapping " << *MI);
      // FIXME: We can do much better with debug values.
      MO.setReg(0);
      continue;
    }
    SlotIndex Idx = lis_.getInstructionIndex(MI);
    Idx = MO.isUse() ? Idx.getUseIndex() : Idx.getDefIndex();
    LiveInterval *LI = dupli_;
    for (unsigned i = firstInterval, e = intervals_.size(); i != e; ++i) {
      LiveInterval *testli = intervals_[i];
      if (testli->liveAt(Idx)) {
        LI = testli;
        break;
      }
    }
    if (LI) {
      MO.setReg(LI->reg);
      sa_.removeUse(MI);
      DEBUG(dbgs() << " rewrite " << Idx << '\t' << *MI);
    }
  }

  // dupli_ goes in last, after rewriting.
  if (dupli_) {
    if (dupli_->empty()) {
      DEBUG(dbgs() << " dupli became empty?\n");
      lis_.removeInterval(dupli_->reg);
      dupli_ = 0;
    } else {
      dupli_->RenumberValues(lis_);
      intervals_.push_back(dupli_);
    }
  }

  // Calculate spill weight and allocation hints for new intervals.
  VirtRegAuxInfo vrai(vrm_.getMachineFunction(), lis_, sa_.loops_);
  for (unsigned i = firstInterval, e = intervals_.size(); i != e; ++i) {
    LiveInterval &li = *intervals_[i];
    vrai.CalculateRegClass(li.reg);
    vrai.CalculateWeightAndHint(li);
    DEBUG(dbgs() << " new interval " << mri_.getRegClass(li.reg)->getName()
                 << ":" << li << '\n');
  }
}
void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[],
                                          MachineRegisterInfo &MRI) {
  assert(LIV[0] && "LIV[0] must be set");
  LiveInterval &LI = *LIV[0];

  // Rewrite instructions.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
       RE = MRI.reg_end(); RI != RE;) {
    MachineOperand &MO = RI.getOperand();
    MachineInstr *MI = MO.getParent();
    ++RI;
    if (MO.isUse() && MO.isUndef())
      continue;
    // DBG_VALUE instructions should have been eliminated earlier.
    SlotIndex Idx = LIS.getInstructionIndex(MI);
    Idx = Idx.getRegSlot(MO.isUse());
    const VNInfo *VNI = LI.getVNInfoAt(Idx);
    assert(VNI && "Interval not live at use.");
    MO.setReg(LIV[getEqClass(VNI)]->reg);
  }

  // Move runs to new intervals.
  LiveInterval::iterator J = LI.begin(), E = LI.end();
  while (J != E && EqClass[J->valno->id] == 0)
    ++J;
  for (LiveInterval::iterator I = J; I != E; ++I) {
    if (unsigned eq = EqClass[I->valno->id]) {
      assert((LIV[eq]->empty() || LIV[eq]->expiredAt(I->start)) &&
             "New intervals should be empty");
      LIV[eq]->ranges.push_back(*I);
    } else
      *J++ = *I;
  }
  LI.ranges.erase(J, E);

  // Transfer VNInfos to their new owners and renumber them.
  unsigned j = 0, e = LI.getNumValNums();
  while (j != e && EqClass[j] == 0)
    ++j;
  for (unsigned i = j; i != e; ++i) {
    VNInfo *VNI = LI.getValNumInfo(i);
    if (unsigned eq = EqClass[i]) {
      VNI->id = LIV[eq]->getNumValNums();
      LIV[eq]->valnos.push_back(VNI);
    } else {
      VNI->id = j;
      LI.valnos[j++] = VNI;
    }
  }
  LI.valnos.resize(j);
}
void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[],
                                          MachineRegisterInfo &MRI) {
  assert(LIV[0] && "LIV[0] must be set");
  LiveInterval &LI = *LIV[0];

  // Rewrite instructions.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
       RE = MRI.reg_end(); RI != RE;) {
    MachineOperand &MO = RI.getOperand();
    MachineInstr *MI = MO.getParent();
    ++RI;
    // DBG_VALUE instructions should have been eliminated earlier.
    LiveRangeQuery LRQ(LI, LIS.getInstructionIndex(MI));
    const VNInfo *VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
    // In the case of an <undef> use that isn't tied to any def, VNI will be
    // NULL. If the use is tied to a def, VNI will be the defined value.
    if (!VNI)
      continue;
    MO.setReg(LIV[getEqClass(VNI)]->reg);
  }

  // Move runs to new intervals.
  LiveInterval::iterator J = LI.begin(), E = LI.end();
  while (J != E && EqClass[J->valno->id] == 0)
    ++J;
  for (LiveInterval::iterator I = J; I != E; ++I) {
    if (unsigned eq = EqClass[I->valno->id]) {
      assert((LIV[eq]->empty() || LIV[eq]->expiredAt(I->start)) &&
             "New intervals should be empty");
      LIV[eq]->ranges.push_back(*I);
    } else
      *J++ = *I;
  }
  LI.ranges.erase(J, E);

  // Transfer VNInfos to their new owners and renumber them.
  unsigned j = 0, e = LI.getNumValNums();
  while (j != e && EqClass[j] == 0)
    ++j;
  for (unsigned i = j; i != e; ++i) {
    VNInfo *VNI = LI.getValNumInfo(i);
    if (unsigned eq = EqClass[i]) {
      VNI->id = LIV[eq]->getNumValNums();
      LIV[eq]->valnos.push_back(VNI);
    } else {
      VNI->id = j;
      LI.valnos[j++] = VNI;
    }
  }
  LI.valnos.resize(j);
}
/// rewriteAssigned - Rewrite all uses of Edit->getReg().
void SplitEditor::rewriteAssigned(bool ExtendRanges) {
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Edit->getReg()),
       RE = MRI.reg_end(); RI != RE;) {
    MachineOperand &MO = RI.getOperand();
    MachineInstr *MI = MO.getParent();
    ++RI;
    // LiveDebugVariables should have handled all DBG_VALUE instructions.
    if (MI->isDebugValue()) {
      DEBUG(dbgs() << "Zapping " << *MI);
      MO.setReg(0);
      continue;
    }

    // <undef> operands don't really read the register, so it doesn't matter
    // which register we choose. When the use operand is tied to a def, we must
    // use the same register as the def, so just do that always.
    SlotIndex Idx = LIS.getInstructionIndex(MI);
    if (MO.isDef() || MO.isUndef())
      Idx = MO.isEarlyClobber() ? Idx.getUseIndex() : Idx.getDefIndex();

    // Rewrite to the mapped register at Idx.
    unsigned RegIdx = RegAssign.lookup(Idx);
    LiveInterval *LI = Edit->get(RegIdx);
    MO.setReg(LI->reg);
    DEBUG(dbgs() << " rewr BB#" << MI->getParent()->getNumber() << '\t'
                 << Idx << ':' << RegIdx << '\t' << *MI);

    // Extend liveness to Idx if the instruction reads reg.
    if (!ExtendRanges || MO.isUndef())
      continue;

    // Skip instructions that don't read Reg.
    if (MO.isDef()) {
      if (!MO.getSubReg() && !MO.isEarlyClobber())
        continue;
      // We may want to extend a live range for a partial redef, or for a use
      // tied to an early clobber.
      Idx = Idx.getPrevSlot();
      if (!Edit->getParent().liveAt(Idx))
        continue;
    } else
      Idx = Idx.getUseIndex();

    getLRCalc(RegIdx).extend(LI, Idx.getNextSlot(), LIS.getSlotIndexes(),
                             &MDT, &LIS.getVNInfoAllocator());
  }
}
/// analyzeUses - Count instructions, basic blocks, and loops using curli.
void SplitAnalysis::analyzeUses() {
  const MachineRegisterInfo &MRI = mf_.getRegInfo();
  for (MachineRegisterInfo::reg_iterator I = MRI.reg_begin(curli_->reg);
       MachineInstr *MI = I.skipInstruction();) {
    if (MI->isDebugValue() || !usingInstrs_.insert(MI))
      continue;
    MachineBasicBlock *MBB = MI->getParent();
    if (usingBlocks_[MBB]++)
      continue;
    if (MachineLoop *Loop = loops_.getLoopFor(MBB))
      usingLoops_[Loop]++;
  }
  DEBUG(dbgs() << " counted " << usingInstrs_.size() << " instrs, "
               << usingBlocks_.size() << " blocks, "
               << usingLoops_.size() << " loops.\n");
}
/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    StackInt->MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]),
                                   StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
  }

  // Finally delete the SnippetCopies.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(RegsToSpill[i]);
         MachineInstr *MI = RI.skipInstruction();) {
      assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      LIS.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    Edit->eraseVirtReg(RegsToSpill[i]);
}
/// analyzeUses - Count instructions, basic blocks, and loops using CurLI.
void SplitAnalysis::analyzeUses() {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineRegisterInfo::reg_iterator I = MRI.reg_begin(CurLI->reg);
       MachineInstr *MI = I.skipInstruction();) {
    if (MI->isDebugValue() || !UsingInstrs.insert(MI))
      continue;
    UseSlots.push_back(LIS.getInstructionIndex(MI).getDefIndex());
    MachineBasicBlock *MBB = MI->getParent();
    if (UsingBlocks[MBB]++)
      continue;
    for (MachineLoop *Loop = Loops.getLoopFor(MBB); Loop;
         Loop = Loop->getParentLoop())
      UsingLoops[Loop]++;
  }
  array_pod_sort(UseSlots.begin(), UseSlots.end());
  DEBUG(dbgs() << " counted " << UsingInstrs.size() << " instrs, "
               << UsingBlocks.size() << " blocks, "
               << UsingLoops.size() << " loops.\n");
}
/// NoUseAfterLastDef - Return true if there are no intervening uses between the
/// last instruction in the MBB that defines the specified register and the
/// two-address instruction which is being processed. It also returns the last
/// def location by reference.
bool TwoAddressInstructionPass::NoUseAfterLastDef(unsigned Reg,
                                                  MachineBasicBlock *MBB,
                                                  unsigned Dist,
                                                  unsigned &LastDef) {
  LastDef = 0;
  unsigned LastUse = Dist;
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Reg),
         E = MRI->reg_end(); I != E; ++I) {
    MachineOperand &MO = I.getOperand();
    MachineInstr *MI = MO.getParent();
    if (MI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
    if (DI == DistanceMap.end())
      continue;
    if (MO.isUse() && DI->second < LastUse)
      LastUse = DI->second;
    if (MO.isDef() && DI->second > LastDef)
      LastDef = DI->second;
  }

  return !(LastUse > LastDef && LastUse < Dist);
}
MachineInstr *TwoAddressInstructionPass::FindLastUseInMBB(unsigned Reg,
                                                          MachineBasicBlock *MBB,
                                                          unsigned Dist) {
  unsigned LastUseDist = 0;
  MachineInstr *LastUse = 0;
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Reg),
         E = MRI->reg_end(); I != E; ++I) {
    MachineOperand &MO = I.getOperand();
    MachineInstr *MI = MO.getParent();
    if (MI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
    if (DI == DistanceMap.end())
      continue;
    if (DI->second >= Dist)
      continue;
    if (MO.isUse() && DI->second > LastUseDist) {
      LastUse = DI->first;
      LastUseDist = DI->second;
    }
  }
  return LastUse;
}
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg) &&
         "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveRangeQuery LRQ(*li, Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                   << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI);
      assert(li->getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    if (LII->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
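// Illustrative sketch (hypothetical caller, not from LLVM): a typical way a
// pass uses shrinkToUses() after deleting an instruction that read a register.
// It relies only on calls whose signatures appear elsewhere in this collection.
void eraseAndShrinkSketch(LiveIntervals &LIS, LiveInterval &LI,
                          MachineInstr *DeadUse) {
  LIS.RemoveMachineInstrFromMaps(DeadUse);
  DeadUse->eraseFromParent();

  // Trim LI back to its remaining uses; collect defs that became fully dead.
  SmallVector<MachineInstr*, 8> Dead;
  LIS.shrinkToUses(&LI, &Dead);

  // The caller is responsible for deleting the dead defs (removing a def can
  // expose further dead code, so some callers iterate).
  for (unsigned i = 0, e = Dead.size(); i != e; ++i) {
    LIS.RemoveMachineInstrFromMaps(Dead[i]);
    Dead[i]->eraseFromParent();
  }
}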
/// ComputeLocalLiveness - Computes liveness of registers within a basic
/// block, setting the killed/dead flags as appropriate.
void RALocal::ComputeLocalLiveness(MachineBasicBlock& MBB) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Keep track of the most recently seen previous use or def of each reg,
  // so that we can update them with dead/kill markers.
  DenseMap<unsigned, std::pair<MachineInstr*, unsigned> > LastUseDef;
  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
       I != E; ++I) {
    if (I->isDebugValue())
      continue;

    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = I->getOperand(i);
      // Uses don't trigger any flags, but we need to save
      // them for later. Also, we have to process these
      // _before_ processing the defs, since an instr
      // uses regs before it defs them.
      if (!MO.isReg() || !MO.getReg() || !MO.isUse())
        continue;

      LastUseDef[MO.getReg()] = std::make_pair(I, i);

      if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
        continue;

      const unsigned *Aliases = TRI->getAliasSet(MO.getReg());
      if (Aliases == 0)
        continue;

      while (*Aliases) {
        DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
          alias = LastUseDef.find(*Aliases);

        if (alias != LastUseDef.end() && alias->second.first != I)
          LastUseDef[*Aliases] = std::make_pair(I, i);

        ++Aliases;
      }
    }

    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = I->getOperand(i);
      // Defs other than 2-addr redefs _do_ trigger flag changes:
      //   - A def followed by a def is dead
      //   - A use followed by a def is a kill
      if (!MO.isReg() || !MO.getReg() || !MO.isDef())
        continue;

      DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
        last = LastUseDef.find(MO.getReg());
      if (last != LastUseDef.end()) {
        // Check if this is a two address instruction. If so, then
        // the def does not kill the use.
        if (last->second.first == I && I->isRegTiedToUseOperand(i))
          continue;

        MachineOperand &lastUD =
                    last->second.first->getOperand(last->second.second);
        if (lastUD.isDef())
          lastUD.setIsDead(true);
        else
          lastUD.setIsKill(true);
      }

      LastUseDef[MO.getReg()] = std::make_pair(I, i);
    }
  }

  // Live-out (of the function) registers contain return values of the
  // function, so we need to make sure they are alive at return time.
  MachineBasicBlock::iterator Ret = MBB.getFirstTerminator();
  bool BBEndsInReturn = (Ret != MBB.end() && Ret->getDesc().isReturn());

  if (BBEndsInReturn)
    for (MachineRegisterInfo::liveout_iterator
         I = MF->getRegInfo().liveout_begin(),
         E = MF->getRegInfo().liveout_end(); I != E; ++I)
      if (!Ret->readsRegister(*I)) {
        Ret->addOperand(MachineOperand::CreateReg(*I, false, true));
        LastUseDef[*I] = std::make_pair(Ret, Ret->getNumOperands()-1);
      }

  // Finally, loop over the final use/def of each reg
  // in the block and determine if it is dead.
  for (DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
       I = LastUseDef.begin(), E = LastUseDef.end(); I != E; ++I) {
    MachineInstr *MI = I->second.first;
    unsigned idx = I->second.second;
    MachineOperand &MO = MI->getOperand(idx);

    bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(MO.getReg());

    // A crude approximation of "live-out" calculation
    bool usedOutsideBlock = isPhysReg ? false :
          UsedInMultipleBlocks.test(MO.getReg() -
                                    TargetRegisterInfo::FirstVirtualRegister);

    // If the machine BB ends in a return instruction, then the value isn't
    // used outside of the BB.
    if (!isPhysReg && (!usedOutsideBlock || BBEndsInReturn)) {
      // DBG_VALUE complicates this: if the only refs of a register outside
      // this block are DBG_VALUE, we can't keep the reg live just for that,
      // as it will cause the reg to be spilled at the end of this block when
      // it wouldn't have been otherwise. Nullify the DBG_VALUEs when that
      // happens.
      bool UsedByDebugValueOnly = false;
      for (MachineRegisterInfo::reg_iterator UI = MRI.reg_begin(MO.getReg()),
           UE = MRI.reg_end(); UI != UE; ++UI) {
        // Two cases:
        // - used in another block
        // - used in the same block before it is defined (loop)
        if (UI->getParent() == &MBB &&
            !(MO.isDef() && UI.getOperand().isUse() && precedes(&*UI, MI)))
          continue;

        if (UI->isDebugValue()) {
          UsedByDebugValueOnly = true;
          continue;
        }

        // A non-DBG_VALUE use means we can leave DBG_VALUE uses alone.
        UsedInMultipleBlocks.set(MO.getReg() -
                                 TargetRegisterInfo::FirstVirtualRegister);
        usedOutsideBlock = true;
        UsedByDebugValueOnly = false;
        break;
      }

      if (UsedByDebugValueOnly)
        for (MachineRegisterInfo::reg_iterator UI = MRI.reg_begin(MO.getReg()),
             UE = MRI.reg_end(); UI != UE; ++UI)
          if (UI->isDebugValue() &&
              (UI->getParent() != &MBB ||
               (MO.isDef() && precedes(&*UI, MI))))
            UI.getOperand().setReg(0U);
    }

    // Physical registers and those that are not live-out of the block are
    // killed/dead at their last use/def within this block.
    if (isPhysReg || !usedOutsideBlock || BBEndsInReturn) {
      if (MO.isUse()) {
        // Don't mark uses that are tied to defs as kills.
        if (!MI->isRegTiedToDefOperand(idx))
          MO.setIsKill(true);
      } else {
        MO.setIsDead(true);
      }
    }
  }
}
/// hasRegisterUseBelow - Return true if the specified register is used after
/// the current instruction and before its next definition.
bool LiveVariables::hasRegisterUseBelow(unsigned Reg,
                                        MachineBasicBlock::iterator I,
                                        MachineBasicBlock *MBB) {
  if (I == MBB->end())
    return false;

  // First find out if there are any uses / defs below.
  bool hasDistInfo = true;
  unsigned CurDist = DistanceMap[I];
  SmallVector<MachineInstr*, 4> Uses;
  SmallVector<MachineInstr*, 4> Defs;
  for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
         RE = MRI->reg_end(); RI != RE; ++RI) {
    MachineOperand &UDO = RI.getOperand();
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    bool isBelow = false;
    if (DI == DistanceMap.end()) {
      // Must be below if it hasn't been assigned a distance yet.
      isBelow = true;
      hasDistInfo = false;
    } else if (DI->second > CurDist)
      isBelow = true;
    if (isBelow) {
      if (UDO.isUse())
        Uses.push_back(UDMI);
      if (UDO.isDef())
        Defs.push_back(UDMI);
    }
  }

  if (Uses.empty())
    // No uses below.
    return false;
  else if (!Uses.empty() && Defs.empty())
    // There are uses below but no defs below.
    return true;
  // There are both uses and defs below. We need to know which comes first.
  if (!hasDistInfo) {
    // Complete DistanceMap for this MBB. This information is computed only
    // once per MBB.
    ++I;
    ++CurDist;
    for (MachineBasicBlock::iterator E = MBB->end(); I != E; ++I, ++CurDist)
      DistanceMap.insert(std::make_pair(I, CurDist));
  }

  unsigned EarliestUse = DistanceMap[Uses[0]];
  for (unsigned i = 1, e = Uses.size(); i != e; ++i) {
    unsigned Dist = DistanceMap[Uses[i]];
    if (Dist < EarliestUse)
      EarliestUse = Dist;
  }
  for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
    unsigned Dist = DistanceMap[Defs[i]];
    if (Dist < EarliestUse)
      // The register is defined before its first use below.
      return false;
  }
  return true;
}
void ConnectedVNInfoEqClasses::Distribute(LiveInterval &LI, LiveInterval *LIV[],
                                          MachineRegisterInfo &MRI) {
  // Rewrite instructions.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
       RE = MRI.reg_end(); RI != RE;) {
    MachineOperand &MO = *RI;
    MachineInstr *MI = RI->getParent();
    ++RI;
    // DBG_VALUE instructions don't have slot indexes, so get the index of the
    // instruction before them.
    // Normally, DBG_VALUE instructions are removed before this function is
    // called, but it is not a requirement.
    SlotIndex Idx;
    if (MI->isDebugValue())
      Idx = LIS.getSlotIndexes()->getIndexBefore(*MI);
    else
      Idx = LIS.getInstructionIndex(*MI);
    LiveQueryResult LRQ = LI.Query(Idx);
    const VNInfo *VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
    // In the case of an <undef> use that isn't tied to any def, VNI will be
    // NULL. If the use is tied to a def, VNI will be the defined value.
    if (!VNI)
      continue;
    if (unsigned EqClass = getEqClass(VNI))
      MO.setReg(LIV[EqClass-1]->reg);
  }

  // Distribute subregister liveranges.
  if (LI.hasSubRanges()) {
    unsigned NumComponents = EqClass.getNumClasses();
    SmallVector<unsigned, 8> VNIMapping;
    SmallVector<LiveInterval::SubRange*, 8> SubRanges;
    BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
    for (LiveInterval::SubRange &SR : LI.subranges()) {
      // Create new subranges in the split intervals and construct a mapping
      // for the VNInfos in the subrange.
      unsigned NumValNos = SR.valnos.size();
      VNIMapping.clear();
      VNIMapping.reserve(NumValNos);
      SubRanges.clear();
      SubRanges.resize(NumComponents-1, nullptr);
      for (unsigned I = 0; I < NumValNos; ++I) {
        const VNInfo &VNI = *SR.valnos[I];
        unsigned ComponentNum;
        if (VNI.isUnused()) {
          ComponentNum = 0;
        } else {
          const VNInfo *MainRangeVNI = LI.getVNInfoAt(VNI.def);
          assert(MainRangeVNI != nullptr &&
                 "SubRange def must have corresponding main range def");
          ComponentNum = getEqClass(MainRangeVNI);
          if (ComponentNum > 0 && SubRanges[ComponentNum-1] == nullptr) {
            SubRanges[ComponentNum-1]
              = LIV[ComponentNum-1]->createSubRange(Allocator, SR.LaneMask);
          }
        }
        VNIMapping.push_back(ComponentNum);
      }
      DistributeRange(SR, SubRanges.data(), VNIMapping);
    }
    LI.removeEmptySubRanges();
  }
  // Distribute main liverange.
  DistributeRange(LI, LIV, EqClass);
}
/// getTripCount - Return a loop-invariant LLVM value indicating the
/// number of times the loop will be executed. The trip count can
/// be either a register or a constant value. If the trip-count
/// cannot be determined, this returns null.
///
/// We find the trip count from the phi instruction that defines the
/// induction variable. We follow the links to the CMP instruction
/// to get the trip count.
///
/// Based upon getTripCount in LoopInfo.
///
CountValue *PPCCTRLoops::getTripCount(MachineLoop *L,
                           SmallVector<MachineInstr *, 2> &OldInsts) const {
  MachineBasicBlock *LastMBB = L->getExitingBlock();
  // Don't generate a CTR loop if the loop has more than one exit.
  if (LastMBB == 0)
    return 0;

  MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
  if (LastI->getOpcode() != PPC::BCC)
    return 0;

  // We need to make sure that this compare is defining the condition
  // register actually used by the terminating branch.
  unsigned PredReg = LastI->getOperand(1).getReg();
  DEBUG(dbgs() << "Examining loop with first terminator: " << *LastI);

  unsigned PredCond = LastI->getOperand(0).getImm();
  if (PredCond != PPC::PRED_EQ && PredCond != PPC::PRED_NE)
    return 0;

  // Check that the loop has an induction variable.
  SmallVector<MachineInstr *, 4> IVars, IOps;
  getCanonicalInductionVariable(L, IVars, IOps);
  for (unsigned i = 0; i < IVars.size(); ++i) {
    MachineInstr *IOp = IOps[i];
    MachineInstr *IV_Inst = IVars[i];

    // Canonical loops will end with a 'cmpwi/cmpdi cr, IV, Imm',
    //  if Imm is 0, get the count from the PHI opnd
    //  if Imm is -M, then M is the count
    //  Otherwise, Imm is the count
    MachineOperand *IV_Opnd;
    const MachineOperand *InitialValue;
    if (!L->contains(IV_Inst->getOperand(2).getMBB())) {
      InitialValue = &IV_Inst->getOperand(1);
      IV_Opnd = &IV_Inst->getOperand(3);
    } else {
      InitialValue = &IV_Inst->getOperand(3);
      IV_Opnd = &IV_Inst->getOperand(1);
    }

    DEBUG(dbgs() << "Considering:\n");
    DEBUG(dbgs() << " induction operation: " << *IOp);
    DEBUG(dbgs() << " induction variable: " << *IV_Inst);
    DEBUG(dbgs() << " initial value: " << *InitialValue << "\n");

    // Look for the cmp instruction to determine if we
    // can get a useful trip count. The trip count can
    // be either a register or an immediate. The location
    // of the value depends upon the type (reg or imm).
    for (MachineRegisterInfo::reg_iterator
         RI = MRI->reg_begin(IV_Opnd->getReg()), RE = MRI->reg_end();
         RI != RE; ++RI) {
      IV_Opnd = &RI.getOperand();
      bool SignedCmp, Int64Cmp;
      MachineInstr *MI = IV_Opnd->getParent();
      if (L->contains(MI) && isCompareEqualsImm(MI, SignedCmp, Int64Cmp) &&
          MI->getOperand(0).getReg() == PredReg) {

        OldInsts.push_back(MI);
        OldInsts.push_back(IOp);

        DEBUG(dbgs() << " compare: " << *MI);

        const MachineOperand &MO = MI->getOperand(2);
        assert(MO.isImm() && "IV Cmp Operand should be an immediate");

        int64_t ImmVal;
        if (SignedCmp)
          ImmVal = (short) MO.getImm();
        else
          ImmVal = MO.getImm();

        const MachineInstr *IV_DefInstr = MRI->getVRegDef(IV_Opnd->getReg());
        assert(L->contains(IV_DefInstr->getParent()) &&
               "IV definition should occurs in loop");
        int64_t iv_value = (short) IV_DefInstr->getOperand(2).getImm();

        assert(InitialValue->isReg() && "Expecting register for init value");
        unsigned InitialValueReg = InitialValue->getReg();

        MachineInstr *DefInstr = MRI->getVRegDef(InitialValueReg);

        // Here we need to look for an immediate load (an li or lis/ori pair).
        if (DefInstr && (DefInstr->getOpcode() == PPC::ORI8 ||
                         DefInstr->getOpcode() == PPC::ORI)) {
          int64_t start = DefInstr->getOperand(2).getImm();
          MachineInstr *DefInstr2 =
            MRI->getVRegDef(DefInstr->getOperand(1).getReg());
          if (DefInstr2 && (DefInstr2->getOpcode() == PPC::LIS8 ||
                            DefInstr2->getOpcode() == PPC::LIS)) {
            DEBUG(dbgs() << " initial constant: " << *DefInstr);
            DEBUG(dbgs() << " initial constant: " << *DefInstr2);

            start |= int64_t(short(DefInstr2->getOperand(1).getImm())) << 16;

            int64_t count = ImmVal - start;
            if ((count % iv_value) != 0) {
              return 0;
            }

            OldInsts.push_back(DefInstr);
            OldInsts.push_back(DefInstr2);

            // count/iv_value, the trip count, should be positive here. If it
            // is negative, that indicates that the counter will wrap.
            if (Int64Cmp)
              return new CountValue(count/iv_value);
            else
              return new CountValue(uint32_t(count/iv_value));
          }
        } else if (DefInstr && (DefInstr->getOpcode() == PPC::LI8 ||
                                DefInstr->getOpcode() == PPC::LI)) {
          DEBUG(dbgs() << " initial constant: " << *DefInstr);

          int64_t count = ImmVal -
            int64_t(short(DefInstr->getOperand(1).getImm()));
          if ((count % iv_value) != 0) {
            return 0;
          }

          OldInsts.push_back(DefInstr);

          if (Int64Cmp)
            return new CountValue(count/iv_value);
          else
            return new CountValue(uint32_t(count/iv_value));
        } else if (iv_value == 1 || iv_value == -1) {
          // We can't determine a constant starting value.
          if (ImmVal == 0) {
            return new CountValue(InitialValueReg, iv_value > 0);
          }
          // FIXME: handle non-zero end value.
        }
        // FIXME: handle non-unit increments (we might not want to introduce
        // division but we can handle some 2^n cases with shifts).
      }
    }
  }
  return 0;
}
void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
  MachineRegisterInfo &mri = MF.getRegInfo();
  const TargetRegisterInfo &tri = *MF.getTarget().getRegisterInfo();
  MachineBasicBlock *mbb = 0;
  MachineLoop *loop = 0;
  unsigned loopDepth = 0;
  bool isExiting = false;
  float totalWeight = 0;
  SmallPtrSet<MachineInstr*, 8> visited;

  // Find the best physreg hint and the best virtreg hint.
  float bestPhys = 0, bestVirt = 0;
  unsigned hintPhys = 0, hintVirt = 0;

  // Don't recompute a target specific hint.
  bool noHint = mri.getRegAllocationHint(li.reg).first != 0;

  // Don't recompute spill weight for an unspillable register.
  bool Spillable = li.isSpillable();

  for (MachineRegisterInfo::reg_iterator I = mri.reg_begin(li.reg);
       MachineInstr *mi = I.skipInstruction();) {
    if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
      continue;
    if (!visited.insert(mi))
      continue;

    float weight = 1.0f;
    if (Spillable) {
      // Get loop info for mi.
      if (mi->getParent() != mbb) {
        mbb = mi->getParent();
        loop = Loops.getLoopFor(mbb);
        loopDepth = loop ? loop->getLoopDepth() : 0;
        isExiting = loop ? loop->isLoopExiting(mbb) : false;
      }

      // Calculate instr weight.
      bool reads, writes;
      tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
      weight = LiveIntervals::getSpillWeight(writes, reads, loopDepth);

      // Give extra weight to what looks like a loop induction variable update.
      if (writes && isExiting && LIS.isLiveOutOfMBB(li, mbb))
        weight *= 3;

      totalWeight += weight;
    }

    // Get allocation hints from copies.
    if (noHint || !mi->isCopy())
      continue;
    unsigned hint = copyHint(mi, li.reg, tri, mri);
    if (!hint)
      continue;
    float hweight = Hint[hint] += weight;
    if (TargetRegisterInfo::isPhysicalRegister(hint)) {
      if (hweight > bestPhys && mri.isAllocatable(hint))
        bestPhys = hweight, hintPhys = hint;
    } else {
      if (hweight > bestVirt)
        bestVirt = hweight, hintVirt = hint;
    }
  }

  Hint.clear();

  // Always prefer the physreg hint.
  if (unsigned hint = hintPhys ? hintPhys : hintVirt) {
    mri.setRegAllocationHint(li.reg, 0, hint);
    // Weakly boost the spill weight of hinted registers.
    totalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!Spillable)
    return;

  // Mark li as unspillable if all live ranges are tiny.
  if (li.isZeroLength(LIS.getSlotIndexes())) {
    li.markNotSpillable();
    return;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(li, LIS, *MF.getTarget().getInstrInfo()))
    totalWeight *= 0.5F;

  li.weight = normalizeSpillWeight(totalWeight, li.getSize());
}
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RegI = MRI.reg_begin(Reg);
       MachineInstr *MI = RegI.skipBundle();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot,
                                                           Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::VirtRegInfo RI =
      MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      if (RI.Writes) {
        // Hoist the spill of a sib-reg copy.
        if (hoistSpill(OldLI, MI)) {
          // This COPY is now dead, the value is already in the stack slot.
          MI->getOperand(0).setIsDead();
          DeadDefs.push_back(MI);
          continue;
        }
      } else {
        // This is a reload for a sib-reg copy. Drop spills downstream.
        LiveInterval &SibLI = LIS.getInterval(SibReg);
        eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
        // The COPY will fold to a reload below.
      }
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->createFrom(Reg);
    NewLI.markNotSpillable();

    if (RI.Reads)
      insertReload(NewLI, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!Ops[i].first->isRegTiedToDefOperand(Ops[i].second))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (RI.Writes) {
      if (hasLiveDef)
        insertSpill(NewLI, OldLI, Idx, MI);
      else {
        // This instruction defines a dead value. We don't need to spill it,
        // but do create a live range for the dead value.
        VNInfo *VNI = NewLI.getNextValue(Idx, LIS.getVNInfoAllocator());
        NewLI.addRange(LiveRange(Idx, Idx.getDeadSlot(), VNI));
      }
    }

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}
// Top-level driver to manage the queue of unassigned VirtRegs and call the
// selectOrSplit implementation.
void RegAllocBase::allocatePhysRegs() {
  seedLiveRegs();

  // Continue assigning vregs one at a time to available physical registers.
  while (LiveInterval *VirtReg = dequeue()) {
    assert(!VRM->hasPhys(VirtReg->reg) && "Register already assigned");

    // Unused registers can appear when the spiller coalesces snippets.
    if (MRI->reg_nodbg_empty(VirtReg->reg)) {
      DEBUG(dbgs() << "Dropping unused " << *VirtReg << '\n');
      LIS->removeInterval(VirtReg->reg);
      continue;
    }

    // Invalidate all interference queries, live ranges could have changed.
    invalidateVirtRegs();

    // selectOrSplit requests the allocator to return an available physical
    // register if possible and populate a list of new live intervals that
    // result from splitting.
    DEBUG(dbgs() << "\nselectOrSplit "
                 << MRI->getRegClass(VirtReg->reg)->getName()
                 << ':' << *VirtReg << '\n');
    typedef SmallVector<LiveInterval*, 4> VirtRegVec;
    VirtRegVec SplitVRegs;
    unsigned AvailablePhysReg = selectOrSplit(*VirtReg, SplitVRegs);

    if (AvailablePhysReg == ~0u) {
      // selectOrSplit failed to find a register!
      const char *Msg = "ran out of registers during register allocation";
      // Probably caused by an inline asm.
      MachineInstr *MI;
      for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(VirtReg->reg);
           (MI = I.skipInstruction());)
        if (MI->isInlineAsm())
          break;
      if (MI)
        MI->emitError(Msg);
      else
        report_fatal_error(Msg);
      // Keep going after reporting the error.
      VRM->assignVirt2Phys(VirtReg->reg,
                 RegClassInfo.getOrder(MRI->getRegClass(VirtReg->reg)).front());
      continue;
    }

    if (AvailablePhysReg)
      assign(*VirtReg, AvailablePhysReg);

    for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
         I != E; ++I) {
      LiveInterval *SplitVirtReg = *I;
      assert(!VRM->hasPhys(SplitVirtReg->reg) && "Register already assigned");
      if (MRI->reg_nodbg_empty(SplitVirtReg->reg)) {
        DEBUG(dbgs() << "not queueing unused " << *SplitVirtReg << '\n');
        LIS->removeInterval(SplitVirtReg->reg);
        continue;
      }
      DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
      assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
             "expect split value in virtual register");
      enqueue(SplitVirtReg);
      ++NumNewQueued;
    }
  }
}
// Top-level driver to manage the queue of unassigned VirtRegs and call the
// selectOrSplit implementation.
void RegAllocBase::allocatePhysRegs() {
  seedLiveRegs();

  // Continue assigning vregs one at a time to available physical registers.
  while (LiveInterval *VirtReg = dequeue()) {
    assert(!VRM->hasPhys(VirtReg->reg) && "Register already assigned");

    // Unused registers can appear when the spiller coalesces snippets.
    if (MRI->reg_nodbg_empty(VirtReg->reg)) {
      DEBUG(dbgs() << "Dropping unused " << *VirtReg << '\n');
      LIS->removeInterval(VirtReg->reg);
      continue;
    }

    // Invalidate all interference queries, live ranges could have changed.
    invalidateVirtRegs();

    // selectOrSplit requests the allocator to return an available physical
    // register if possible and populate a list of new live intervals that
    // result from splitting.
    DEBUG(dbgs() << "\nselectOrSplit "
                 << MRI->getRegClass(VirtReg->reg)->getName()
                 << ':' << *VirtReg << '\n');
    typedef SmallVector<LiveInterval*, 4> VirtRegVec;
    VirtRegVec SplitVRegs;
    unsigned AvailablePhysReg = selectOrSplit(*VirtReg, SplitVRegs);

    if (AvailablePhysReg == ~0u) {
      // selectOrSplit failed to find a register!
      std::string msg;
      raw_string_ostream Msg(msg);
      Msg << "Ran out of registers during register allocation!"
             "\nCannot allocate: " << *VirtReg;
      for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(VirtReg->reg);
           MachineInstr *MI = I.skipInstruction();) {
        if (!MI->isInlineAsm())
          continue;
        Msg << "\nPlease check your inline asm statement for "
               "invalid constraints:\n";
        MI->print(Msg, &VRM->getMachineFunction().getTarget());
      }
      report_fatal_error(Msg.str());
    }

    if (AvailablePhysReg)
      assign(*VirtReg, AvailablePhysReg);

    for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
         I != E; ++I) {
      LiveInterval *SplitVirtReg = *I;
      assert(!VRM->hasPhys(SplitVirtReg->reg) && "Register already assigned");
      if (MRI->reg_nodbg_empty(SplitVirtReg->reg)) {
        DEBUG(dbgs() << "not queueing unused " << *SplitVirtReg << '\n');
        LIS->removeInterval(SplitVirtReg->reg);
        continue;
      }
      DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
      assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
             "expect split value in virtual register");
      enqueue(SplitVirtReg);
      ++NumNewQueued;
    }
  }
}
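// Illustrative sketch (hypothetical allocator, not from LLVM): the contract a
// selectOrSplit() implementation follows for the drivers above. It either
// returns a physical register to assign, returns 0 after pushing split
// products into SplitVRegs for re-queueing, or returns ~0u to signal failure.
// TrivialAllocator, tryAssign, and trySplit are invented names for the sketch.
unsigned TrivialAllocator::selectOrSplit(LiveInterval &VirtReg,
                              SmallVectorImpl<LiveInterval*> &SplitVRegs) {
  if (unsigned PhysReg = tryAssign(VirtReg))   // hypothetical helper
    return PhysReg;                            // driver calls assign()

  if (trySplit(VirtReg, SplitVRegs))           // hypothetical helper
    return 0;                                  // driver enqueues SplitVRegs

  return ~0u;                                  // driver takes the error path
}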
void InlineSpiller::spill(LiveInterval *li,
                          SmallVectorImpl<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  stackSlot_ = vrm_.getStackSlot(li->reg);
  if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
    stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                           Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}