/// Extend SGPR live ranges across divergent control flow so that the register
/// allocator does not assign the same SGPR to conflicting values on the two
/// sides of a branch. For each SGPR that is live into exactly one of a
/// block's two successors, a dummy SGPR_USE is inserted at the nearest common
/// post-dominator, forcing the value live through both paths.
/// \returns true if any SGPR_USE instruction was inserted.
bool SIFixSGPRLiveRanges::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  bool MadeChange = false;

  MachinePostDominatorTree *PDT = &getAnalysis<MachinePostDominatorTree>();
  // Pairs of (register, its live range) for every SGPR def seen so far that
  // is live out of its defining block. Accumulated across the DFS walk below.
  std::vector<std::pair<unsigned, LiveRange *>> SGPRLiveRanges;

  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
  // LiveVariables is optional; only updated if some earlier pass computed it.
  LiveVariables *LV = getAnalysisIfAvailable<LiveVariables>();
  MachineBasicBlock *Entry = MF.begin();

  // Use a depth first order so that in SSA, we encounter all defs before
  // uses. Once the defs of the block have been found, attempt to insert
  // SGPR_USE instructions in successor blocks if required.
  for (MachineBasicBlock *MBB : depth_first(Entry)) {
    // Phase 1: record every explicit SGPR def in this block whose live range
    // escapes the block.
    for (const MachineInstr &MI : *MBB) {
      for (const MachineOperand &MO : MI.defs()) {
        // Implicit defs (e.g. condition-code style side effects) are skipped;
        // only explicit defs are candidates.
        if (MO.isImplicit())
          continue;

        unsigned Def = MO.getReg();
        if (TargetRegisterInfo::isVirtualRegister(Def)) {
          if (TRI->isSGPRClass(MRI.getRegClass(Def))) {
            // Only consider defs that are live outs. We don't care about def /
            // use within the same block.
            LiveRange &LR = LIS->getInterval(Def);
            if (LIS->isLiveOutOfMBB(LR, MBB))
              SGPRLiveRanges.push_back(std::make_pair(Def, &LR));
          }
        } else if (TRI->isSGPRClass(TRI->getPhysRegClass(Def))) {
          // Physical SGPR: track via its register-unit live range instead of
          // a virtual-register interval.
          SGPRLiveRanges.push_back(std::make_pair(Def, &LIS->getRegUnit(Def)));
        }
      }
    }

    // Phase 2: only branch points (two successors) can create divergent
    // paths; single-successor or exit blocks need no fixup.
    if (MBB->succ_size() < 2)
      continue;

    // We have structured control flow, so the number of successors should be
    // two.
    assert(MBB->succ_size() == 2);
    MachineBasicBlock *SuccA = *MBB->succ_begin();
    MachineBasicBlock *SuccB = *(++MBB->succ_begin());
    // The reconvergence point of the two paths: the nearest common
    // post-dominator of the successors.
    MachineBasicBlock *NCD = PDT->findNearestCommonDominator(SuccA, SuccB);

    if (!NCD)
      continue;

    MachineBasicBlock::iterator NCDTerm = NCD->getFirstTerminator();

    if (NCDTerm != NCD->end() && NCDTerm->getOpcode() == AMDGPU::SI_ELSE) {
      assert(NCD->succ_size() == 2);
      // We want to make sure we insert the Use after the ENDIF, not after
      // the ELSE.
      NCD = PDT->findNearestCommonDominator(*NCD->succ_begin(),
                                            *(++NCD->succ_begin()));
    }

    for (std::pair<unsigned, LiveRange*> RegLR : SGPRLiveRanges) {
      unsigned Reg = RegLR.first;
      LiveRange *LR = RegLR.second;

      // FIXME: We could be smarter here. If the register is Live-In to one
      // block, but the other doesn't have any SGPR defs, then there won't be a
      // conflict. Also, if the branch condition is uniform then there will be
      // no conflict.
      bool LiveInToA = LIS->isLiveInToMBB(*LR, SuccA);
      bool LiveInToB = LIS->isLiveInToMBB(*LR, SuccB);

      if (!LiveInToA && !LiveInToB) {
        // Dead past this branch: nothing to extend.
        DEBUG(dbgs() << PrintReg(Reg, TRI, 0)
              << " is live into neither successor\n");
        continue;
      }

      if (LiveInToA && LiveInToB) {
        // Already live down both paths; no conflict possible.
        DEBUG(dbgs() << PrintReg(Reg, TRI, 0)
              << " is live into both successors\n");
        continue;
      }

      // This interval is live in to one successor, but not the other, so
      // we need to update its range so it is live in to both.
      DEBUG(dbgs() << "Possible SGPR conflict detected for "
            << PrintReg(Reg, TRI, 0)
            << " in " << *LR
            << " BB#" << SuccA->getNumber()
            << ", BB#" << SuccB->getNumber()
            << " with NCD = BB#" << NCD->getNumber() << '\n');

      assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
             "Not expecting to extend live range of physreg");

      // FIXME: Need to figure out how to update LiveRange here so this pass
      // will be able to preserve LiveInterval analysis.
      // Insert a dummy implicit use at the reconvergence point to pin the
      // value live through both paths.
      MachineInstr *NCDSGPRUse =
        BuildMI(*NCD, NCD->getFirstNonPHI(), DebugLoc(),
                TII->get(AMDGPU::SGPR_USE))
        .addReg(Reg, RegState::Implicit);

      MadeChange = true;
      // Register the new instruction with LiveIntervals and stretch the live
      // range to cover it.
      SlotIndex SI = LIS->InsertMachineInstrInMaps(NCDSGPRUse);
      LIS->extendToIndices(*LR, SI.getRegSlot());

      if (LV) {
        // TODO: This won't work post-SSA
        LV->HandleVirtRegUse(Reg, NCD, NCDSGPRUse);
      }

      DEBUG(NCDSGPRUse->dump());
    }
  }

  return MadeChange;
}
// Replace uses of FromReg with ToReg if they are dominated by MI. static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI, unsigned FromReg, unsigned ToReg, const MachineRegisterInfo &MRI, MachineDominatorTree &MDT, LiveIntervals &LIS) { bool Changed = false; LiveInterval *FromLI = &LIS.getInterval(FromReg); LiveInterval *ToLI = &LIS.getInterval(ToReg); SlotIndex FromIdx = LIS.getInstructionIndex(MI).getRegSlot(); VNInfo *FromVNI = FromLI->getVNInfoAt(FromIdx); SmallVector<SlotIndex, 4> Indices; for (auto I = MRI.use_nodbg_begin(FromReg), E = MRI.use_nodbg_end(); I != E;) { MachineOperand &O = *I++; MachineInstr *Where = O.getParent(); // Check that MI dominates the instruction in the normal way. if (&MI == Where || !MDT.dominates(&MI, Where)) continue; // If this use gets a different value, skip it. SlotIndex WhereIdx = LIS.getInstructionIndex(*Where); VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx); if (WhereVNI && WhereVNI != FromVNI) continue; // Make sure ToReg isn't clobbered before it gets there. VNInfo *ToVNI = ToLI->getVNInfoAt(WhereIdx); if (ToVNI && ToVNI != FromVNI) continue; Changed = true; LLVM_DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from " << MI << "\n"); O.setReg(ToReg); // If the store's def was previously dead, it is no longer. if (!O.isUndef()) { MI.getOperand(0).setIsDead(false); Indices.push_back(WhereIdx.getRegSlot()); } } if (Changed) { // Extend ToReg's liveness. LIS.extendToIndices(*ToLI, Indices); // Shrink FromReg's liveness. LIS.shrinkToUses(FromLI); // If we replaced all dominated uses, FromReg is now killed at MI. if (!FromLI->liveAt(FromIdx.getDeadSlot())) MI.addRegisterKilled(FromReg, MBB.getParent() ->getSubtarget<WebAssemblySubtarget>() .getRegisterInfo()); } return Changed; }