// Return true if \p MI dominates of uses of virtual register \p VReg static bool dominatesAllUsesOf(const MachineInstr *MI, unsigned VReg, MachineDominatorTree *MDT, MachineRegisterInfo *MRI) { assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Expected virtual register!"); for (auto it = MRI->use_nodbg_begin(VReg), end = MRI->use_nodbg_end(); it != end; ++it) { MachineInstr *User = it->getParent(); if (User->isPHI()) { unsigned BBOperandIdx = User->getOperandNo(&*it) + 1; MachineBasicBlock *MBB = User->getOperand(BBOperandIdx).getMBB(); if (MBB->empty()) { const MachineBasicBlock *InstBB = MI->getParent(); assert(InstBB != MBB && "Instruction found in empty MBB"); if (!MDT->dominates(InstBB, MBB)) return false; continue; } User = &*MBB->rbegin(); } if (!MDT->dominates(MI, User)) return false; } return true; }
static bool isCrossCopy(const MachineRegisterInfo &MRI, const MachineInstr &MI, const TargetRegisterClass *DstRC, const MachineOperand &MO) { assert(lowersToCopies(MI)); unsigned SrcReg = MO.getReg(); const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg); if (DstRC == SrcRC) return false; unsigned SrcSubIdx = MO.getSubReg(); const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); unsigned DstSubIdx = 0; switch (MI.getOpcode()) { case TargetOpcode::INSERT_SUBREG: if (MI.getOperandNo(&MO) == 2) DstSubIdx = MI.getOperand(3).getImm(); break; case TargetOpcode::REG_SEQUENCE: { unsigned OpNum = MI.getOperandNo(&MO); DstSubIdx = MI.getOperand(OpNum+1).getImm(); break; } case TargetOpcode::EXTRACT_SUBREG: { unsigned SubReg = MI.getOperand(2).getImm(); SrcSubIdx = TRI.composeSubRegIndices(SubReg, SrcSubIdx); } } unsigned PreA, PreB; // Unused. if (SrcSubIdx && DstSubIdx) return !TRI.getCommonSuperRegClass(SrcRC, SrcSubIdx, DstRC, DstSubIdx, PreA, PreB); if (SrcSubIdx) return !TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSubIdx); if (DstSubIdx) return !TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSubIdx); return !TRI.getCommonSubClass(SrcRC, DstRC); }
/// Given the used lanes \p UsedLanes of the result of the copy-like
/// instruction \p MI, compute which lanes of the register read by source
/// operand \p MO are actually used.
LaneBitmask DetectDeadLanes::transferUsedLanes(const MachineInstr &MI,
                                               LaneBitmask UsedLanes,
                                               const MachineOperand &MO) const {
  unsigned OpNum = MI.getOperandNo(&MO);
  // Only valid for copy-like instructions whose def was flagged as
  // defined-by-copy.
  assert(lowersToCopies(MI) &&
         DefinedByCopy[TargetRegisterInfo::virtReg2Index(
             MI.getOperand(0).getReg())]);

  switch (MI.getOpcode()) {
  case TargetOpcode::COPY:
  case TargetOpcode::PHI:
    // Full copies pass the used lanes through unchanged.
    return UsedLanes;
  case TargetOpcode::REG_SEQUENCE: {
    // Value operands sit at odd positions; the following operand holds the
    // destination subregister index for that value.
    assert(OpNum % 2 == 1);
    unsigned SubIdx = MI.getOperand(OpNum + 1).getImm();
    // Map the used lanes of the full result back into the source's lanes.
    return TRI->reverseComposeSubRegIndexLaneMask(SubIdx, UsedLanes);
  }
  case TargetOpcode::INSERT_SUBREG: {
    unsigned SubIdx = MI.getOperand(3).getImm();
    LaneBitmask MO2UsedLanes =
        TRI->reverseComposeSubRegIndexLaneMask(SubIdx, UsedLanes);
    if (OpNum == 2)
      // Operand 2 is the inserted value: only lanes inside the target
      // subregister can be read from it.
      return MO2UsedLanes;

    const MachineOperand &Def = MI.getOperand(0);
    unsigned DefReg = Def.getReg();
    const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
    LaneBitmask MO1UsedLanes;
    if (RC->CoveredBySubRegs)
      // The inserted lanes are fully overwritten, so they are not read
      // from the tied source operand.
      MO1UsedLanes = UsedLanes & ~TRI->getSubRegIndexLaneMask(SubIdx);
    else
      // Without complete subregister coverage we cannot tell which lanes
      // survive; conservatively treat all lanes of the class as used.
      MO1UsedLanes = RC->LaneMask;

    assert(OpNum == 1);
    return MO1UsedLanes;
  }
  case TargetOpcode::EXTRACT_SUBREG: {
    assert(OpNum == 1);
    unsigned SubIdx = MI.getOperand(2).getImm();
    // Only lanes inside the extracted subregister are read from the source.
    return TRI->composeSubRegIndexLaneMask(SubIdx, UsedLanes);
  }
  default:
    llvm_unreachable("function must be called with COPY-like instruction");
  }
}
/// Check whether the given instruction can the end of a LOH chain involving a /// store. static bool isCandidateStore(const MachineInstr &MI, const MachineOperand &MO) { switch (MI.getOpcode()) { default: return false; case AArch64::STRBBui: case AArch64::STRHHui: case AArch64::STRBui: case AArch64::STRHui: case AArch64::STRWui: case AArch64::STRXui: case AArch64::STRSui: case AArch64::STRDui: case AArch64::STRQui: // We can only optimize the index operand. // In case we have str xA, [xA, #imm], this is two different uses // of xA and we cannot fold, otherwise the xA stored may be wrong, // even if #imm == 0. return MI.getOperandNo(&MO) == 1 && MI.getOperand(0).getReg() != MI.getOperand(1).getReg(); } }