/// isImplicitlyDefined - Return true if all defs of VirtReg are implicit-defs.
/// This includes registers with no defs.
static bool isImplicitlyDefined(unsigned VirtReg,
                                const MachineRegisterInfo *MRI) {
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(VirtReg),
       DE = MRI->def_end(); DI != DE; ++DI)
    if (!DI->isImplicitDef())
      return false;
  return true;
}
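
// Hedged usage sketch (not from the original file). A hypothetical caller
// walks every virtual register and skips the ones whose value is entirely
// undefined, i.e. defined only by IMPLICIT_DEF or never defined at all. Only
// isImplicitlyDefined() above plus standard MachineRegisterInfo and
// TargetRegisterInfo calls are used; the wrapper function is illustrative.
static void visitDefinedVRegs(const MachineRegisterInfo *MRI) {
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
    if (isImplicitlyDefined(Reg, MRI))
      continue; // Undefined value; nothing meaningful to preserve.
    // ... handle registers with real definitions here ...
  }
}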
/// Allocate a register for the virtual register \p VReg. The last use of
/// \p VReg is around the current position of the register scavenger \p RS.
/// \p ReserveAfter controls whether the scavenged register needs to be reserved
/// after the current instruction, otherwise it will only be reserved before the
/// current instruction.
static unsigned scavengeVReg(MachineRegisterInfo &MRI, RegScavenger &RS,
                             unsigned VReg, bool ReserveAfter) {
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();

#ifndef NDEBUG
  // Verify that all definitions and uses are in the same basic block.
  const MachineBasicBlock *CommonMBB = nullptr;
  // Real definition for the reg, re-definitions are not considered.
  const MachineInstr *RealDef = nullptr;
  for (MachineOperand &MO : MRI.reg_nodbg_operands(VReg)) {
    MachineBasicBlock *MBB = MO.getParent()->getParent();
    if (CommonMBB == nullptr)
      CommonMBB = MBB;
    assert(MBB == CommonMBB && "All defs+uses must be in the same basic block");
    if (MO.isDef()) {
      const MachineInstr &MI = *MO.getParent();
      if (!MI.readsRegister(VReg, &TRI)) {
        assert((!RealDef || RealDef == &MI) &&
               "Can have at most one definition which is not a redefinition");
        RealDef = &MI;
      }
    }
  }
  assert(RealDef != nullptr && "Must have at least 1 Def");
#endif

  // We should only have one definition of the register. However to accommodate
  // the requirements of two address code we also allow definitions in
  // subsequent instructions provided they also read the register. That way
  // we get a single contiguous lifetime.
  //
  // Definitions in MRI.def_begin() are unordered, search for the first.
  MachineRegisterInfo::def_iterator FirstDef =
    std::find_if(MRI.def_begin(VReg), MRI.def_end(),
                 [VReg, &TRI](const MachineOperand &MO) {
      return !MO.getParent()->readsRegister(VReg, &TRI);
    });
  assert(FirstDef != MRI.def_end() &&
         "Must have one definition that does not redefine vreg");
  MachineInstr &DefMI = *FirstDef->getParent();

  // The register scavenger will report a free register inserting an emergency
  // spill/reload if necessary.
  int SPAdj = 0;
  const TargetRegisterClass &RC = *MRI.getRegClass(VReg);
  unsigned SReg = RS.scavengeRegisterBackwards(RC, DefMI.getIterator(),
                                               ReserveAfter, SPAdj);
  MRI.replaceRegWith(VReg, SReg);
  ++NumScavengedRegs;
  return SReg;
}
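
// Hedged usage sketch (not from the original file). A hypothetical caller
// hands a leftover virtual register a physical register once the scavenger
// has been walked back to the instruction containing VReg's last use. Only
// scavengeVReg() above is taken from the snippet; the wrapper is illustrative.
// ReserveAfter would be true when the instruction both reads and writes VReg,
// so the scavenged register must stay reserved past it.
static void assignLeftoverVReg(MachineRegisterInfo &MRI, RegScavenger &RS,
                               unsigned VReg, bool ReserveAfter) {
  unsigned PhysReg = scavengeVReg(MRI, RS, VReg, ReserveAfter);
  // Every operand that mentioned VReg has already been rewritten to PhysReg
  // by MRI.replaceRegWith() inside scavengeVReg().
  (void)PhysReg;
}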
unsigned BitLevelInfo::getBitWidth(unsigned R) const {
  unsigned Size = 0;
  for (MachineRegisterInfo::def_iterator I = MRI->def_begin(R),
       E = MRI->def_end(); I != E; ++I) {
    unsigned S = VInstrInfo::getBitWidthOrZero(I.getOperand());
    if (S == 0) {
      // Get the bit width from source operand.
      assert(I->isCopy() && "Can not get register bit width!");
      S = getBitWidth(I->getOperand(1).getReg());
    }
    Size = std::max(Size, S);
  }

  return Size;
}
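
// Hedged usage sketch (not from the original file). A hypothetical query that
// asks the analysis for the inferred width of a virtual register; 'BLI' is
// assumed to be a BitLevelInfo that has already been run over the function,
// and only getBitWidth() above is taken from the snippet.
static unsigned queryRegWidth(const BitLevelInfo &BLI, unsigned Reg) {
  unsigned Width = BLI.getBitWidth(Reg);
  assert(Width != 0 && "Every reachable def should carry or forward a width");
  return Width;
}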
void LiveRangeCalc::createDeadDefs(LiveInterval *LI, unsigned Reg) {
  assert(MRI && Indexes && "call reset() first");

  // Visit all def operands. If the same instruction has multiple defs of Reg,
  // LI->createDeadDef() will deduplicate.
  for (MachineRegisterInfo::def_iterator
       I = MRI->def_begin(Reg), E = MRI->def_end(); I != E; ++I) {
    const MachineInstr *MI = &*I;
    // Find the corresponding slot index.
    SlotIndex Idx;
    if (MI->isPHI())
      // PHI defs begin at the basic block start index.
      Idx = Indexes->getMBBStartIdx(MI->getParent());
    else
      // Instructions are either normal 'r', or early clobber 'e'.
      Idx = Indexes->getInstructionIndex(MI)
              .getRegSlot(I.getOperand().isEarlyClobber());

    // Create the def in LI. This may find an existing def.
    LI->createDeadDef(Idx, *Alloc);
  }
}
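
// Hedged usage sketch (not from the original file). A hypothetical caller
// seeds a fresh LiveInterval with dead defs and then grows each def towards
// its uses. 'Calc' is a LiveRangeCalc that has had reset() called on it; the
// extendToUses() step is an assumption here, shown only to indicate the order
// real clients typically follow.
static void computeInterval(LiveRangeCalc &Calc, LiveInterval *LI,
                            unsigned Reg) {
  Calc.createDeadDefs(LI, Reg); // One value number per def, all initially dead.
  Calc.extendToUses(LI, Reg);   // Extend each def to cover its uses.
}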
// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void ARM64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
  DEBUG(dbgs() << "Scalar transform: " << *MI);

  MachineBasicBlock *MBB = MI->getParent();
  int OldOpc = MI->getOpcode();
  int NewOpc = getTransformOpcode(OldOpc);
  assert(OldOpc != NewOpc && "transform an instruction to itself?!");

  // Check if we need a copy for the source registers.
  unsigned OrigSrc0 = MI->getOperand(1).getReg();
  unsigned OrigSrc1 = MI->getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0 = 0;
  unsigned Src1 = 0, SubReg1 = 0;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_iterator Def = MRI->def_begin(OrigSrc0);
    assert(llvm::next(Def) == MRI->def_end() && "Multiple def in SSA!");
    Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0)) {
      assert(Src0 && "Can't delete copy w/o a valid original source!");
      Def->eraseFromParent();
      ++NumCopiesDeleted;
    }
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_iterator Def = MRI->def_begin(OrigSrc1);
    assert(llvm::next(Def) == MRI->def_end() && "Multiple def in SSA!");
    Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1)) {
      assert(Src1 && "Can't delete copy w/o a valid original source!");
      Def->eraseFromParent();
      ++NumCopiesDeleted;
    }
  }

  // If we weren't able to reference the original source directly, create a
  // copy.
  if (!Src0) {
    SubReg0 = 0;
    Src0 = MRI->createVirtualRegister(&ARM64::FPR64RegClass);
    insertCopy(TII, MI, Src0, OrigSrc0, true);
  }
  if (!Src1) {
    SubReg1 = 0;
    Src1 = MRI->createVirtualRegister(&ARM64::FPR64RegClass);
    insertCopy(TII, MI, Src1, OrigSrc1, true);
  }

  // Create a vreg for the destination.
  // FIXME: No need to do this if the ultimate user expects an FPR64.
  // Check for that and avoid the copy if possible.
  unsigned Dst = MRI->createVirtualRegister(&ARM64::FPR64RegClass);

  // For now, all of the new instructions have the same simple three-register
  // form, so no need to special case based on what instruction we're
  // building.
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(NewOpc), Dst)
      .addReg(Src0, getKillRegState(true), SubReg0)
      .addReg(Src1, getKillRegState(true), SubReg1);

  // Now copy the result back out to a GPR.
  // FIXME: Try to avoid this if all uses could actually just use the FPR64
  // directly.
  insertCopy(TII, MI, MI->getOperand(0).getReg(), Dst, true);

  // Erase the old instruction.
  MI->eraseFromParent();
  ++NumScalarInsnsUsed;
}
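
// Hedged driver sketch (not from the original file). A pass like this would
// typically scan each block, decide per instruction whether the scalar form
// pays off, and then rewrite it. The member name processMachineBasicBlock and
// the isProfitableToTransform() check are assumptions used for illustration;
// only transformInstruction() above is taken from the snippet.
void ARM64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
  for (MachineBasicBlock::iterator I = MBB->begin(); I != MBB->end();) {
    MachineInstr *MI = I++; // Advance first: MI may be erased below.
    if (isProfitableToTransform(MI))
      transformInstruction(MI); // Emits the scalar form and deletes MI.
  }
}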