Example #1
bool RegisterBank::verify(const TargetRegisterInfo &TRI) const {
  assert(isValid() && "Invalid register bank");
  for (unsigned RCId = 0, End = TRI.getNumRegClasses(); RCId != End; ++RCId) {
    const TargetRegisterClass &RC = *TRI.getRegClass(RCId);

    if (!covers(RC))
      continue;
    // Verify that the register bank covers all the subclasses of the
    // classes it covers.

    // Use a different (and, in this context, slower) method than
    // RegisterBankInfo to find the subclasses of RC, to make sure
    // both agree on what is covered.
    for (unsigned SubRCId = 0; SubRCId != End; ++SubRCId) {
      const TargetRegisterClass &SubRC = *TRI.getRegClass(SubRCId);

      if (!RC.hasSubClassEq(&SubRC))
        continue;

      // Verify that the Size of the register bank is big enough to cover
      // all the register classes it covers.
      assert((getSize() >= SubRC.getSize() * 8) &&
             "Size is not big enough for all the subclasses!");
      assert(covers(SubRC) && "Not all subclasses are covered");
    }
  }
  return true;
}
Example #2
/// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg) {
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != nullptr;
  }

  // At most one of the registers is a sub-register; make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the registers is a sub-register; check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;

  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}
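A minimal, hedged usage sketch (DefRC, DefSubReg, SrcRC and SrcSubReg are assumed to describe the two sides of a COPY that a peephole pass is considering rewriting; the early return is illustrative only):

  // Bail out when the definition and the source do not live in the same
  // register file: rewriting the copy would then require a cross-register-file
  // move rather than a simple renaming.
  if (!shareSameRegisterFile(TRI, DefRC, DefSubReg, SrcRC, SrcSubReg))
    return false;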
Example #3
void EDDisassembler::initMaps(const TargetRegisterInfo &registerInfo) {
  unsigned numRegisters = registerInfo.getNumRegs();
  unsigned registerIndex;
  
  for (registerIndex = 0; registerIndex < numRegisters; ++registerIndex) {
    const char* registerName = registerInfo.get(registerIndex).Name;
    
    RegVec.push_back(registerName);
    RegRMap[registerName] = registerIndex;
  }
  
  switch (Key.Arch) {
  default:
    break;
  case Triple::x86:
  case Triple::x86_64:
    stackPointers.insert(registerIDWithName("SP"));
    stackPointers.insert(registerIDWithName("ESP"));
    stackPointers.insert(registerIDWithName("RSP"));
    
    programCounters.insert(registerIDWithName("IP"));
    programCounters.insert(registerIDWithName("EIP"));
    programCounters.insert(registerIDWithName("RIP"));
    break;
  case Triple::arm:
  case Triple::thumb:
    stackPointers.insert(registerIDWithName("SP"));
    
    programCounters.insert(registerIDWithName("PC"));
    break;  
  }
}
Example #4
static bool isACalleeSavedRegister(unsigned reg, const TargetRegisterInfo &TRI,
                                   const MachineFunction &MF) {
  const MCPhysReg *CSR = TRI.getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSR[i] != 0; ++i)
    if (TRI.regsOverlap(reg, CSR[i]))
      return true;
  return false;
}
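A hedged sketch of how such a helper might be used; touchesCalleeSavedReg is a hypothetical wrapper, and MI, TRI and MF are assumed to come from a MachineFunction pass:

static bool touchesCalleeSavedReg(const MachineInstr &MI,
                                  const TargetRegisterInfo &TRI,
                                  const MachineFunction &MF) {
  // Check every physical-register operand against the callee-saved set.
  for (const MachineOperand &MO : MI.operands())
    if (MO.isReg() && TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
        isACalleeSavedRegister(MO.getReg(), TRI, MF))
      return true;
  return false;
}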
Example #5
MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) {
  VRegInfo.reserve(256);
  RegAllocHints.reserve(256);
  UsedPhysRegs.resize(TRI.getNumRegs());
  
  // Create the physreg use/def lists.
  PhysRegUseDefLists = new MachineOperand*[TRI.getNumRegs()];
  memset(PhysRegUseDefLists, 0, sizeof(MachineOperand*)*TRI.getNumRegs());
}
Example #6
MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI)
  : TRI(&TRI), IsSSA(true), TracksLiveness(true) {
  VRegInfo.reserve(256);
  RegAllocHints.reserve(256);
  UsedRegUnits.resize(TRI.getNumRegUnits());
  UsedPhysRegMask.resize(TRI.getNumRegs());

  // Create the physreg use/def lists.
  PhysRegUseDefLists = new MachineOperand*[TRI.getNumRegs()];
  memset(PhysRegUseDefLists, 0, sizeof(MachineOperand*)*TRI.getNumRegs());
}
Example #7
unsigned RegisterBankInfo::getSizeInBits(unsigned Reg,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI) const {
  if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    // Because this is expensive, we'll cache the register class by calling
    // getMinimalPhysRegClass.
    auto *RC = &getMinimalPhysRegClass(Reg, TRI);
    assert(RC && "Expecting Register class");
    return TRI.getRegSizeInBits(*RC);
  }
  return TRI.getRegSizeInBits(Reg, MRI);
}
Example #8
HexagonBlockRanges::RegisterSet HexagonBlockRanges::getLiveIns(
      const MachineBasicBlock &B, const MachineRegisterInfo &MRI,
      const TargetRegisterInfo &TRI) {
  RegisterSet LiveIns;
  RegisterSet Tmp;

  for (auto I : B.liveins()) {
    MCSubRegIndexIterator S(I.PhysReg, &TRI);
    if (I.LaneMask.all() || (I.LaneMask.any() && !S.isValid())) {
      Tmp.insert({I.PhysReg, 0});
      continue;
    }
    for (; S.isValid(); ++S) {
      unsigned SI = S.getSubRegIndex();
      if ((I.LaneMask & TRI.getSubRegIndexLaneMask(SI)).any())
        Tmp.insert({S.getSubReg(), 0});
    }
  }

  for (auto R : Tmp) {
    if (!Reserved[R.Reg])
      LiveIns.insert(R);
    for (auto S : expandToSubRegs(R, MRI, TRI))
      if (!Reserved[S.Reg])
        LiveIns.insert(S);
  }
  return LiveIns;
}
Example #9
// Return the preferred allocation register for reg, given a COPY instruction.
static unsigned copyHint(const MachineInstr *mi, unsigned reg,
                         const TargetRegisterInfo &tri,
                         const MachineRegisterInfo &mri) {
  unsigned sub, hreg, hsub;
  if (mi->getOperand(0).getReg() == reg) {
    sub = mi->getOperand(0).getSubReg();
    hreg = mi->getOperand(1).getReg();
    hsub = mi->getOperand(1).getSubReg();
  } else {
    sub = mi->getOperand(1).getSubReg();
    hreg = mi->getOperand(0).getReg();
    hsub = mi->getOperand(0).getSubReg();
  }

  if (!hreg)
    return 0;

  if (TargetRegisterInfo::isVirtualRegister(hreg))
    return sub == hsub ? hreg : 0;

  const TargetRegisterClass *rc = mri.getRegClass(reg);

  // Only allow physreg hints in rc.
  if (sub == 0)
    return rc->contains(hreg) ? hreg : 0;

  // reg:sub should match the physreg hreg.
  return tri.getMatchingSuperReg(hreg, sub, rc);
}
Example #10
void MachineRegisterInfo::closePhysRegsUsed(const TargetRegisterInfo &TRI) {
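  // Propagate the 'used' property from each used physical register to its
  // sub-registers, so the used-physreg set becomes closed under sub-registers.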
  for (int i = UsedPhysRegs.find_first(); i >= 0;
       i = UsedPhysRegs.find_next(i))
         for (const unsigned *SS = TRI.getSubRegisters(i);
              unsigned SubReg = *SS; ++SS)
           if (SubReg > unsigned(i))
             UsedPhysRegs.set(SubReg);
}
Example #11
/// Add pristine registers to the given \p LiveRegs: all callee-saved registers
/// except the ones that are actually saved in the function, as recorded in the
/// callee-saved info of \p MFI.
static void addPristines(LivePhysRegs &LiveRegs, const MachineFunction &MF,
                         const MachineFrameInfo &MFI,
                         const TargetRegisterInfo &TRI) {
  for (const MCPhysReg *CSR = TRI.getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
    LiveRegs.addReg(*CSR);
  for (const CalleeSavedInfo &Info : MFI.getCalleeSavedInfo())
    LiveRegs.removeReg(Info.getReg());
}
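A hedged sketch of the surrounding usage (MBB and TRI are assumed to be in scope in a machine pass): LivePhysRegs::addLiveOuts accounts for pristine callee-saved registers internally via a helper like the one above, so a typical client only needs:

  LivePhysRegs LiveRegs(TRI);
  LiveRegs.addLiveOuts(MBB);
  // Walk the block bottom-up; afterwards LiveRegs holds the registers that are
  // live before the first instruction of MBB.
  for (auto I = MBB.rbegin(), E = MBB.rend(); I != E; ++I)
    LiveRegs.stepBackward(*I);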
Example #12
void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
                                  const TargetRegisterInfo &TRI) {
  assert(TargetRegisterInfo::isVirtualRegister(Reg));
  if (SubIdx && getSubReg())
    SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
  setReg(Reg);
  if (SubIdx)
    setSubReg(SubIdx);
}
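A hedged illustration (MI, OldReg, NewReg and SubIdx are assumed): rewriting every operand of an instruction that refers to OldReg, composing sub-register indices where needed, is essentially what MachineInstr::substituteRegister does with this helper:

  for (MachineOperand &MO : MI.operands())
    if (MO.isReg() && MO.getReg() == OldReg)
      MO.substVirtReg(NewReg, SubIdx, TRI);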
Example #13
void PressureDiff::dump(const TargetRegisterInfo &TRI) const {
  for (const PressureChange &Change : *this) {
    if (!Change.isValid() || Change.getUnitInc() == 0)
      continue;
    dbgs() << "    " << TRI.getRegPressureSetName(Change.getPSet())
           << " " << Change.getUnitInc();
  }
  dbgs() << '\n';
}
Example #14
void PressureDiff::dump(const TargetRegisterInfo &TRI) const {
  const char *sep = "";
  for (const PressureChange &Change : *this) {
    if (!Change.isValid())
      break;
    dbgs() << sep << TRI.getRegPressureSetName(Change.getPSet())
           << " " << Change.getUnitInc();
    sep = "    ";
  }
  dbgs() << '\n';
}
Example #15
void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (getSubReg()) {
    Reg = TRI.getSubReg(Reg, getSubReg());
    // Note that getSubReg() may return 0 if the sub-register doesn't exist.
    // That won't happen in legal code.
    setSubReg(0);
    if (isDef())
      setIsUndef(false);
  }
  setReg(Reg);
}
Example #16
AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
    : RegisterBankInfo(AArch64::NumRegisterBanks) {
  // Initialize the GPR bank.
  createRegisterBank(AArch64::GPRRegBankID, "GPR");
  // The GPR register bank is fully defined by all the registers in
  // GPR64all + its subclasses.
  addRegBankCoverage(AArch64::GPRRegBankID, AArch64::GPR64allRegClassID, TRI);
  const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
  (void)RBGPR;
  assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");

  // Initialize the FPR bank.
  createRegisterBank(AArch64::FPRRegBankID, "FPR");
  // The FPR register bank is fully defined by all the registers in
  // QQQQ + its subclasses.
  addRegBankCoverage(AArch64::FPRRegBankID, AArch64::QQQQRegClassID, TRI);
  const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
  (void)RBFPR;
  assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
         "Subclass not added?");
  assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
         "Subclass not added?");
  assert(RBFPR.getSize() == 512 &&
         "FPRs should hold up to 512-bit via QQQQ sequence");

  // Initialize the CCR bank.
  createRegisterBank(AArch64::CCRRegBankID, "CCR");
  addRegBankCoverage(AArch64::CCRRegBankID, AArch64::CCRRegClassID, TRI);
  const RegisterBank &RBCCR = getRegBank(AArch64::CCRRegBankID);
  (void)RBCCR;
  assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
         "Class not added?");
  assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");

  verify(TRI);
}
Example #17
X86RegisterBankInfo::X86RegisterBankInfo(const TargetRegisterInfo &TRI)
    : X86GenRegisterBankInfo() {

  // validate RegBank initialization.
  const RegisterBank &RBGPR = getRegBank(X86::GPRRegBankID);
  (void)RBGPR;
  assert(&X86::GPRRegBank == &RBGPR && "Incorrect RegBanks initialization.");

  // The GPR register bank is fully defined by all the registers in
  // GR64 + its subclasses.
  assert(RBGPR.covers(*TRI.getRegClass(X86::GR64RegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");
}
Example #18
void LembergInstrInfo::reMaterialize(MachineBasicBlock &MBB,
									 MachineBasicBlock::iterator I,
									 unsigned DestReg,
									 unsigned SubIdx,
									 const MachineInstr *Orig,
									 const TargetRegisterInfo &TRI) const {

  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);

  // Make sure registers end up in the right register class
  if (MI->getOpcode() == Lemberg::LOADga
	  || MI->getOpcode() == Lemberg::LOADga_off
	  || MI->getOpcode() == Lemberg::LOADuimm19s2) {
	  assert((TRI.isVirtualRegister(DestReg) || Lemberg::AImmRegClass.contains(DestReg))
			 && "Cannot rematerialize this instruction to arbitrary physical register");
	  if (TRI.isVirtualRegister(DestReg)) {
		MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
		MRI.constrainRegClass(DestReg, Lemberg::AImmRegisterClass);
	  }
  }

  MBB.insert(I, MI);
}
Example #19
File: Utils.cpp  Project: happz/llvm
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    unsigned Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
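A hedged sketch of the typical call site (assumed to be a target's InstructionSelector::select implementation, with TargetOpc as a placeholder opcode): after mutating the generic instruction into a target instruction, its register operands are constrained in one go:

  I.setDesc(TII.get(TargetOpc)); // TargetOpc is an assumed placeholder opcode.
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);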
Example #20
/// Get the size in bits of the \p Reg.
///
/// \pre \p Reg != 0 (NoRegister).
static unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI,
                              const TargetRegisterInfo &TRI) {
  const TargetRegisterClass *RC = nullptr;
  if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = TRI.getMinimalPhysRegClass(Reg);
  } else {
    unsigned RegSize = MRI.getSize(Reg);
    // If Reg is not a generic register, query the register class to
    // get its size.
    if (RegSize)
      return RegSize;
    // Since Reg is not a generic register, it must have a register class.
    RC = MRI.getRegClass(Reg);
  }
  assert(RC && "Unable to deduce the register class");
  return RC->getSize() * 8;
}
Example #21
File: Utils.cpp  Project: happz/llvm
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  unsigned Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we can
  // skip constraining as the instruction defining the register would constrain
  // it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainRegToClass(MRI, TII, RBI, InsertPt, Reg, *RegClass);
}
Example #22
bool InstructionSelector::constrainSelectedInstRegOperands(
    MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI,
    const RegisterBankInfo &RBI) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC = TII.getRegClass(I.getDesc(), OpI, &TRI, MF);
    assert(RC && "Selected inst should have regclass operand");

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // If the operand is a physreg, we only insert COPYs if the register class
    // doesn't contain the register.
    if (RBI.constrainGenericRegister(MO.getReg(), *RC, MRI))
      continue;

    DEBUG(dbgs() << "Constraining with COPYs isn't implemented yet");
    return false;
  }
  return true;
}
Example #23
ARMRegisterBankInfo::ARMRegisterBankInfo(const TargetRegisterInfo &TRI)
    : ARMGenRegisterBankInfo() {
  static bool AlreadyInit = false;
  // We have only one set of register banks, whatever the subtarget
  // is. Therefore, the initialization of the RegBanks table should be
  // done only once. Indeed the table of all register banks
  // (ARM::RegBanks) is unique in the compiler. At some point, it
  // will get tablegen'ed and the whole constructor becomes empty.
  if (AlreadyInit)
    return;
  AlreadyInit = true;

  const RegisterBank &RBGPR = getRegBank(ARM::GPRRegBankID);
  (void)RBGPR;
  assert(&ARM::GPRRegBank == &RBGPR && "The order in RegBanks is messed up");

  // Initialize the GPR bank.
  assert(RBGPR.covers(*TRI.getRegClass(ARM::GPRRegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.covers(*TRI.getRegClass(ARM::GPRwithAPSRRegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.covers(*TRI.getRegClass(ARM::GPRnopcRegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.covers(*TRI.getRegClass(ARM::rGPRRegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.covers(*TRI.getRegClass(ARM::tGPRRegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.covers(*TRI.getRegClass(ARM::tcGPRRegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.covers(*TRI.getRegClass(ARM::tGPR_and_tcGPRRegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.getSize() == 32 && "GPRs should hold up to 32-bit");

#ifndef NDEBUG
  ARM::checkPartialMappings();
  ARM::checkValueMappings();
#endif
}
Example #24
bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    unsigned MachineReg, unsigned MaxSize) {
  if (!TRI.isPhysicalRegister(MachineReg)) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back({-1, 0, nullptr});
      return true;
    }
    return false;
  }

  int Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
  if (Reg >= 0) {
    DwarfRegs.push_back({Reg, 0, nullptr});
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCSuperRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(*SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back({Reg, 0, "super-register"});
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits, assuming 8 bits per byte.
  unsigned RegSize = TRI.getMinimalPhysRegClass(MachineReg)->getSize() * 8;
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs.
  SmallBitVector Coverage(RegSize, false);
  for (MCSubRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, *SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(*SR, false);

    // Intersection between the bits we already emitted and the bits
    // covered by this subregister.
    SmallBitVector Intersection(RegSize, false);
    Intersection.set(Offset, Offset + Size);
    Intersection ^= Coverage;

    // If this sub-register has a DWARF number and we haven't covered
    // its range, emit a DWARF piece for it.
    if (Reg >= 0 && Intersection.any()) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back({-1, Offset - CurPos, nullptr});
      DwarfRegs.push_back(
          {Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"});
      if (Offset >= MaxSize)
        break;

      // Mark it as emitted.
      Coverage.set(Offset, Offset + Size);
      CurPos = Offset + Size;
    }
  }

  return CurPos;
}
Example #25
bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    unsigned MachineReg, unsigned MaxSize) {
  if (!TRI.isPhysicalRegister(MachineReg)) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back({-1, 0, nullptr});
      return true;
    }
    return false;
  }

  int Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
  if (Reg >= 0) {
    DwarfRegs.push_back({Reg, 0, nullptr});
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCSuperRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(*SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back({Reg, 0, "super-register"});
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCSubRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, *SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg < 0)
      continue;

    // Intersection between the bits we already emitted and the bits
    // covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, emit a DWARF piece for it.
    if (CurSubReg.test(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back({-1, Offset - CurPos, "no DWARF register encoding"});
      DwarfRegs.push_back(
          {Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"});
      if (Offset >= MaxSize)
        break;

      // Mark it as emitted.
      Coverage.set(Offset, Offset + Size);
      CurPos = Offset + Size;
    }
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
  if (CurPos < RegSize)
    DwarfRegs.push_back({-1, RegSize - CurPos, "no DWARF register encoding"});
  return true;
}
Example #26
RegDefsUses::RegDefsUses(const TargetRegisterInfo &TRI)
    : TRI(TRI), Defs(TRI.getNumRegs(), false), Uses(TRI.getNumRegs(), false) {}
Example #27
void RegisterBankInfo::addRegBankCoverage(unsigned ID, unsigned RCId,
                                          const TargetRegisterInfo &TRI,
                                          bool AddTypeMapping) {
  RegisterBank &RB = getRegBank(ID);
  unsigned NbOfRegClasses = TRI.getNumRegClasses();

  DEBUG(dbgs() << "Add coverage for: " << RB << '\n');

  // Check if RB is under construction.
  if (!RB.isValid())
    RB.ContainedRegClasses.resize(NbOfRegClasses);
  else if (RB.covers(*TRI.getRegClass(RCId)))
    // If RB already covers this register class, there is nothing
    // to do.
    return;

  BitVector &Covered = RB.ContainedRegClasses;
  SmallVector<unsigned, 8> WorkList;

  WorkList.push_back(RCId);
  Covered.set(RCId);

  unsigned &MaxSize = RB.Size;
  do {
    unsigned RCId = WorkList.pop_back_val();

    const TargetRegisterClass &CurRC = *TRI.getRegClass(RCId);

    DEBUG(dbgs() << "Examine: " << TRI.getRegClassName(&CurRC)
                 << "(Size*8: " << (CurRC.getSize() * 8) << ")\n");

    // Remember the biggest size in bits.
    MaxSize = std::max(MaxSize, CurRC.getSize() * 8);

    // If we have been asked to record the type supported by this
    // register bank, do it now.
    if (AddTypeMapping)
      for (MVT::SimpleValueType SVT :
           make_range(CurRC.vt_begin(), CurRC.vt_end()))
        recordRegBankForType(getRegBank(ID), SVT);

    // Walk through all sub register classes and push them into the worklist.
    bool First = true;
    for (BitMaskClassIterator It(CurRC.getSubClassMask(), TRI); It.isValid();
         ++It) {
      unsigned SubRCId = It.getID();
      if (!Covered.test(SubRCId)) {
        if (First)
          DEBUG(dbgs() << "  Enqueue sub-class: ");
        DEBUG(dbgs() << TRI.getRegClassName(TRI.getRegClass(SubRCId)) << ", ");
        WorkList.push_back(SubRCId);
        // Remember that we saw the sub class.
        Covered.set(SubRCId);
        First = false;
      }
    }
    if (!First)
      DEBUG(dbgs() << '\n');

    // Also push all the register classes that can be accessed via a
    // subreg index, i.e., their subreg-classes (which are different from
    // their subclasses).
    //
    // Note: It would probably be faster to go the other way around
    // and have this method add only super classes, since this
    // information is available in a more efficient way. However, it
    // feels less natural for the clients of this API. Moreover, we will
    // TableGen the whole bitset at some point, so compile time for
    // the initialization is not very important.
    First = true;
    for (unsigned SubRCId = 0; SubRCId < NbOfRegClasses; ++SubRCId) {
      if (Covered.test(SubRCId))
        continue;
      bool Pushed = false;
      const TargetRegisterClass *SubRC = TRI.getRegClass(SubRCId);
      for (SuperRegClassIterator SuperRCIt(SubRC, &TRI); SuperRCIt.isValid();
           ++SuperRCIt) {
        if (Pushed)
          break;
        for (BitMaskClassIterator It(SuperRCIt.getMask(), TRI); It.isValid();
             ++It) {
          unsigned SuperRCId = It.getID();
          if (SuperRCId == RCId) {
            if (First)
              DEBUG(dbgs() << "  Enqueue subreg-class: ");
            DEBUG(dbgs() << TRI.getRegClassName(SubRC) << ", ");
            WorkList.push_back(SubRCId);
            // Remember that we saw the sub class.
            Covered.set(SubRCId);
            Pushed = true;
            First = false;
            break;
          }
        }
      }
    }
    if (!First)
      DEBUG(dbgs() << '\n');
  } while (!WorkList.empty());
}
Example #28
AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
    : RegisterBankInfo(AArch64::RegBanks, AArch64::NumRegisterBanks) {
  static bool AlreadyInit = false;
  // We have only one set of register banks, whatever the subtarget
  // is. Therefore, the initialization of the RegBanks table should be
  // done only once. Indeed the table of all register banks
  // (AArch64::RegBanks) is unique in the compiler. At some point, it
  // will get tablegen'ed and the whole constructor becomes empty.
  if (AlreadyInit)
    return;
  AlreadyInit = true;
  // Initialize the GPR bank.
  createRegisterBank(AArch64::GPRRegBankID, "GPR");
  // The GPR register bank is fully defined by all the registers in
  // GPR64all + its subclasses.
  addRegBankCoverage(AArch64::GPRRegBankID, AArch64::GPR64allRegClassID, TRI);
  const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
  (void)RBGPR;
  assert(&AArch64::GPRRegBank == &RBGPR &&
         "The order in RegBanks is messed up");
  assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");

  // Initialize the FPR bank.
  createRegisterBank(AArch64::FPRRegBankID, "FPR");
  // The FPR register bank is fully defined by all the registers in
  // QQQQ + its subclasses.
  addRegBankCoverage(AArch64::FPRRegBankID, AArch64::QQQQRegClassID, TRI);
  const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
  (void)RBFPR;
  assert(&AArch64::FPRRegBank == &RBFPR &&
         "The order in RegBanks is messed up");
  assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
         "Subclass not added?");
  assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
         "Subclass not added?");
  assert(RBFPR.getSize() == 512 &&
         "FPRs should hold up to 512-bit via QQQQ sequence");

  // Initialize the CCR bank.
  createRegisterBank(AArch64::CCRRegBankID, "CCR");
  addRegBankCoverage(AArch64::CCRRegBankID, AArch64::CCRRegClassID, TRI);
  const RegisterBank &RBCCR = getRegBank(AArch64::CCRRegBankID);
  (void)RBCCR;
  assert(&AArch64::CCRRegBank == &RBCCR &&
         "The order in RegBanks is messed up");
  assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
         "Class not added?");
  assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");

  // Check that the TableGen'ed-like file is in sync with our expectations.
  // First, the Idx.
  assert(AArch64::PartialMappingIdx::GPR32 ==
             AArch64::PartialMappingIdx::FirstGPR &&
         "GPR32 index not first in the GPR list");
  assert(AArch64::PartialMappingIdx::GPR64 ==
             AArch64::PartialMappingIdx::LastGPR &&
         "GPR64 index not last in the GPR list");
  assert(AArch64::PartialMappingIdx::FirstGPR <=
             AArch64::PartialMappingIdx::LastGPR &&
         "GPR list is backward");
  assert(AArch64::PartialMappingIdx::FPR32 ==
             AArch64::PartialMappingIdx::FirstFPR &&
         "FPR32 index not first in the FPR list");
  assert(AArch64::PartialMappingIdx::FPR512 ==
             AArch64::PartialMappingIdx::LastFPR &&
         "FPR512 index not last in the FPR list");
  assert(AArch64::PartialMappingIdx::FirstFPR <=
             AArch64::PartialMappingIdx::LastFPR &&
         "FPR list is backward");
  assert(AArch64::PartialMappingIdx::FPR32 + 1 ==
             AArch64::PartialMappingIdx::FPR64 &&
         AArch64::PartialMappingIdx::FPR64 + 1 ==
             AArch64::PartialMappingIdx::FPR128 &&
         AArch64::PartialMappingIdx::FPR128 + 1 ==
             AArch64::PartialMappingIdx::FPR256 &&
         AArch64::PartialMappingIdx::FPR256 + 1 ==
             AArch64::PartialMappingIdx::FPR512 &&
         "FPR indices not properly ordered");
// Now, the content.
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)                      \
  do {                                                                         \
    const PartialMapping &Map =                                                \
        AArch64::PartMappings[AArch64::PartialMappingIdx::Idx];                \
    (void) Map;                                                                \
    assert(Map.StartIdx == ValStartIdx && Map.Length == ValLength &&           \
           Map.RegBank == &RB && #Idx " is incorrectly initialized");          \
  } while (0)

  CHECK_PARTIALMAP(GPR32, 0, 32, RBGPR);
  CHECK_PARTIALMAP(GPR64, 0, 64, RBGPR);
  CHECK_PARTIALMAP(FPR32, 0, 32, RBFPR);
  CHECK_PARTIALMAP(FPR64, 0, 64, RBFPR);
  CHECK_PARTIALMAP(FPR128, 0, 128, RBFPR);
  CHECK_PARTIALMAP(FPR256, 0, 256, RBFPR);
  CHECK_PARTIALMAP(FPR512, 0, 512, RBFPR);

  assert(verify(TRI) && "Invalid register bank information");
}
Example #29
/// EmitLiveInCopy - Emit a copy for a live-in physical register. If the
/// physical register has only a single copy use, then coalesce the copy
/// if possible.
static void EmitLiveInCopy(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator &InsertPos,
                           unsigned VirtReg, unsigned PhysReg,
                           const TargetRegisterClass *RC,
                           DenseMap<MachineInstr*, unsigned> &CopyRegMap,
                           const MachineRegisterInfo &MRI,
                           const TargetRegisterInfo &TRI,
                           const TargetInstrInfo &TII) {
  unsigned NumUses = 0;
  MachineInstr *UseMI = NULL;
  for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(VirtReg),
         UE = MRI.use_end(); UI != UE; ++UI) {
    UseMI = &*UI;
    if (++NumUses > 1)
      break;
  }

  // If the number of uses is not one, or the use is not a move instruction,
  // don't coalesce. Also, only coalesce away a copy from one virtual
  // register to another virtual register.
  bool Coalesced = false;
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (NumUses == 1 &&
      TII.isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
      TargetRegisterInfo::isVirtualRegister(DstReg)) {
    VirtReg = DstReg;
    Coalesced = true;
  }

  // Now find an ideal location to insert the copy.
  MachineBasicBlock::iterator Pos = InsertPos;
  while (Pos != MBB->begin()) {
    MachineInstr *PrevMI = prior(Pos);
    DenseMap<MachineInstr*, unsigned>::iterator RI = CopyRegMap.find(PrevMI);
    // copyRegToReg might emit multiple instructions to do a copy.
    unsigned CopyDstReg = (RI == CopyRegMap.end()) ? 0 : RI->second;
    if (CopyDstReg && !TRI.regsOverlap(CopyDstReg, PhysReg))
      // This is what the BB looks like right now:
      // r1024 = mov r0
      // ...
      // r1    = mov r1024
      //
      // We want to insert "r1025 = mov r1". Inserting this copy below the
      // move to r1024 makes it impossible for that move to be coalesced.
      //
      // r1025 = mov r1
      // r1024 = mov r0
      // ...
      // r1    = mov r1024
      // r2    = mov r1025
      break; // Woot! Found a good location.
    --Pos;
  }

  bool Emitted = TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
  assert(Emitted && "Unable to issue a live-in copy instruction!\n");
  (void) Emitted;

  CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
  if (Coalesced) {
    if (&*InsertPos == UseMI) ++InsertPos;
    MBB->erase(UseMI);
  }
}
Example #30
AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
    : AArch64GenRegisterBankInfo() {
  static bool AlreadyInit = false;
  // We have only one set of register banks, whatever the subtarget
  // is. Therefore, the initialization of the RegBanks table should be
  // done only once. Indeed the table of all register banks
  // (AArch64::RegBanks) is unique in the compiler. At some point, it
  // will get tablegen'ed and the whole constructor becomes empty.
  if (AlreadyInit)
    return;
  AlreadyInit = true;

  const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
  (void)RBGPR;
  assert(&AArch64::GPRRegBank == &RBGPR &&
         "The order in RegBanks is messed up");

  const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
  (void)RBFPR;
  assert(&AArch64::FPRRegBank == &RBFPR &&
         "The order in RegBanks is messed up");

  const RegisterBank &RBCCR = getRegBank(AArch64::CCRRegBankID);
  (void)RBCCR;
  assert(&AArch64::CCRRegBank == &RBCCR &&
         "The order in RegBanks is messed up");

  // The GPR register bank is fully defined by all the registers in
  // GPR64all + its subclasses.
  assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");

  // The FPR register bank is fully defined by all the registers in
  // QQQQ + its subclasses.
  assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
         "Subclass not added?");
  assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
         "Subclass not added?");
  assert(RBFPR.getSize() == 512 &&
         "FPRs should hold up to 512-bit via QQQQ sequence");

  assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
         "Class not added?");
  assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");

  // Check that the TableGen'ed-like file is in sync with our expectations.
  // First, the Idx.
  assert(checkPartialMappingIdx(PMI_FirstGPR, PMI_LastGPR,
                                {PMI_GPR32, PMI_GPR64}) &&
         "PartialMappingIdx's are incorrectly ordered");
  assert(checkPartialMappingIdx(
             PMI_FirstFPR, PMI_LastFPR,
             {PMI_FPR32, PMI_FPR64, PMI_FPR128, PMI_FPR256, PMI_FPR512}) &&
         "PartialMappingIdx's are incorrectly ordered");
// Now, the content.
// Check partial mapping.
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)                      \
  do {                                                                         \
    assert(                                                                    \
        checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
        #Idx " is incorrectly initialized");                                   \
  } while (false)

  CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
  CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);
  CHECK_PARTIALMAP(PMI_FPR32, 0, 32, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR128, 0, 128, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR256, 0, 256, RBFPR);
  CHECK_PARTIALMAP(PMI_FPR512, 0, 512, RBFPR);

// Check value mapping.
#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset)                              \
  do {                                                                         \
    assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size,            \
                             PartialMappingIdx::PMI_First##RBName, Size,       \
                             Offset) &&                                        \
           #RBName #Size " " #Offset " is incorrectly initialized");           \
  } while (false)

#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)

  CHECK_VALUEMAP(GPR, 32);
  CHECK_VALUEMAP(GPR, 64);
  CHECK_VALUEMAP(FPR, 32);
  CHECK_VALUEMAP(FPR, 64);
  CHECK_VALUEMAP(FPR, 128);
  CHECK_VALUEMAP(FPR, 256);
  CHECK_VALUEMAP(FPR, 512);

// Check the value mapping for 3-operands instructions where all the operands
// map to the same value mapping.
#define CHECK_VALUEMAP_3OPS(RBName, Size)                                      \
  do {                                                                         \
    CHECK_VALUEMAP_IMPL(RBName, Size, 0);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 1);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 2);                                      \
  } while (false)

  CHECK_VALUEMAP_3OPS(GPR, 32);
  CHECK_VALUEMAP_3OPS(GPR, 64);
  CHECK_VALUEMAP_3OPS(FPR, 32);
  CHECK_VALUEMAP_3OPS(FPR, 64);
  CHECK_VALUEMAP_3OPS(FPR, 128);
  CHECK_VALUEMAP_3OPS(FPR, 256);
  CHECK_VALUEMAP_3OPS(FPR, 512);

#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min;               \
    unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min;               \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getCopyMapping(                                  \
        AArch64::RBNameDst##RegBankID, AArch64::RBNameSrc##RegBankID, Size);  \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && #RBNameDst #Size                       \
           " Dst is incorrectly initialized");                                 \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && #RBNameSrc #Size                       \
           " Src is incorrectly initialized");                                 \
                                                                               \
  } while (false)

  CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
  CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 32);
  CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 64);
  CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 64);
  CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 32);
  CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 32);
  CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 64);
  CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 64);

  assert(verify(TRI) && "Invalid register bank information");
}