Example #1
0
/// Changes operand MO in MI to refer to PhysReg, taking subregisters into
/// account. This may invalidate any operand pointers. Returns true if the
/// operand kills its register.
bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
                              MCPhysReg PhysReg) {
  bool Dead = MO.isDead();
  if (!MO.getSubReg()) {
    MO.setReg(PhysReg);
    MO.setIsRenamable(true);
    return MO.isKill() || Dead;
  }

  // Handle subregister index.
  MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : 0);
  MO.setIsRenamable(true);
  MO.setSubReg(0);

  // A kill flag implies killing the full register. Add corresponding super
  // register kill.
  if (MO.isKill()) {
    MI.addRegisterKilled(PhysReg, TRI, true);
    return true;
  }

  // A <def,read-undef> of a sub-register requires an implicit def of the full
  // register.
  if (MO.isDef() && MO.isUndef())
    MI.addRegisterDefined(PhysReg, TRI);

  return Dead;
}
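The sketch below is a self-contained model of the same control flow, using a made-up Operand struct and a fake subregister lookup rather than the LLVM API. It only illustrates the two paths: a plain rewrite when there is no subregister index, and the narrowing plus "promote the kill to the full register" handling when there is one.
#include <cassert>

// Hypothetical stand-in for MachineOperand; illustration only.
struct Operand {
  unsigned Reg = 0;
  unsigned SubReg = 0;   // 0 means "whole register"
  bool Kill = false;
  bool Dead = false;
  bool Renamable = false;
};

// Pretend subregister lookup: encode "PhysReg + SubIdx" as a new number.
static unsigned subRegOf(unsigned PhysReg, unsigned SubIdx) {
  return PhysReg * 10 + SubIdx;
}

// Mirrors the two paths in setPhysReg. NeedSuperKill tells the caller it must
// still mark the *full* physical register as killed (MI.addRegisterKilled in
// the real code), because a kill flag on a subregister operand implies killing
// the whole register.
static bool setPhysRegSketch(Operand &MO, unsigned PhysReg, bool &NeedSuperKill) {
  NeedSuperKill = false;
  bool Dead = MO.Dead;
  if (MO.SubReg == 0) {
    MO.Reg = PhysReg;
    MO.Renamable = true;
    return MO.Kill || Dead;
  }
  MO.Reg = PhysReg ? subRegOf(PhysReg, MO.SubReg) : 0;
  MO.Renamable = true;
  MO.SubReg = 0;
  if (MO.Kill) {
    NeedSuperKill = true;
    return true;
  }
  return Dead;
}

int main() {
  Operand MO;
  MO.SubReg = 1;
  MO.Kill = true;
  bool NeedSuperKill = false;
  bool Killed = setPhysRegSketch(MO, 5, NeedSuperKill);
  assert(Killed && NeedSuperKill && MO.Reg == 51 && MO.SubReg == 0);
  return 0;
}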
Example #2
0
/// Generate a predicated version of MI (where the condition is given via
/// PredOp and Cond) at the point indicated by Where.
void HexagonExpandCondsets::predicateAt(const MachineOperand &DefOp,
                                        MachineInstr &MI,
                                        MachineBasicBlock::iterator Where,
                                        const MachineOperand &PredOp, bool Cond,
                                        std::set<unsigned> &UpdRegs) {
  // The problem with updating live intervals is that we can move one def
  // past another def. In particular, this can happen when moving an A2_tfrt
  // over an A2_tfrf defining the same register. From the point of view of
  // live intervals, these two instructions are two separate definitions,
  // and each one starts another live segment. LiveIntervals's "handleMove"
  // does not allow such moves, so we need to handle it ourselves. To avoid
  // invalidating liveness data while we are using it, the move will be
  // implemented in 4 steps: (1) add a clone of the instruction MI at the
  // target location, (2) update liveness, (3) delete the old instruction,
  // and (4) update liveness again.

  MachineBasicBlock &B = *MI.getParent();
  DebugLoc DL = Where->getDebugLoc();  // "Where" points to an instruction.
  unsigned Opc = MI.getOpcode();
  unsigned PredOpc = HII->getCondOpcode(Opc, !Cond);
  MachineInstrBuilder MB = BuildMI(B, Where, DL, HII->get(PredOpc));
  unsigned Ox = 0, NP = MI.getNumOperands();
  // Skip all defs from MI first.
  while (Ox < NP) {
    MachineOperand &MO = MI.getOperand(Ox);
    if (!MO.isReg() || !MO.isDef())
      break;
    Ox++;
  }
  // Add the new def, then the predicate register, then the rest of the
  // operands.
  MB.addReg(DefOp.getReg(), getRegState(DefOp), DefOp.getSubReg());
  MB.addReg(PredOp.getReg(), PredOp.isUndef() ? RegState::Undef : 0,
            PredOp.getSubReg());
  while (Ox < NP) {
    MachineOperand &MO = MI.getOperand(Ox);
    if (!MO.isReg() || !MO.isImplicit())
      MB.add(MO);
    Ox++;
  }

  MachineFunction &MF = *B.getParent();
  MachineInstr::mmo_iterator I = MI.memoperands_begin();
  unsigned NR = std::distance(I, MI.memoperands_end());
  MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(NR);
  for (unsigned i = 0; i < NR; ++i)
    MemRefs[i] = *I++;
  MB.setMemRefs(MemRefs, MemRefs+NR);

  MachineInstr *NewI = MB;
  NewI->clearKillInfo();
  LIS->InsertMachineInstrInMaps(*NewI);

  for (auto &Op : NewI->operands())
    if (Op.isReg())
      UpdRegs.insert(Op.getReg());
}
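A standalone sketch (hypothetical Op descriptor, not MachineOperand) of the operand ordering that predicateAt builds: skip the original defs, add the new def and the predicate, then append the remaining explicit operands while dropping implicit register operands.
#include <cassert>
#include <vector>

// Simplified operand descriptor; stands in for MachineOperand.
struct Op {
  bool IsReg;
  bool IsDef;
  bool IsImplicit;
  int Id;   // identification only, for the test below
};

// Mirrors the operand ordering built by predicateAt: skip the defs of the
// original instruction, then emit the new def, the predicate register, and
// finally the remaining explicit operands.
static std::vector<Op> buildPredicatedOperands(const std::vector<Op> &MI,
                                               Op NewDef, Op Pred) {
  std::vector<Op> Out;
  size_t Ox = 0;
  while (Ox < MI.size() && MI[Ox].IsReg && MI[Ox].IsDef)
    ++Ox;                       // skip all leading defs
  Out.push_back(NewDef);
  Out.push_back(Pred);
  for (; Ox < MI.size(); ++Ox)
    if (!MI[Ox].IsReg || !MI[Ox].IsImplicit)
      Out.push_back(MI[Ox]);    // drop implicit register operands
  return Out;
}

int main() {
  std::vector<Op> MI = {
      {true, true, false, 1},   // def
      {true, false, false, 2},  // explicit register use
      {true, false, true, 3},   // implicit register use (dropped)
      {false, false, false, 4}, // immediate
  };
  Op NewDef{true, true, false, 10}, Pred{true, false, false, 11};
  auto Out = buildPredicatedOperands(MI, NewDef, Pred);
  assert(Out.size() == 4 && Out[0].Id == 10 && Out[1].Id == 11 &&
         Out[2].Id == 2 && Out[3].Id == 4);
  return 0;
}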
Example #3
0
static LaneBitmask getDefRegMask(const MachineOperand &MO,
                                 const MachineRegisterInfo &MRI) {
  assert(MO.isDef() && MO.isReg() &&
    TargetRegisterInfo::isVirtualRegister(MO.getReg()));

  // We don't rely on the read-undef flag because, during tentative schedule
  // tracking, it isn't set correctly yet. This still works because the use
  // mask has already been tracked using LIS.
  return MO.getSubReg() == 0 ?
    MRI.getMaxLaneMaskForVReg(MO.getReg()) :
    MRI.getTargetRegisterInfo()->getSubRegIndexLaneMask(MO.getSubReg());
}
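A minimal illustration of the lane-mask idea behind getDefRegMask, using made-up lane constants instead of LaneBitmask/TRI: a def with no subregister covers the register's maximal mask, while a subregister def covers only that subregister's lanes.
#include <cassert>
#include <cstdint>

// Toy lane masks: imagine a 128-bit register with four 32-bit lanes.
// sub0..sub3 each cover one lane.
enum : uint64_t {
  LaneSub0 = 0x1, LaneSub1 = 0x2, LaneSub2 = 0x4, LaneSub3 = 0x8,
  MaxLaneMask = 0xF            // all lanes of the virtual register
};

// Counterpart of getDefRegMask: a def of the whole vreg touches all lanes,
// a def of a subregister touches only that subregister's lanes.
static uint64_t defRegMask(uint64_t SubRegLanes /* 0 = no subreg */) {
  return SubRegLanes == 0 ? MaxLaneMask : SubRegLanes;
}

int main() {
  assert(defRegMask(0) == MaxLaneMask);
  assert(defRegMask(LaneSub1) == LaneSub1);
  // A partial def leaves the other lanes untouched:
  assert((MaxLaneMask & ~defRegMask(LaneSub0 | LaneSub1)) ==
         (LaneSub2 | LaneSub3));
  return 0;
}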
Example #4
0
/// isIdenticalTo - Return true if this operand is identical to the specified
/// operand.
bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
  if (getType() != Other.getType()) return false;
  
  switch (getType()) {
  default: assert(0 && "Unrecognized operand type");
  case MachineOperand::MO_Register:
    return getReg() == Other.getReg() && isDef() == Other.isDef() &&
           getSubReg() == Other.getSubReg();
  case MachineOperand::MO_Immediate:
    return getImm() == Other.getImm();
  case MachineOperand::MO_FPImmediate:
    return getFPImm() == Other.getFPImm();
  case MachineOperand::MO_MachineBasicBlock:
    return getMBB() == Other.getMBB();
  case MachineOperand::MO_FrameIndex:
    return getIndex() == Other.getIndex();
  case MachineOperand::MO_ConstantPoolIndex:
    return getIndex() == Other.getIndex() && getOffset() == Other.getOffset();
  case MachineOperand::MO_JumpTableIndex:
    return getIndex() == Other.getIndex();
  case MachineOperand::MO_GlobalAddress:
    return getGlobal() == Other.getGlobal() && getOffset() == Other.getOffset();
  case MachineOperand::MO_ExternalSymbol:
    return !strcmp(getSymbolName(), Other.getSymbolName()) &&
           getOffset() == Other.getOffset();
  }
}
Example #5
0
void DetectDeadLanes::addUsedLanesOnOperand(const MachineOperand &MO,
                                            LaneBitmask UsedLanes) {
  if (!MO.readsReg())
    return;
  unsigned MOReg = MO.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(MOReg))
    return;

  unsigned MOSubReg = MO.getSubReg();
  if (MOSubReg != 0)
    UsedLanes = TRI->composeSubRegIndexLaneMask(MOSubReg, UsedLanes);
  UsedLanes &= MRI->getMaxLaneMaskForVReg(MOReg);

  unsigned MORegIdx = TargetRegisterInfo::virtReg2Index(MOReg);
  VRegInfo &MORegInfo = VRegInfos[MORegIdx];
  LaneBitmask PrevUsedLanes = MORegInfo.UsedLanes;
  // Any change at all?
  if ((UsedLanes & ~PrevUsedLanes) == 0)
    return;

  // Set UsedLanes and remember instruction for further propagation.
  MORegInfo.UsedLanes = PrevUsedLanes | UsedLanes;
  if (DefinedByCopy.test(MORegIdx))
    PutInWorklist(MORegIdx);
}
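The early-out "(UsedLanes & ~PrevUsedLanes) == 0" is what makes this propagation a terminating fixed-point computation: lane sets only grow, and a vreg is re-queued only when genuinely new lanes appear. A toy model of that pattern (names invented, not the DetectDeadLanes API):
#include <cassert>
#include <cstdint>
#include <queue>
#include <vector>

// Minimal model of the worklist loop: per-vreg lane sets only ever grow, and
// a vreg is re-queued only when new lanes appear, so the loop terminates.
struct Propagator {
  std::vector<uint64_t> UsedLanes;
  std::queue<unsigned> Worklist;

  explicit Propagator(unsigned NumVRegs) : UsedLanes(NumVRegs, 0) {}

  void addUsedLanes(unsigned VReg, uint64_t Lanes) {
    uint64_t Prev = UsedLanes[VReg];
    if ((Lanes & ~Prev) == 0)
      return;                   // no new lanes: nothing to propagate
    UsedLanes[VReg] = Prev | Lanes;
    Worklist.push(VReg);        // revisit this vreg's defs/uses later
  }
};

int main() {
  Propagator P(2);
  P.addUsedLanes(0, 0x3);
  P.addUsedLanes(0, 0x1);       // subset of what is known: not re-queued
  P.addUsedLanes(0, 0x4);       // new lane: re-queued
  assert(P.UsedLanes[0] == 0x7 && P.Worklist.size() == 2);
  return 0;
}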
Example #6
0
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}
Example #7
0
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}
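A plain-integer sketch of the sub0/sub1 split performed above. The real code goes through a 64-bit APInt; this only illustrates that sub0 corresponds to the low 32 bits and sub1 to the high 32 bits of the immediate.
#include <cassert>
#include <cstdint>

// sub0 half of a 64-bit immediate: its low 32 bits.
static uint32_t immSub0(int64_t Imm) { return static_cast<uint32_t>(Imm); }
// sub1 half: its high 32 bits.
static uint32_t immSub1(int64_t Imm) {
  return static_cast<uint32_t>(static_cast<uint64_t>(Imm) >> 32);
}

int main() {
  int64_t Imm = 0x123456789ABCDEF0LL;
  assert(immSub0(Imm) == 0x9ABCDEF0u);
  assert(immSub1(Imm) == 0x12345678u);
  // Reassembling the halves gives back the original value.
  uint64_t Re = (static_cast<uint64_t>(immSub1(Imm)) << 32) | immSub0(Imm);
  assert(Re == static_cast<uint64_t>(Imm));
  return 0;
}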
Example #8
0
void DetectDeadLanes::transferDefinedLanesStep(const MachineOperand &Use,
                                               LaneBitmask DefinedLanes) {
  if (!Use.readsReg())
    return;
  // Check whether the operand writes a vreg and is part of a COPY-like
  // instruction.
  const MachineInstr &MI = *Use.getParent();
  if (MI.getDesc().getNumDefs() != 1)
    return;
  // FIXME: PATCHPOINT instructions announce a Def that does not always exist,
  // they really need to be modeled differently!
  if (MI.getOpcode() == TargetOpcode::PATCHPOINT)
    return;
  const MachineOperand &Def = *MI.defs().begin();
  unsigned DefReg = Def.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DefReg))
    return;
  unsigned DefRegIdx = TargetRegisterInfo::virtReg2Index(DefReg);
  if (!DefinedByCopy.test(DefRegIdx))
    return;

  unsigned OpNum = MI.getOperandNo(&Use);
  DefinedLanes =
      TRI->reverseComposeSubRegIndexLaneMask(Use.getSubReg(), DefinedLanes);
  DefinedLanes = transferDefinedLanes(Def, OpNum, DefinedLanes);

  VRegInfo &RegInfo = VRegInfos[DefRegIdx];
  LaneBitmask PrevDefinedLanes = RegInfo.DefinedLanes;
  // Any change at all?
  if ((DefinedLanes & ~PrevDefinedLanes) == 0)
    return;

  RegInfo.DefinedLanes = PrevDefinedLanes | DefinedLanes;
  PutInWorklist(DefRegIdx);
}
Example #9
0
// Copy MachineOperand with all flags except setting it as implicit.
static MachineOperand copyRegOperandAsImplicit(const MachineOperand &Orig) {
  assert(!Orig.isImplicit());
  return MachineOperand::CreateReg(Orig.getReg(),
                                   Orig.isDef(),
                                   true,
                                   Orig.isKill(),
                                   Orig.isDead(),
                                   Orig.isUndef(),
                                   Orig.isEarlyClobber(),
                                   Orig.getSubReg(),
                                   Orig.isDebug(),
                                   Orig.isInternalRead());
}
Example #10
0
/// isPartialRedef - Return true if the specified def at the specified index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getRegSlot();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}
Example #11
0
bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO,
                                 MCOperand &MCOp) {
  switch (MO.getType()) {
  default: llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all non-CPSR implicit register operands.
    if (MO.isImplicit() && MO.getReg() != ARM::CPSR)
      return false;
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    MCOp = MCOperand::createReg(MO.getReg());
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::createExpr(MCSymbolRefExpr::create(
        MO.getMBB()->getSymbol(), OutContext));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = GetSymbolRef(MO,
                        GetARMGVSymbol(MO.getGlobal(), MO.getTargetFlags()));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = GetSymbolRef(MO,
                        GetExternalSymbolSymbol(MO.getSymbolName()));
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = GetSymbolRef(MO, GetJTISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    if (Subtarget->genExecuteOnly())
      llvm_unreachable("execute-only should not generate constant pools");
    MCOp = GetSymbolRef(MO, GetCPISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_BlockAddress:
    MCOp = GetSymbolRef(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
    break;
  case MachineOperand::MO_FPImmediate: {
    APFloat Val = MO.getFPImm()->getValueAPF();
    bool ignored;
    Val.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &ignored);
    MCOp = MCOperand::createFPImm(Val.convertToDouble());
    break;
  }
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return false;
  }
  return true;
}
Example #12
0
bool EpiphanyAsmPrinter::lowerOperand(const MachineOperand &MO,
                                      MCOperand &MCOp) const {
    switch (MO.getType()) {
    default:
        llvm_unreachable("unknown operand type");
    case MachineOperand::MO_Register:
        if (MO.isImplicit())
            return false;
        assert(!MO.getSubReg() && "Subregs should be eliminated!");
        MCOp = MCOperand::CreateReg(MO.getReg());
        break;
    case MachineOperand::MO_Immediate:
        MCOp = MCOperand::CreateImm(MO.getImm());
        break;
    case MachineOperand::MO_FPImmediate: {// a bit hacky, see arm
        APFloat Val = MO.getFPImm()->getValueAPF();
        bool ignored;
        Val.convert(APFloat::IEEEdouble, APFloat::rmTowardZero, &ignored);
        MCOp = MCOperand::CreateFPImm(Val.convertToDouble());
        break;
    }
    case MachineOperand::MO_BlockAddress:
        MCOp = lowerSymbolOperand(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
        break;
    case MachineOperand::MO_ExternalSymbol:
        MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
        break;
    case MachineOperand::MO_GlobalAddress:
        MCOp = lowerSymbolOperand(MO, Mang->getSymbol(MO.getGlobal()));
        break;
    case MachineOperand::MO_MachineBasicBlock:
        MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
                                         MO.getMBB()->getSymbol(), OutContext));
        break;
    case MachineOperand::MO_JumpTableIndex:
        MCOp = lowerSymbolOperand(MO, GetJTISymbol(MO.getIndex()));
        break;
    case MachineOperand::MO_ConstantPoolIndex:
        MCOp = lowerSymbolOperand(MO, GetCPISymbol(MO.getIndex()));
        break;
    case MachineOperand::MO_RegisterMask:
        // Ignore call clobbers
        return false;

    }

    return true;
}
Example #13
0
// Note: this must stay exactly in sync with isIdenticalTo above.
hash_code llvm::hash_value(const MachineOperand &MO) {
  switch (MO.getType()) {
  case MachineOperand::MO_Register:
    // Register operands don't have target flags.
    return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
  case MachineOperand::MO_Immediate:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
  case MachineOperand::MO_CImmediate:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCImm());
  case MachineOperand::MO_FPImmediate:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getFPImm());
  case MachineOperand::MO_MachineBasicBlock:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMBB());
  case MachineOperand::MO_FrameIndex:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
  case MachineOperand::MO_ConstantPoolIndex:
  case MachineOperand::MO_TargetIndex:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex(),
                        MO.getOffset());
  case MachineOperand::MO_JumpTableIndex:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
  case MachineOperand::MO_ExternalSymbol:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getOffset(),
                        MO.getSymbolName());
  case MachineOperand::MO_GlobalAddress:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getGlobal(),
                        MO.getOffset());
  case MachineOperand::MO_BlockAddress:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getBlockAddress(),
                        MO.getOffset());
  case MachineOperand::MO_RegisterMask:
  case MachineOperand::MO_RegisterLiveOut:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask());
  case MachineOperand::MO_Metadata:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata());
  case MachineOperand::MO_MCSymbol:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol());
  case MachineOperand::MO_CFIIndex:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCFIIndex());
  case MachineOperand::MO_IntrinsicID:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIntrinsicID());
  case MachineOperand::MO_Predicate:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getPredicate());
  }
  llvm_unreachable("Invalid machine operand type");
}
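The "must stay exactly in sync" requirement is the usual hash/equality contract: operands that compare identical must hash equally, so both functions have to look at exactly the same fields. A toy illustration with an invented ToyOperand and a std::hash-based mix (not llvm::hash_combine):
#include <cassert>
#include <cstddef>
#include <functional>

// Toy "operand": the hash must combine exactly the fields that equality
// compares, otherwise hash-based lookups would miss equal operands.
struct ToyOperand {
  int Kind;
  int Reg;
  int SubReg;
  bool IsDef;
};

static bool identical(const ToyOperand &A, const ToyOperand &B) {
  return A.Kind == B.Kind && A.Reg == B.Reg && A.SubReg == B.SubReg &&
         A.IsDef == B.IsDef;
}

static std::size_t hashValue(const ToyOperand &O) {
  std::size_t H = std::hash<int>()(O.Kind);
  auto Mix = [&H](std::size_t V) { H ^= V + 0x9e3779b9 + (H << 6) + (H >> 2); };
  Mix(std::hash<int>()(O.Reg));
  Mix(std::hash<int>()(O.SubReg));
  Mix(std::hash<bool>()(O.IsDef));
  return H;
}

int main() {
  ToyOperand A{0, 42, 1, false}, B{0, 42, 1, false};
  assert(identical(A, B) && hashValue(A) == hashValue(B));
  return 0;
}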
Example #14
0
bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO,
                                 MCOperand &MCOp) {
  switch (MO.getType()) {
  default:
    assert(0 && "unknown operand type");
    return false;
  case MachineOperand::MO_Register:
    // Ignore all non-CPSR implicit register operands.
    if (MO.isImplicit() && MO.getReg() != ARM::CPSR)
      return false;
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    MCOp = MCOperand::CreateReg(MO.getReg());
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::CreateImm(MO.getImm());
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
        MO.getMBB()->getSymbol(), OutContext));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = GetSymbolRef(MO, Mang->getSymbol(MO.getGlobal()));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = GetSymbolRef(MO,
                        GetExternalSymbolSymbol(MO.getSymbolName()));
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = GetSymbolRef(MO, GetJTISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    MCOp = GetSymbolRef(MO, GetCPISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_BlockAddress:
    MCOp = GetSymbolRef(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
    break;
  case MachineOperand::MO_FPImmediate: {
    APFloat Val = MO.getFPImm()->getValueAPF();
    bool ignored;
    Val.convert(APFloat::IEEEdouble, APFloat::rmTowardZero, &ignored);
    MCOp = MCOperand::CreateFPImm(Val.convertToDouble());
    break;
  }
  }
  return true;
}
Example #15
0
bool AArch64AsmPrinter::lowerOperand(const MachineOperand &MO,
                                     MCOperand &MCOp) const {
  switch (MO.getType()) {
  default: llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    if (MO.isImplicit())
      return false;
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    MCOp = MCOperand::CreateReg(MO.getReg());
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::CreateImm(MO.getImm());
    break;
  case MachineOperand::MO_FPImmediate: {
    assert(MO.getFPImm()->isZero() && "Only fp imm 0.0 is supported");
    MCOp = MCOperand::CreateFPImm(0.0);
    break;
  }
  case MachineOperand::MO_BlockAddress:
    MCOp = lowerSymbolOperand(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = lowerSymbolOperand(MO, getSymbol(MO.getGlobal()));
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
                                   MO.getMBB()->getSymbol(), OutContext));
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = lowerSymbolOperand(MO, GetJTISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    MCOp = lowerSymbolOperand(MO, GetCPISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers
    return false;

  }

  return true;
}
Example #16
0
static LaneBitmask getUsedRegMask(const MachineOperand &MO,
                                  const MachineRegisterInfo &MRI,
                                  const LiveIntervals &LIS) {
  assert(MO.isUse() && MO.isReg() &&
         TargetRegisterInfo::isVirtualRegister(MO.getReg()));

  if (auto SubReg = MO.getSubReg())
    return MRI.getTargetRegisterInfo()->getSubRegIndexLaneMask(SubReg);

  auto MaxMask = MRI.getMaxLaneMaskForVReg(MO.getReg());
  if (MaxMask == LaneBitmask::getLane(0)) // cannot have subregs
    return MaxMask;

  // For a tentative schedule LIS isn't updated yet but livemask should remain
  // the same on any schedule. Subreg defs can be reordered but they all must
  // dominate uses anyway.
  auto SI = LIS.getInstructionIndex(*MO.getParent()).getBaseIndex();
  return getLiveLaneMask(MO.getReg(), SI, LIS, MRI);
}
Example #17
0
LaneBitmask DetectDeadLanes::transferDefinedLanes(const MachineOperand &Def,
    unsigned OpNum, LaneBitmask DefinedLanes) const {
  const MachineInstr &MI = *Def.getParent();
  // Translate DefinedLanes if necessary.
  switch (MI.getOpcode()) {
  case TargetOpcode::REG_SEQUENCE: {
    unsigned SubIdx = MI.getOperand(OpNum + 1).getImm();
    DefinedLanes = TRI->composeSubRegIndexLaneMask(SubIdx, DefinedLanes);
    DefinedLanes &= TRI->getSubRegIndexLaneMask(SubIdx);
    break;
  }
  case TargetOpcode::INSERT_SUBREG: {
    unsigned SubIdx = MI.getOperand(3).getImm();
    if (OpNum == 2) {
      DefinedLanes = TRI->composeSubRegIndexLaneMask(SubIdx, DefinedLanes);
      DefinedLanes &= TRI->getSubRegIndexLaneMask(SubIdx);
    } else {
      assert(OpNum == 1 && "INSERT_SUBREG must have two operands");
      // Ignore lanes defined by operand 2.
      DefinedLanes &= ~TRI->getSubRegIndexLaneMask(SubIdx);
    }
    break;
  }
  case TargetOpcode::EXTRACT_SUBREG: {
    unsigned SubIdx = MI.getOperand(2).getImm();
    assert(OpNum == 1 && "EXTRACT_SUBREG must have one register operand only");
    DefinedLanes = TRI->reverseComposeSubRegIndexLaneMask(SubIdx, DefinedLanes);
    break;
  }
  case TargetOpcode::COPY:
  case TargetOpcode::PHI:
    break;
  default:
    llvm_unreachable("function must be called with COPY-like instruction");
  }

  assert(Def.getSubReg() == 0 &&
         "Should not have subregister defs in machine SSA phase");
  DefinedLanes &= MRI->getMaxLaneMaskForVReg(Def.getReg());
  return DefinedLanes;
}
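A toy model of the REG_SEQUENCE case above, with invented one-lane sub-indices instead of real subregister indices and lane masks: each source operand contributes its lanes composed into the destination's lane space and clamped to the lanes its sub-index can define.
#include <cassert>
#include <cstdint>

// Toy model: the destination has a few lanes, and sub-index k places a
// one-lane source into destination lane k. "Composing" a source lane mask
// with a sub-index therefore shifts it into the destination's lane space.
static uint64_t composeLaneMask(unsigned SubIdx, uint64_t SrcLanes) {
  return SrcLanes << SubIdx;
}

// REG_SEQUENCE-style accumulation: each (source, sub-index) pair contributes
// its composed lanes, clamped to the lanes that sub-index can define.
static uint64_t definedLanesOfRegSequence(const unsigned SubIdx[],
                                          const uint64_t SrcDefined[],
                                          unsigned NumSrcs) {
  uint64_t Defined = 0;
  for (unsigned I = 0; I < NumSrcs; ++I) {
    uint64_t SubMask = 1u << SubIdx[I];          // lanes covered by SubIdx
    Defined |= composeLaneMask(SubIdx[I], SrcDefined[I]) & SubMask;
  }
  return Defined;
}

int main() {
  // Two fully defined one-lane sources placed at lanes 0 and 2.
  unsigned SubIdx[] = {0, 2};
  uint64_t SrcDefined[] = {0x1, 0x1};
  assert(definedLanesOfRegSequence(SubIdx, SrcDefined, 2) == 0x5);
  // An undefined source contributes nothing.
  uint64_t SrcDefined2[] = {0x1, 0x0};
  assert(definedLanesOfRegSequence(SubIdx, SrcDefined2, 2) == 0x1);
  return 0;
}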
Example #18
0
static bool isCrossCopy(const MachineRegisterInfo &MRI,
                        const MachineInstr &MI,
                        const TargetRegisterClass *DstRC,
                        const MachineOperand &MO) {
  assert(lowersToCopies(MI));
  unsigned SrcReg = MO.getReg();
  const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
  if (DstRC == SrcRC)
    return false;

  unsigned SrcSubIdx = MO.getSubReg();

  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  unsigned DstSubIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::INSERT_SUBREG:
    if (MI.getOperandNo(&MO) == 2)
      DstSubIdx = MI.getOperand(3).getImm();
    break;
  case TargetOpcode::REG_SEQUENCE: {
    unsigned OpNum = MI.getOperandNo(&MO);
    DstSubIdx = MI.getOperand(OpNum+1).getImm();
    break;
  }
  case TargetOpcode::EXTRACT_SUBREG: {
    unsigned SubReg = MI.getOperand(2).getImm();
    SrcSubIdx = TRI.composeSubRegIndices(SubReg, SrcSubIdx);
  }
  }

  unsigned PreA, PreB; // Unused.
  if (SrcSubIdx && DstSubIdx)
    return !TRI.getCommonSuperRegClass(SrcRC, SrcSubIdx, DstRC, DstSubIdx, PreA,
                                       PreB);
  if (SrcSubIdx)
    return !TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSubIdx);
  if (DstSubIdx)
    return !TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSubIdx);
  return !TRI.getCommonSubClass(SrcRC, DstRC);
}
Example #19
0
/// Returns true if the given machine operand \p MO only reads undefined lanes.
/// The function only works for use operands with a subregister set.
bool VirtRegRewriter::readsUndefSubreg(const MachineOperand &MO) const {
  // Shortcut if the operand is already marked undef.
  if (MO.isUndef())
    return true;

  unsigned Reg = MO.getReg();
  const LiveInterval &LI = LIS->getInterval(Reg);
  const MachineInstr &MI = *MO.getParent();
  SlotIndex BaseIndex = LIS->getInstructionIndex(MI);
  // This code is only meant to handle reading undefined subregisters which
  // we couldn't properly detect before.
  assert(LI.liveAt(BaseIndex) &&
         "Reads of completely dead register should be marked undef already");
  unsigned SubRegIdx = MO.getSubReg();
  LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
  // See if any of the relevant subregister liveranges is defined at this point.
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    if ((SR.LaneMask & UseMask) != 0 && SR.liveAt(BaseIndex))
      return false;
  }
  return true;
}
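A self-contained sketch of the subrange test above, with made-up SubRange and slot types rather than LiveInterval/SlotIndex: the read is treated as undefined only when no subrange overlapping the used lanes is live at the reading instruction.
#include <cassert>
#include <cstdint>
#include <vector>

// Toy subrange: a lane mask plus the set of "slots" at which it is live.
struct SubRange {
  uint64_t LaneMask;
  std::vector<unsigned> LiveSlots;

  bool liveAt(unsigned Slot) const {
    for (unsigned S : LiveSlots)
      if (S == Slot)
        return true;
    return false;
  }
};

// Simplified analogue of readsUndefSubreg: the subregister read is undefined
// iff no subrange overlapping the used lanes is live at the reading point.
static bool readsUndefLanes(const std::vector<SubRange> &Ranges,
                            uint64_t UseMask, unsigned Slot) {
  for (const SubRange &SR : Ranges)
    if ((SR.LaneMask & UseMask) != 0 && SR.liveAt(Slot))
      return false;
  return true;
}

int main() {
  std::vector<SubRange> Ranges = {
      {0x3, {10, 11}},   // low lanes live at slots 10 and 11
      {0xC, {20}},       // high lanes live only at slot 20
  };
  assert(!readsUndefLanes(Ranges, /*UseMask=*/0x1, /*Slot=*/10)); // defined
  assert(readsUndefLanes(Ranges, /*UseMask=*/0xC, /*Slot=*/10));  // undef read
  return 0;
}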
Example #20
0
// Get the subreg type that is most likely to be coalesced
// for an SPR register that will be used in VDUP32d pseudo.
unsigned A15SDOptimizer::getPrefSPRLane(unsigned SReg) {
  if (!TRI->isVirtualRegister(SReg))
    return getDPRLaneFromSPR(SReg);

  MachineInstr *MI = MRI->getVRegDef(SReg);
  if (!MI) return ARM::ssub_0;
  MachineOperand *MO = MI->findRegisterDefOperand(SReg);
  if (!MO) return ARM::ssub_0;
  assert(MO->isReg() && "Non-register operand found!");

  if (MI->isCopy() && usesRegClass(MI->getOperand(1),
                                    &ARM::SPRRegClass)) {
    SReg = MI->getOperand(1).getReg();
  }

  if (TargetRegisterInfo::isVirtualRegister(SReg)) {
    if (MO->getSubReg() == ARM::ssub_1) return ARM::ssub_1;
    return ARM::ssub_0;
  }
  return getDPRLaneFromSPR(SReg);
}
Example #21
0
bool llvm::LowerPPCMachineOperandToMCOperand(const MachineOperand &MO,
                                             MCOperand &OutMO, AsmPrinter &AP,
                                             bool isDarwin) {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    assert(MO.getReg() > PPC::NoRegister &&
           MO.getReg() < PPC::NUM_TARGET_REGS &&
           "Invalid register for this target!");
    OutMO = MCOperand::createReg(MO.getReg());
    return true;
  case MachineOperand::MO_Immediate:
    OutMO = MCOperand::createImm(MO.getImm());
    return true;
  case MachineOperand::MO_MachineBasicBlock:
    OutMO = MCOperand::createExpr(
        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), AP.OutContext));
    return true;
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
    OutMO = GetSymbolRef(MO, GetSymbolFromOperand(MO, AP), AP, isDarwin);
    return true;
  case MachineOperand::MO_JumpTableIndex:
    OutMO = GetSymbolRef(MO, AP.GetJTISymbol(MO.getIndex()), AP, isDarwin);
    return true;
  case MachineOperand::MO_ConstantPoolIndex:
    OutMO = GetSymbolRef(MO, AP.GetCPISymbol(MO.getIndex()), AP, isDarwin);
    return true;
  case MachineOperand::MO_BlockAddress:
    OutMO = GetSymbolRef(MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()), AP,
                         isDarwin);
    return true;
  case MachineOperand::MO_RegisterMask:
    return false;
  }
}
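The lowerOperand/LowerPPCMachineOperandToMCOperand variants in these examples all share one shape: a switch over the operand kind that either produces an MC-level operand or reports that the operand has no MC counterpart (implicit registers, call-clobber register masks). A toy version of that dispatch with invented types, not the LLVM MC API:
#include <cassert>
#include <cstdint>

// Toy machine-operand and MC-operand types; not the LLVM classes.
enum class OpKind { Register, Immediate, RegisterMask };

struct ToyMachineOperand {
  OpKind Kind;
  bool IsImplicit;
  uint64_t Value;        // register number or immediate
};

struct ToyMCOperand {
  bool IsReg;
  uint64_t Value;
};

// Shared shape of the lowerOperand implementations: return false for operands
// that have no MC counterpart, otherwise fill MCOp and return true.
static bool lowerOperandSketch(const ToyMachineOperand &MO, ToyMCOperand &MCOp) {
  switch (MO.Kind) {
  case OpKind::Register:
    if (MO.IsImplicit)
      return false;      // ignore implicit register operands
    MCOp = {true, MO.Value};
    return true;
  case OpKind::Immediate:
    MCOp = {false, MO.Value};
    return true;
  case OpKind::RegisterMask:
    return false;        // ignore call clobbers
  }
  return false;
}

int main() {
  ToyMCOperand MCOp;
  assert(lowerOperandSketch({OpKind::Immediate, false, 42}, MCOp) &&
         !MCOp.IsReg && MCOp.Value == 42);
  assert(!lowerOperandSketch({OpKind::Register, true, 7}, MCOp));
  return 0;
}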
Example #22
0
void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
  assert(MO.isUndef() && "expected undef use");
  unsigned VirtReg = MO.getReg();
  assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Expected virtreg");

  LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
  MCPhysReg PhysReg;
  if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
    PhysReg = LRI->PhysReg;
  } else {
    const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
    ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
    assert(!AllocationOrder.empty() && "Allocation order must not be empty");
    PhysReg = AllocationOrder[0];
  }

  unsigned SubRegIdx = MO.getSubReg();
  if (SubRegIdx != 0) {
    PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
    MO.setSubReg(0);
  }
  MO.setReg(PhysReg);
  MO.setIsRenamable(true);
}
Example #23
0
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless it is a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    //  into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }


  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes.  Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities.  The shrink operands pass
    // already does this.
    return;
  }


  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);


  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }



  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}
Example #24
0
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
  printTargetFlags(Op);
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    // TODO: Print the other register flags.
    if (Op.isImplicit())
      OS << (Op.isDef() ? "implicit-def " : "implicit ");
    if (Op.isDead())
      OS << "dead ";
    if (Op.isKill())
      OS << "killed ";
    if (Op.isUndef())
      OS << "undef ";
    if (Op.isEarlyClobber())
      OS << "early-clobber ";
    if (Op.isDebug())
      OS << "debug-use ";
    printReg(Op.getReg(), OS, TRI);
    // Print the sub register.
    if (Op.getSubReg() != 0)
      OS << ':' << TRI->getSubRegIndexName(Op.getSubReg());
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_CImmediate:
    Op.getCImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_FPImmediate:
    Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_MachineBasicBlock:
    printMBBReference(*Op.getMBB());
    break;
  case MachineOperand::MO_FrameIndex:
    printStackObjectReference(Op.getIndex());
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    OS << "%const." << Op.getIndex();
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_TargetIndex: {
    OS << "target-index(";
    if (const auto *Name = getTargetIndexName(
            *Op.getParent()->getParent()->getParent(), Op.getIndex()))
      OS << Name;
    else
      OS << "<unknown>";
    OS << ')';
    printOffset(Op.getOffset());
    break;
  }
  case MachineOperand::MO_JumpTableIndex:
    OS << "%jump-table." << Op.getIndex();
    break;
  case MachineOperand::MO_ExternalSymbol:
    OS << '$';
    printLLVMNameWithoutPrefix(OS, Op.getSymbolName());
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_GlobalAddress:
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_BlockAddress:
    OS << "blockaddress(";
    Op.getBlockAddress()->getFunction()->printAsOperand(OS, /*PrintType=*/false,
                                                        MST);
    OS << ", ";
    printIRBlockReference(*Op.getBlockAddress()->getBasicBlock());
    OS << ')';
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_RegisterMask: {
    auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
    if (RegMaskInfo != RegisterMaskIds.end())
      OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
    else
      llvm_unreachable("Can't print this machine register mask yet.");
    break;
  }
  case MachineOperand::MO_Metadata:
    Op.getMetadata()->printAsOperand(OS, MST);
    break;
  case MachineOperand::MO_CFIIndex: {
    const auto &MMI = Op.getParent()->getParent()->getParent()->getMMI();
    print(MMI.getFrameInstructions()[Op.getCFIIndex()], TRI);
    break;
  }
  default:
    // TODO: Print the other machine operands.
    llvm_unreachable("Can't print this machine operand at the moment");
  }
}
Example #25
0
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    // FIXME: LiveIntervals shouldn't modify the code like this.  Whoever
    // created the machine instruction should annotate it with <undef> flags
    // as needed.  Then we can simply assert here.  The REG_SEQUENCE lowering
    // is the main suspect.
    if (MO.getSubReg()) {
      mi->addRegisterDefined(interval.reg);
      // Mark all defs of interval.reg on this instruction as reading <undef>.
      for (unsigned i = MOIdx, e = mi->getNumOperands(); i != e; ++i) {
        MachineOperand &MO2 = mi->getOperand(i);
        if (MO2.isReg() && MO2.getReg() == interval.reg && MO2.getSubReg())
          MO2.setIsUndef();
      }
    }

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot();
      else
        killIdx = defIndex.getDeadSlot();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot();

      // Create an interval with one NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber());

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getRegSlot();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
Example #26
0
/// isIdenticalTo - Return true if this operand is identical to the specified
/// operand. Note that this should stay in sync with the hash_value overload
/// below.
bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
  if (getType() != Other.getType() ||
      getTargetFlags() != Other.getTargetFlags())
    return false;

  switch (getType()) {
  case MachineOperand::MO_Register:
    return getReg() == Other.getReg() && isDef() == Other.isDef() &&
           getSubReg() == Other.getSubReg();
  case MachineOperand::MO_Immediate:
    return getImm() == Other.getImm();
  case MachineOperand::MO_CImmediate:
    return getCImm() == Other.getCImm();
  case MachineOperand::MO_FPImmediate:
    return getFPImm() == Other.getFPImm();
  case MachineOperand::MO_MachineBasicBlock:
    return getMBB() == Other.getMBB();
  case MachineOperand::MO_FrameIndex:
    return getIndex() == Other.getIndex();
  case MachineOperand::MO_ConstantPoolIndex:
  case MachineOperand::MO_TargetIndex:
    return getIndex() == Other.getIndex() && getOffset() == Other.getOffset();
  case MachineOperand::MO_JumpTableIndex:
    return getIndex() == Other.getIndex();
  case MachineOperand::MO_GlobalAddress:
    return getGlobal() == Other.getGlobal() && getOffset() == Other.getOffset();
  case MachineOperand::MO_ExternalSymbol:
    return strcmp(getSymbolName(), Other.getSymbolName()) == 0 &&
           getOffset() == Other.getOffset();
  case MachineOperand::MO_BlockAddress:
    return getBlockAddress() == Other.getBlockAddress() &&
           getOffset() == Other.getOffset();
  case MachineOperand::MO_RegisterMask:
  case MachineOperand::MO_RegisterLiveOut: {
    // Shallow compare of the two RegMasks
    const uint32_t *RegMask = getRegMask();
    const uint32_t *OtherRegMask = Other.getRegMask();
    if (RegMask == OtherRegMask)
      return true;

    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
      // Calculate the size of the RegMask
      const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
      unsigned RegMaskSize = (TRI->getNumRegs() + 31) / 32;

      // Deep compare of the two RegMasks
      return std::equal(RegMask, RegMask + RegMaskSize, OtherRegMask);
    }
    // We don't know the size of the RegMask, so we can't deep compare the two
    // reg masks.
    return false;
  }
  case MachineOperand::MO_MCSymbol:
    return getMCSymbol() == Other.getMCSymbol();
  case MachineOperand::MO_CFIIndex:
    return getCFIIndex() == Other.getCFIIndex();
  case MachineOperand::MO_Metadata:
    return getMetadata() == Other.getMetadata();
  case MachineOperand::MO_IntrinsicID:
    return getIntrinsicID() == Other.getIntrinsicID();
  case MachineOperand::MO_Predicate:
    return getPredicate() == Other.getPredicate();
  }
  llvm_unreachable("Invalid machine operand type");
}
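A small sketch of the packed register-mask representation behind the deep-compare branch above: one bit per physical register in 32-bit words, so a mask occupies (NumRegs + 31) / 32 words and two masks are equal exactly when those words are equal. Plain arrays, not the TRI API:
#include <algorithm>
#include <cassert>
#include <cstdint>

// Register masks pack one bit per physical register into 32-bit words.
static unsigned regMaskWords(unsigned NumRegs) { return (NumRegs + 31) / 32; }

static bool regMasksEqual(const uint32_t *A, const uint32_t *B,
                          unsigned NumRegs) {
  if (A == B)
    return true;                        // shallow compare: same array
  unsigned Words = regMaskWords(NumRegs);
  return std::equal(A, A + Words, B);   // deep compare of the packed words
}

int main() {
  assert(regMaskWords(1) == 1 && regMaskWords(32) == 1 && regMaskWords(33) == 2);
  uint32_t A[2] = {0xFFFF0000u, 0x1u};
  uint32_t B[2] = {0xFFFF0000u, 0x1u};
  uint32_t C[2] = {0xFFFF0000u, 0x0u};
  assert(regMasksEqual(A, B, 64));
  assert(!regMasksEqual(A, C, 64));
  return 0;
}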
Example #27
0
bool DetectDeadLanes::isUndefRegAtInput(const MachineOperand &MO,
                                        const VRegInfo &RegInfo) const {
  unsigned SubReg = MO.getSubReg();
  LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
  return (RegInfo.DefinedLanes & RegInfo.UsedLanes & Mask) == 0;
}
Example #28
0
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
                      unsigned I, bool ShouldPrintRegisterTies, bool IsDef) {
  printTargetFlags(Op);
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    if (Op.isImplicit())
      OS << (Op.isDef() ? "implicit-def " : "implicit ");
    else if (!IsDef && Op.isDef())
      // Print the 'def' flag only when the operand is defined after '='.
      OS << "def ";
    if (Op.isInternalRead())
      OS << "internal ";
    if (Op.isDead())
      OS << "dead ";
    if (Op.isKill())
      OS << "killed ";
    if (Op.isUndef())
      OS << "undef ";
    if (Op.isEarlyClobber())
      OS << "early-clobber ";
    if (Op.isDebug())
      OS << "debug-use ";
    printReg(Op.getReg(), OS, TRI);
    // Print the sub register.
    if (Op.getSubReg() != 0)
      OS << ':' << TRI->getSubRegIndexName(Op.getSubReg());
    if (ShouldPrintRegisterTies && Op.isTied() && !Op.isDef())
      OS << "(tied-def " << Op.getParent()->findTiedOperandIdx(I) << ")";
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_CImmediate:
    Op.getCImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_FPImmediate:
    Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_MachineBasicBlock:
    printMBBReference(*Op.getMBB());
    break;
  case MachineOperand::MO_FrameIndex:
    printStackObjectReference(Op.getIndex());
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    OS << "%const." << Op.getIndex();
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_TargetIndex: {
    OS << "target-index(";
    if (const auto *Name = getTargetIndexName(
            *Op.getParent()->getParent()->getParent(), Op.getIndex()))
      OS << Name;
    else
      OS << "<unknown>";
    OS << ')';
    printOffset(Op.getOffset());
    break;
  }
  case MachineOperand::MO_JumpTableIndex:
    OS << "%jump-table." << Op.getIndex();
    break;
  case MachineOperand::MO_ExternalSymbol:
    OS << '$';
    printLLVMNameWithoutPrefix(OS, Op.getSymbolName());
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_GlobalAddress:
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_BlockAddress:
    OS << "blockaddress(";
    Op.getBlockAddress()->getFunction()->printAsOperand(OS, /*PrintType=*/false,
                                                        MST);
    OS << ", ";
    printIRBlockReference(*Op.getBlockAddress()->getBasicBlock());
    OS << ')';
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_RegisterMask: {
    auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
    if (RegMaskInfo != RegisterMaskIds.end())
      OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
    else
      llvm_unreachable("Can't print this machine register mask yet.");
    break;
  }
  case MachineOperand::MO_RegisterLiveOut: {
    const uint32_t *RegMask = Op.getRegLiveOut();
    OS << "liveout(";
    bool IsCommaNeeded = false;
    for (unsigned Reg = 0, E = TRI->getNumRegs(); Reg < E; ++Reg) {
      if (RegMask[Reg / 32] & (1U << (Reg % 32))) {
        if (IsCommaNeeded)
          OS << ", ";
        printReg(Reg, OS, TRI);
        IsCommaNeeded = true;
      }
    }
    OS << ")";
    break;
  }
  case MachineOperand::MO_Metadata:
    Op.getMetadata()->printAsOperand(OS, MST);
    break;
  case MachineOperand::MO_MCSymbol:
    OS << "<mcsymbol " << *Op.getMCSymbol() << ">";
    break;
  case MachineOperand::MO_CFIIndex: {
    const auto &MMI = Op.getParent()->getParent()->getParent()->getMMI();
    print(MMI.getFrameInstructions()[Op.getCFIIndex()], TRI);
    break;
  }
  }
}
Example #29
0
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
                      unsigned I, bool ShouldPrintRegisterTies, LLT TypeToPrint,
                      bool IsDef) {
  printTargetFlags(Op);
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    if (Op.isImplicit())
      OS << (Op.isDef() ? "implicit-def " : "implicit ");
    else if (!IsDef && Op.isDef())
      // Print the 'def' flag only when the operand is defined after '='.
      OS << "def ";
    if (Op.isInternalRead())
      OS << "internal ";
    if (Op.isDead())
      OS << "dead ";
    if (Op.isKill())
      OS << "killed ";
    if (Op.isUndef())
      OS << "undef ";
    if (Op.isEarlyClobber())
      OS << "early-clobber ";
    if (Op.isDebug())
      OS << "debug-use ";
    printReg(Op.getReg(), OS, TRI);
    // Print the sub register.
    if (Op.getSubReg() != 0)
      OS << '.' << TRI->getSubRegIndexName(Op.getSubReg());
    if (ShouldPrintRegisterTies && Op.isTied() && !Op.isDef())
      OS << "(tied-def " << Op.getParent()->findTiedOperandIdx(I) << ")";
    if (TypeToPrint.isValid())
      OS << '(' << TypeToPrint << ')';
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_CImmediate:
    Op.getCImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_FPImmediate:
    Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_MachineBasicBlock:
    printMBBReference(*Op.getMBB());
    break;
  case MachineOperand::MO_FrameIndex:
    printStackObjectReference(Op.getIndex());
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    OS << "%const." << Op.getIndex();
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_TargetIndex:
    OS << "target-index(";
    if (const auto *Name = getTargetIndexName(
            *Op.getParent()->getParent()->getParent(), Op.getIndex()))
      OS << Name;
    else
      OS << "<unknown>";
    OS << ')';
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_JumpTableIndex:
    OS << "%jump-table." << Op.getIndex();
    break;
  case MachineOperand::MO_ExternalSymbol: {
    StringRef Name = Op.getSymbolName();
    OS << '$';
    if (Name.empty()) {
      OS << "\"\"";
    } else {
      printLLVMNameWithoutPrefix(OS, Name);
    }
    printOffset(Op.getOffset());
    break;
  }
  case MachineOperand::MO_GlobalAddress:
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_BlockAddress:
    OS << "blockaddress(";
    Op.getBlockAddress()->getFunction()->printAsOperand(OS, /*PrintType=*/false,
                                                        MST);
    OS << ", ";
    printIRBlockReference(*Op.getBlockAddress()->getBasicBlock());
    OS << ')';
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_RegisterMask: {
    auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
    if (RegMaskInfo != RegisterMaskIds.end())
      OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
    else
      printCustomRegMask(Op.getRegMask(), OS, TRI);
    break;
  }
  case MachineOperand::MO_RegisterLiveOut: {
    const uint32_t *RegMask = Op.getRegLiveOut();
    OS << "liveout(";
    bool IsCommaNeeded = false;
    for (unsigned Reg = 0, E = TRI->getNumRegs(); Reg < E; ++Reg) {
      if (RegMask[Reg / 32] & (1U << (Reg % 32))) {
        if (IsCommaNeeded)
          OS << ", ";
        printReg(Reg, OS, TRI);
        IsCommaNeeded = true;
      }
    }
    OS << ")";
    break;
  }
  case MachineOperand::MO_Metadata:
    Op.getMetadata()->printAsOperand(OS, MST);
    break;
  case MachineOperand::MO_MCSymbol:
    OS << "<mcsymbol " << *Op.getMCSymbol() << ">";
    break;
  case MachineOperand::MO_CFIIndex: {
    const MachineFunction &MF = *Op.getParent()->getParent()->getParent();
    print(MF.getFrameInstructions()[Op.getCFIIndex()], TRI);
    break;
  }
  case MachineOperand::MO_IntrinsicID: {
    Intrinsic::ID ID = Op.getIntrinsicID();
    if (ID < Intrinsic::num_intrinsics)
      OS << "intrinsic(@" << Intrinsic::getName(ID, None) << ')';
    else {
      const MachineFunction &MF = *Op.getParent()->getParent()->getParent();
      const TargetIntrinsicInfo *TII = MF.getTarget().getIntrinsicInfo();
      OS << "intrinsic(@" << TII->getName(ID) << ')';
    }
    break;
  }
  case MachineOperand::MO_Predicate: {
    auto Pred = static_cast<CmpInst::Predicate>(Op.getPredicate());
    OS << (CmpInst::isIntPredicate(Pred) ? "int" : "float") << "pred("
       << CmpInst::getPredicateName(Pred) << ')';
    break;
  }
  }
}
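The liveout printing loop above tests membership in the same packed form: register Reg lives in word Reg/32 at bit Reg%32. A minimal standalone version of that test (helper name invented):
#include <cassert>
#include <cstdint>

// Membership test used by the liveout printing loop: one bit per register,
// packed into 32-bit words.
static bool isRegInMask(const uint32_t *RegMask, unsigned Reg) {
  return (RegMask[Reg / 32] & (1u << (Reg % 32))) != 0;
}

int main() {
  uint32_t Mask[2] = {0, 0};
  Mask[37 / 32] |= 1u << (37 % 32);   // mark register 37 as live-out
  assert(isRegInMask(Mask, 37));
  assert(!isRegInMask(Mask, 5));
  return 0;
}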