Example #1
void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}
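Example #2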
void RegDefsUses::init(const MachineInstr &MI) {
  // Add all register operands which are explicit and non-variadic.
  update(MI, 0, MI.getDesc().getNumOperands());

  // If MI is a call, add RA to Defs to prevent users of RA from going into
  // delay slot.
  if (MI.isCall())
    Defs.set(Mips::RA);

  // Add all implicit register operands of branch instructions except
  // register AT.
  if (MI.isBranch()) {
    update(MI, MI.getDesc().getNumOperands(), MI.getNumOperands());
    Defs.reset(Mips::AT);
  }
}
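The two update() calls in init() rely on the operand layout: MCInstrDesc::getNumOperands() counts only the operands declared in the instruction description (the explicit, non-variadic ones), while MachineInstr::getNumOperands() also counts the variadic and implicit operands appended after them. A minimal sketch of that invariant (the helper name is ours, not LLVM API):

static std::pair<unsigned, unsigned> operandRanges(const MachineInstr &MI) {
  // Explicit operands occupy the leading indices; variadic and implicit
  // operands follow them, so the descriptor count is always a prefix.
  unsigned NumExplicit = MI.getDesc().getNumOperands();
  unsigned NumTotal = MI.getNumOperands();
  return {NumExplicit, NumTotal};
}

Example #3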
static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}
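On newer LLVM trees the index loop above can be written against the operands() range instead of getNumOperands()/getOperand(i); a minimal sketch of the same CPSR scan in that style, assuming a version of LLVM that provides MachineInstr::operands() (the function name is ours):

static bool UpdateCPSRDefRange(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (const MachineOperand &MO : MI.operands()) {
    // Only non-undef defs of CPSR are of interest.
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }
  return HasDef || LiveCPSR;
}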
Example #4
static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}
Example #5
/// isTwoAddrUse - Return true if the specified MI uses the specified register
/// as a two-address use. If so, return the destination register by reference.
static bool isTwoAddrUse(MachineInstr &MI, unsigned Reg, unsigned &DstReg) {
  const TargetInstrDesc &TID = MI.getDesc();
  unsigned NumOps = (MI.getOpcode() == TargetInstrInfo::INLINEASM)
    ? MI.getNumOperands() : TID.getNumOperands();
  for (unsigned i = 0; i != NumOps; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg)
      continue;
    unsigned ti;
    if (MI.isRegTiedToDefOperand(i, &ti)) {
      DstReg = MI.getOperand(ti).getReg();
      return true;
    }
  }
  return false;
}
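Example #6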
bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  MachineOpCode oc = MI.getOpcode();
  if (oc == X86::MOV8rr || oc == X86::MOV16rr || oc == X86::MOV32rr ||
      oc == X86::FpMOV) {
      assert(MI.getNumOperands() == 2 &&
             MI.getOperand(0).isRegister() &&
             MI.getOperand(1).isRegister() &&
             "invalid register-register move instruction");
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
  }
  return false;
}
Example #7
bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)).second)
    return true;

  if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // We don't move live definitions of physical registers,
    // so sinking their uses won't enable any opportunities.
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;

    // If this instruction is the only user of a virtual register,
    // check if breaking the edge will enable sinking
    // both this instruction and the defining instruction.
    if (MRI->hasOneNonDBGUse(Reg)) {
      // If the definition resides in same MBB,
      // claim it's likely we can sink these together.
      // If definition resides elsewhere, we aren't
      // blocking it from being sunk so don't break the edge.
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (DefMI->getParent() == MI.getParent())
        return true;
    }
  }

  return false;
}
Example #8
// Compare compares the result of MI against zero.  If MI is an addition
// of -1 and if CCUsers is a single branch on nonzero, eliminate the addition
// and convert the branch to a BRCT(G).  Return true on success.
bool
SystemZElimCompare::convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
                                  SmallVectorImpl<MachineInstr *> &CCUsers) {
  // Check whether we have an addition of -1.
  unsigned Opcode = MI->getOpcode();
  unsigned BRCT;
  if (Opcode == SystemZ::AHI)
    BRCT = SystemZ::BRCT;
  else if (Opcode == SystemZ::AGHI)
    BRCT = SystemZ::BRCTG;
  else
    return false;
  if (MI->getOperand(2).getImm() != -1)
    return false;

  // Check whether we have a single JLH.
  if (CCUsers.size() != 1)
    return false;
  MachineInstr *Branch = CCUsers[0];
  if (Branch->getOpcode() != SystemZ::BRC ||
      Branch->getOperand(0).getImm() != SystemZ::CCMASK_ICMP ||
      Branch->getOperand(1).getImm() != SystemZ::CCMASK_CMP_NE)
    return false;

  // We already know that there are no references to the register between
  // MI and Compare.  Make sure that there are also no references between
  // Compare and Branch.
  unsigned SrcReg = getCompareSourceReg(Compare);
  MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
  for (++MBBI; MBBI != MBBE; ++MBBI)
    if (getRegReferences(MBBI, SrcReg))
      return false;

  // The transformation is OK.  Rebuild Branch as a BRCT(G).
  MachineOperand Target(Branch->getOperand(2));
  while (Branch->getNumOperands())
    Branch->RemoveOperand(0);
  Branch->setDesc(TII->get(BRCT));
  MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
    .addOperand(MI->getOperand(0))
    .addOperand(MI->getOperand(1))
    .addOperand(Target)
    .addReg(SystemZ::CC, RegState::ImplicitDefine);
  MI->eraseFromParent();
  return true;
}
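Example #9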
// Describe the references to Reg or any of its aliases in MI.
Reference SystemZElimCompare::getRegReferences(MachineInstr &MI, unsigned Reg) {
  Reference Ref;
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg()) {
      if (unsigned MOReg = MO.getReg()) {
        if (TRI->regsOverlap(MOReg, Reg)) {
          if (MO.isUse())
            Ref.Use = true;
          else if (MO.isDef())
            Ref.Def = true;
        }
      }
    }
  }
  return Ref;
}
Example #10
/// getCanonicalInductionVariable - Check to see if the loop has a canonical
/// induction variable. We check for a simple recurrence pattern - an
/// integer recurrence that decrements by one each time through the loop and
/// ends at zero. If so, record each such phi node in IVars and the matching
/// induction operation in IOps.
///
/// Based upon the similar code in LoopInfo except this code is specific to
/// the machine.
/// This method assumes that the IndVarSimplify pass has been run by 'opt'.
///
void
PPCCTRLoops::getCanonicalInductionVariable(MachineLoop *L,
                                  SmallVector<MachineInstr *, 4> &IVars,
                                  SmallVector<MachineInstr *, 4> &IOps) const {
  MachineBasicBlock *TopMBB = L->getTopBlock();
  MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin();
  assert(PI != TopMBB->pred_end() &&
         "Loop must have more than one incoming edge!");
  MachineBasicBlock *Backedge = *PI++;
  if (PI == TopMBB->pred_end()) return;  // dead loop
  MachineBasicBlock *Incoming = *PI++;
  if (PI != TopMBB->pred_end()) return;  // multiple backedges?

  // make sure there is one incoming and one backedge and determine which
  // is which.
  if (L->contains(Incoming)) {
    if (L->contains(Backedge))
      return;
    std::swap(Incoming, Backedge);
  } else if (!L->contains(Backedge))
    return;

  // Loop over all of the PHI nodes, looking for a canonical induction variable:
  //   - The PHI node is "reg1 = PHI reg2, BB1, reg3, BB2".
  //   - The recurrence comes from the backedge.
  //   - The definition is an induction operation.
  for (MachineBasicBlock::iterator I = TopMBB->begin(), E = TopMBB->end();
       I != E && I->isPHI(); ++I) {
    MachineInstr *MPhi = &*I;
    unsigned DefReg = MPhi->getOperand(0).getReg();
    for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) {
      // Check each operand for the value from the backedge.
      MachineBasicBlock *MBB = MPhi->getOperand(i+1).getMBB();
      if (L->contains(MBB)) { // operand comes from the backedge
        // Check if the definition is an induction operation.
        MachineInstr *DI = MRI->getVRegDef(MPhi->getOperand(i).getReg());
        if (isInductionOperation(DI, DefReg)) {
          IOps.push_back(DI);
          IVars.push_back(MPhi);
        }
      }
    }
  }
  return;
}
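The i += 2 stride above follows the machine PHI operand layout "DstReg = PHI Val1, MBB1, Val2, MBB2, ...": operand 0 is the definition, and each predecessor contributes a (value register, predecessor block) pair. A minimal sketch of walking those pairs (the helper and its callback are ours):

static void forEachPHIIncoming(
    const MachineInstr &Phi,
    function_ref<void(unsigned Reg, const MachineBasicBlock *Pred)> Fn) {
  assert(Phi.isPHI() && "expected a PHI");
  // Incoming values start at operand 1 and come in (register, block) pairs.
  for (unsigned i = 1, e = Phi.getNumOperands(); i != e; i += 2)
    Fn(Phi.getOperand(i).getReg(), Phi.getOperand(i + 1).getMBB());
}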
Example #11
bool X86RegisterBankInfo::getInstrValueMapping(
    const MachineInstr &MI,
    const SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx,
    SmallVectorImpl<const ValueMapping *> &OpdsMapping) {

  unsigned NumOperands = MI.getNumOperands();
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (!MI.getOperand(Idx).isReg())
      continue;

    auto Mapping = getValueMapping(OpRegBankIdx[Idx], 1);
    if (!Mapping->isValid())
      return false;

    OpdsMapping[Idx] = Mapping;
  }
  return true;
}
Example #12
/// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
/// loop, and make sure it is not killed by any instructions in the loop.
void MachineLICM::AddToLiveIns(unsigned Reg) {
  const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    MachineBasicBlock *BB = Blocks[i];
    if (!BB->isLiveIn(Reg))
      BB->addLiveIn(Reg);
    for (MachineBasicBlock::iterator
           MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
      MachineInstr *MI = &*MII;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
        if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
          MO.setIsKill(false);
      }
    }
  }
}
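Example #13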
/// Search MI's operands for register GP and erase it.
static void eraseGPOpnd(MachineInstr &MI) {
  if (!EraseGPOpnd)
    return;

  MachineFunction &MF = *MI.getParent()->getParent();
  MVT::SimpleValueType Ty = getRegTy(MI.getOperand(0).getReg(), MF);
  unsigned Reg = Ty == MVT::i32 ? Mips::GP : Mips::GP_64;

  for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
    MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg() && MO.getReg() == Reg) {
      MI.RemoveOperand(I);
      return;
    }
  }

  llvm_unreachable(nullptr);
}
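Example #14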
RegisterBankInfo::InstructionMappings
AArch64RegisterBankInfo::getInstrAlternativeMappings(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR: {
    // 32- and 64-bit OR can be mapped on either FPR or
    // GPR for the same cost.
    const MachineFunction &MF = *MI.getParent()->getParent();
    const TargetSubtargetInfo &STI = MF.getSubtarget();
    const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
    const MachineRegisterInfo &MRI = MF.getRegInfo();

    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
    if (Size != 32 && Size != 64)
      break;

    // If the instruction has any implicit-defs or uses,
    // do not mess with it.
    if (MI.getNumOperands() != 3)
      break;
    InstructionMappings AltMappings;
    InstructionMapping GPRMapping(/*ID*/ 1, /*Cost*/ 1, /*NumOperands*/ 3);
    InstructionMapping FPRMapping(/*ID*/ 2, /*Cost*/ 1, /*NumOperands*/ 3);
    for (unsigned Idx = 0; Idx != 3; ++Idx) {
      GPRMapping.setOperandMapping(
          Idx,
          ValueMapping{&AArch64::PartMappings[AArch64::getRegBankBaseIdx(Size) +
                                              AArch64::FirstGPR],
                       1});
      FPRMapping.setOperandMapping(
          Idx,
          ValueMapping{&AArch64::PartMappings[AArch64::getRegBankBaseIdx(Size) +
                                              AArch64::FirstFPR],
                       1});
    }
    AltMappings.emplace_back(std::move(GPRMapping));
    AltMappings.emplace_back(std::move(FPRMapping));
    return AltMappings;
  }
  default:
    break;
  }
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}
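Example #15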
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use, 
                                              SDep& dep) const {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;
  
  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(),
                                              DefIdx);
    if (DefCycle >= 0) {
      MachineInstr *UseMI = Use->getInstr();
      const unsigned UseClass = UseMI->getDesc().getSchedClass();

      // For all uses of the register, calculate the maximum latency
      int Latency = -1;
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = InstrItins.getOperandCycle(UseClass, i);
        if (UseCycle >= 0)
          Latency = std::max(Latency, DefCycle - UseCycle + 1);
      }

      // If we found a latency, then replace the existing dependence latency.
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}
Example #16
void BT::visitPHI(const MachineInstr &PI) {
  int ThisN = PI.getParent()->getNumber();
  if (Trace)
    dbgs() << "Visit FI(" << printMBBReference(*PI.getParent()) << "): " << PI;

  const MachineOperand &MD = PI.getOperand(0);
  assert(MD.getSubReg() == 0 && "Unexpected sub-register in definition");
  RegisterRef DefRR(MD);
  uint16_t DefBW = ME.getRegBitWidth(DefRR);

  RegisterCell DefC = ME.getCell(DefRR, Map);
  if (DefC == RegisterCell::self(DefRR.Reg, DefBW))    // XXX slow
    return;

  bool Changed = false;

  for (unsigned i = 1, n = PI.getNumOperands(); i < n; i += 2) {
    const MachineBasicBlock *PB = PI.getOperand(i + 1).getMBB();
    int PredN = PB->getNumber();
    if (Trace)
      dbgs() << "  edge " << printMBBReference(*PB) << "->"
             << printMBBReference(*PI.getParent());
    if (!EdgeExec.count(CFGEdge(PredN, ThisN))) {
      if (Trace)
        dbgs() << " not executable\n";
      continue;
    }

    RegisterRef RU = PI.getOperand(i);
    RegisterCell ResC = ME.getCell(RU, Map);
    if (Trace)
      dbgs() << " input reg: " << printReg(RU.Reg, &ME.TRI, RU.Sub)
             << " cell: " << ResC << "\n";
    Changed |= DefC.meet(ResC, DefRR.Reg);
  }

  if (Changed) {
    if (Trace)
      dbgs() << "Output: " << printReg(DefRR.Reg, &ME.TRI, DefRR.Sub)
             << " cell: " << DefC << "\n";
    ME.putCell(DefRR, DefC, Map);
    visitUsesOf(DefRR.Reg);
  }
}
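Example #17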
// Check all machine operands that reference the antidependent register and must
// be replaced by NewReg. Return true if any of their parent instructions may
// clobber the new register.
//
// Note: AntiDepReg may be referenced by a two-address instruction such that
// its use operand is tied to a def operand. We guard against the case in which
// the two-address instruction also defines NewReg, as may happen with
// pre/postincrement loads. In this case, both the use and def operands are in
// RegRefs because the def is inserted by PrescanInstruction and not erased
// during ScanInstruction. So checking for an instruction with definitions of
// both NewReg and AntiDepReg covers it.
bool
CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
                                                RegRefIter RegRefEnd,
                                                unsigned NewReg)
{
  for (RegRefIter I = RegRefBegin; I != RegRefEnd; ++I ) {
    MachineOperand *RefOper = I->second;

    // Don't allow the instruction defining AntiDepReg to earlyclobber its
    // operands, in case they may be assigned to NewReg. In this case antidep
    // breaking must fail, but it's too rare to bother optimizing.
    if (RefOper->isDef() && RefOper->isEarlyClobber())
      return true;

    // Handle cases in which this instruction defines NewReg.
    MachineInstr *MI = RefOper->getParent();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &CheckOper = MI->getOperand(i);

      if (CheckOper.isRegMask() && CheckOper.clobbersPhysReg(NewReg))
        return true;

      if (!CheckOper.isReg() || !CheckOper.isDef() ||
          CheckOper.getReg() != NewReg)
        continue;

      // Don't allow the instruction to define NewReg and AntiDepReg.
      // When AntiDepReg is renamed it will be an illegal op.
      if (RefOper->isDef())
        return true;

      // Don't allow an instruction using AntiDepReg to be earlyclobbered by
      // NewReg
      if (CheckOper.isEarlyClobber())
        return true;

      // Don't allow inline asm to define NewReg at all. Who knows what it's
      // doing with it.
      if (MI->isInlineAsm())
        return true;
    }
  }
  return false;
}
Example #18
bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) {
  // DBG_VALUE loc, offset, variable, expr
  if (MI.getNumOperands() != 4 ||
      !(MI.getOperand(1).isReg() || MI.getOperand(1).isImm()) ||
      !MI.getOperand(2).isMetadata()) {
    DEBUG(dbgs() << "Can't handle " << MI);
    return false;
  }

  // Get or create the UserValue for (variable,offset).
  bool IsIndirect = MI.isIndirectDebugValue();
  unsigned Offset = IsIndirect ? MI.getOperand(1).getImm() : 0;
  const MDNode *Var = MI.getDebugVariable();
  const MDNode *Expr = MI.getDebugExpression();
  UserValue *UV = getUserValue(Var, Expr, Offset, IsIndirect, MI.getDebugLoc());
  UV->addDef(Idx, MI.getOperand(0));
  return true;
}
Example #19
void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                           int64_t Offset) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);

  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
  assert (Done && "Unable to resolve frame index!");
  (void)Done;
}
Example #20
void Thumb1RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                           int64_t Offset) const {
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MI.getParent()
                                                 ->getParent()
                                                 ->getTarget()
                                                 .getSubtargetImpl()
                                                 ->getInstrInfo());
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
  assert (Done && "Unable to resolve frame index!");
  (void)Done;
}
Example #21
/// AllMemRefsCanBeUnfolded - Return true if all references of the specified
/// spill slot index can be unfolded.
bool StackSlotColoring::AllMemRefsCanBeUnfolded(int SS) {
  SmallVector<MachineInstr*, 8> &RefMIs = SSRefs[SS];
  for (unsigned i = 0, e = RefMIs.size(); i != e; ++i) {
    MachineInstr *MI = RefMIs[i];
    if (TII->isLoadFromStackSlot(MI, SS) ||
        TII->isStoreToStackSlot(MI, SS))
      // Restore and spill will become copies.
      return true;
    if (!TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(), false, false))
      return false;
    for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = MI->getOperand(j);
      if (MO.isFI() && MO.getIndex() != SS)
        // If it uses another frameindex, we can't currently unfold it.
        return false;
    }
  }
  return true;
}
Example #22
/// addRetOperands - ensure that a return instruction has an operand for each
/// value live out of the function.
///
/// Things marked both call and return are tail calls; do not do this for them.
/// The tail callee need not take the same registers as input that it produces
/// as output, and there are dependencies for its input registers elsewhere.
///
/// FIXME: This should be done as part of instruction selection, and this helper
/// should be deleted. Until then, we use custom logic here to create the proper
/// operand under all circumstances. We can't use addRegisterKilled because that
/// doesn't make sense for undefined values. We can't simply avoid calling it
/// for undefined values, because we must ensure that the operand always exists.
void RAFast::addRetOperands(MachineBasicBlock *MBB) {
  if (MBB->empty() || !MBB->back().isReturn() || MBB->back().isCall())
    return;

  MachineInstr *MI = &MBB->back();

  for (MachineRegisterInfo::liveout_iterator
         I = MBB->getParent()->getRegInfo().liveout_begin(),
         E = MBB->getParent()->getRegInfo().liveout_end(); I != E; ++I) {
    unsigned Reg = *I;
    assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
           "Cannot have a live-out virtual register.");

    bool hasDef = PhysRegState[Reg] == regReserved;

    // Check if this register already has an operand.
    bool Found = false;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse())
        continue;

      unsigned OperReg = MO.getReg();
      if (!TargetRegisterInfo::isPhysicalRegister(OperReg))
        continue;

      if (OperReg == Reg || TRI->isSuperRegister(OperReg, Reg)) {
        // If the ret already has an operand for this physreg or a superset,
        // don't duplicate it. Set the kill flag if the value is defined.
        if (hasDef && !MO.isKill())
          MO.setIsKill();
        Found = true;
        break;
      }
    }
    if (!Found)
      MI->addOperand(MachineOperand::CreateReg(Reg,
                                               false /*IsDef*/,
                                               true  /*IsImp*/,
                                               hasDef/*IsKill*/));
  }
}
Example #23
static bool isSafeToFold(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    // If there are additional implicit register operands, this may be used for
    // register indexing so the source register operand isn't simply copied.
    unsigned NumOps = MI.getDesc().getNumOperands() +
      MI.getDesc().getNumImplicitUses();

    return MI.getNumOperands() == NumOps;
  }
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}
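Example #24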
bool
MSP430InstrInfo::isMoveInstr(const MachineInstr& MI,
                             unsigned &SrcReg, unsigned &DstReg,
                             unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers yet.

  switch (MI.getOpcode()) {
  default:
    return false;
  case MSP430::MOV8rr:
  case MSP430::MOV16rr:
   assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "invalid register-register move instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
}
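Example #25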
MachineInstr *
ReloadIPPass::addReloadIPInstr(MachineFunction::iterator &MFI,
                               MachineBasicBlock::iterator &MBBI) const {
    MachineInstr *MI = MBBI;
    StringRef JTPrefix(JumpInstrTables::jump_func_prefix);
    switch (MI->getOpcode()) {
    case X86::MOV32rm:
        if (MI->getNumOperands() > 4 && MI->getOperand(4).isGlobal() &&
                MI->getOperand(4).getGlobal()->getName().startswith(JTPrefix)) {
            if ((TM->Options.CFIType == CFIntegrity::Add && MI->getNextNode()->getOpcode() == X86::AND32rr)
                    || (TM->Options.CFIType == CFIntegrity::Sub && MI->getNextNode()->getOpcode() == X86::SUB32rr)
                    || (TM->Options.CFIType == CFIntegrity::Ror && MI->getNextNode()->getOpcode() == X86::SUB32rr)
               ) {
                //                    MI->dump(); // For debugging
                DEBUG(dbgs() << TM->getSubtargetImpl()->getInstrInfo()->getName(MI->getOpcode()) << ": "
                      << " Op" << ": "
                      << MI->getOperand(4).getGlobal()->getName().str() << "\n";);

                return MI;
            } else {
                // Assumed completion: the original snippet is cut off at this
                // else branch; treat a non-matching follow-up as no reload IP.
                return nullptr;
            }
        }
        break;
    }
    return nullptr;
}
Example #26
void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
    DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo & RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass * oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass * newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}
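Example #27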
RegisterBankInfo::InstructionMapping
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or less operands");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned Size = Ty.getSizeInBits();
  bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);

  PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;

#ifndef NDEBUG
  // Make sure all the operands are using similar size and type.
  // Should probably be checked by the machine verifier.
  // This code won't catch cases where the number of lanes is
  // different between the operands.
  // If we want to go to that level of details, it is probably
  // best to check that the types are the same, period.
  // Currently, we just check that the register banks are the same
  // for each types.
  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
    assert(
        AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(
            RBIdx, OpTy.getSizeInBits()) ==
            AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(RBIdx, Size) &&
        "Operand has incompatible size");
    bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
    (void)OpIsFPR;
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  }
#endif // End NDEBUG.

  return InstructionMapping{DefaultMappingID, 1, getValueMapping(RBIdx, Size),
                            NumOperands};
}
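Example #28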
/// hasLoopHazard - Check whether an MLx instruction is chained to itself across
/// a single-MBB loop.
bool MLxExpansion::hasLoopHazard(MachineInstr *MI) const {
  unsigned Reg = MI->getOperand(1).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return false;

  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *DefMI = MRI->getVRegDef(Reg);
  while (true) {
outer_continue:
    if (DefMI->getParent() != MBB)
      break;

    if (DefMI->isPHI()) {
      for (unsigned i = 1, e = DefMI->getNumOperands(); i < e; i += 2) {
        if (DefMI->getOperand(i + 1).getMBB() == MBB) {
          unsigned SrcReg = DefMI->getOperand(i).getReg();
          if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
            DefMI = MRI->getVRegDef(SrcReg);
            goto outer_continue;
          }
        }
      }
    } else if (DefMI->isCopyLike()) {
      Reg = DefMI->getOperand(1).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        DefMI = MRI->getVRegDef(Reg);
        continue;
      }
    } else if (DefMI->isInsertSubreg()) {
      Reg = DefMI->getOperand(2).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        DefMI = MRI->getVRegDef(Reg);
        continue;
      }
    }

    break;
  }

  return DefMI == MI;
}
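Example #29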
bool Filler::delayHasHazard(const MachineInstr &Candidate, bool &SawLoad,
                            bool &SawStore, RegDefsUses &RegDU) const {
  bool HasHazard = (Candidate.isImplicitDef() || Candidate.isKill());

  // Loads or stores cannot be moved past a store to the delay slot
  // and stores cannot be moved past a load.
  if (Candidate.mayStore() || Candidate.hasOrderedMemoryRef()) {
    HasHazard |= SawStore | SawLoad;
    SawStore = true;
  } else if (Candidate.mayLoad()) {
    HasHazard |= SawStore;
    SawLoad = true;
  }

  assert((!Candidate.isCall() && !Candidate.isReturn()) &&
         "Cannot put calls or returns in delay slot.");

  HasHazard |= RegDU.update(Candidate, 0, Candidate.getNumOperands());

  return HasHazard;
}
Example #30
bool PTXInstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned &SrcReg, unsigned &DstReg,
                               unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
  switch (MI.getOpcode()) {
    default:
      return false;
    case PTX::MOVU16rr:
    case PTX::MOVU32rr:
    case PTX::MOVU64rr:
    case PTX::MOVF32rr:
    case PTX::MOVF64rr:
    case PTX::MOVPREDrr:
      assert(MI.getNumOperands() >= 2 &&
             MI.getOperand(0).isReg() && MI.getOperand(1).isReg() &&
             "Invalid register-register move instruction");
      SrcSubIdx = DstSubIdx = 0; // No sub-registers
      DstReg = MI.getOperand(0).getReg();
      SrcReg = MI.getOperand(1).getReg();
      return true;
  }
}