Example #1
// Check all machine operands that reference the antidependent register and must
// be replaced by NewReg. Return true if any of their parent instructions may
// clobber the new register.
//
// Note: AntiDepReg may be referenced by a two-address instruction such that
// its use operand is tied to a def operand. We guard against the case in which
// the two-address instruction also defines NewReg, as may happen with
// pre/postincrement loads. In this case, both the use and def operands are in
// RegRefs because the def is inserted by PrescanInstruction and not erased
// during ScanInstruction. So checking for an instruction with definitions of
// both NewReg and AntiDepReg covers it.
bool
CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
                                                RegRefIter RegRefEnd,
                                                unsigned NewReg)
{
  for (RegRefIter I = RegRefBegin; I != RegRefEnd; ++I) {
    MachineOperand *RefOper = I->second;

    // Don't allow the instruction defining AntiDepReg to earlyclobber its
    // operands, in case they may be assigned to NewReg. In this case antidep
    // breaking must fail, but it's too rare to bother optimizing.
    if (RefOper->isDef() && RefOper->isEarlyClobber())
      return true;

    // Handle cases in which this instruction defines NewReg.
    MachineInstr *MI = RefOper->getParent();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &CheckOper = MI->getOperand(i);

      if (CheckOper.isRegMask() && CheckOper.clobbersPhysReg(NewReg))
        return true;

      if (!CheckOper.isReg() || !CheckOper.isDef() ||
          CheckOper.getReg() != NewReg)
        continue;

      // Don't allow the instruction to define NewReg and AntiDepReg.
      // When AntiDepReg is renamed it will be an illegal op.
      if (RefOper->isDef())
        return true;

      // Don't allow an instruction using AntiDepReg to be earlyclobbered by
      // NewReg.
      if (CheckOper.isEarlyClobber())
        return true;

      // Don't allow inline asm to define NewReg at all. Who knows what it's
      // doing with it.
      if (MI->isInlineAsm())
        return true;
    }
  }
  return false;
}
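For context, a minimal sketch of how a rename loop might consult this predicate; the pickRenameReg helper, the candidate list, and the exact RegRefs multimap shape are assumptions for illustration, not the actual CriticalAntiDepBreaker caller.

// Hypothetical caller (sketch): try candidates until one is not clobbered
// by any instruction that references AntiDepReg.
unsigned pickRenameReg(CriticalAntiDepBreaker &Breaker,
                       std::multimap<unsigned, MachineOperand *> &RegRefs,
                       unsigned AntiDepReg, ArrayRef<unsigned> Candidates) {
  auto Range = RegRefs.equal_range(AntiDepReg);
  for (unsigned NewReg : Candidates)
    if (!Breaker.isNewRegClobberedByRefs(Range.first, Range.second, NewReg))
      return NewReg; // Safe to rename AntiDepReg -> NewReg.
  return 0;          // No suitable register.
}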
Example #2
static LaneBitmask getUsedRegMask(const MachineOperand &MO,
                                  const MachineRegisterInfo &MRI,
                                  const LiveIntervals &LIS) {
  assert(MO.isUse() && MO.isReg() &&
         TargetRegisterInfo::isVirtualRegister(MO.getReg()));

  if (auto SubReg = MO.getSubReg())
    return MRI.getTargetRegisterInfo()->getSubRegIndexLaneMask(SubReg);

  auto MaxMask = MRI.getMaxLaneMaskForVReg(MO.getReg());
  if (MaxMask == LaneBitmask::getLane(0)) // cannot have subregs
    return MaxMask;

  // For a tentative schedule LIS isn't updated yet, but the live mask should
  // remain the same on any schedule. Subreg defs can be reordered, but they
  // all must dominate uses anyway.
  auto SI = LIS.getInstructionIndex(*MO.getParent()).getBaseIndex();
  return getLiveLaneMask(MO.getReg(), SI, LIS, MRI);
}
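The lane-mask logic above boils down to plain bit arithmetic. A self-contained toy (the sub0/sub1 lane assignments are made up; real masks come from TableGen-generated tables) showing what a subregister use contributes versus a full use:

#include <cassert>
#include <cstdint>

// Toy lane masks: pretend sub0 covers bit 0 and sub1 covers bit 1 of a
// two-lane register.
constexpr uint64_t LanesSub0 = 0b01;
constexpr uint64_t LanesSub1 = 0b10;
constexpr uint64_t LanesAll = LanesSub0 | LanesSub1;

// A use of Reg:sub1 touches only sub1's lanes; a full use touches them all.
uint64_t usedMask(bool HasSubReg, uint64_t SubRegMask) {
  return HasSubReg ? SubRegMask : LanesAll;
}

int main() {
  assert(usedMask(true, LanesSub1) == LanesSub1);
  assert(usedMask(false, 0) == LanesAll);
}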
Example #3
bool AMDGPUMCInstLower::lowerOperand(const MachineOperand &MO,
                                     MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    return true;
  case MachineOperand::MO_Register:
    MCOp = MCOperand::createReg(AMDGPU::getMCReg(MO.getReg(), ST));
    return true;
  case MachineOperand::MO_MachineBasicBlock: {
    if (MO.getTargetFlags() != 0) {
      MCOp = MCOperand::createExpr(
        getLongBranchBlockExpr(*MO.getParent()->getParent(), MO));
    } else {
      MCOp = MCOperand::createExpr(
        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
    }

    return true;
  }
  case MachineOperand::MO_GlobalAddress: {
    const GlobalValue *GV = MO.getGlobal();
    SmallString<128> SymbolName;
    AP.getNameWithPrefix(SymbolName, GV);
    MCSymbol *Sym = Ctx.getOrCreateSymbol(SymbolName);
    const MCExpr *SymExpr =
      MCSymbolRefExpr::create(Sym, getVariantKind(MO.getTargetFlags()), Ctx);
    const MCExpr *Expr = MCBinaryExpr::createAdd(SymExpr,
      MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_ExternalSymbol: {
    MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName()));
    Sym->setExternal(true);
    const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  }
}
Example #4
static bool isNoReturnDef(const MachineOperand &MO) {
  // Anything that is not a call to a noreturn function is a real def.
  const MachineInstr &MI = *MO.getParent();
  if (!MI.isCall())
    return false;
  const MachineBasicBlock &MBB = *MI.getParent();
  if (!MBB.succ_empty())
    return false;
  const MachineFunction &MF = *MBB.getParent();
  // We need to keep correct unwind information even if the function will
  // not return, since the runtime may need it.
  if (MF.getFunction()->hasFnAttribute(Attribute::UWTable))
    return false;
  const Function *Called = getCalledFunction(MI);
  if (Called == nullptr || !Called->hasFnAttribute(Attribute::NoReturn) ||
      !Called->hasFnAttribute(Attribute::NoUnwind))
    return false;

  return true;
}
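isNoReturnDef relies on a getCalledFunction helper that is not shown. A plausible sketch, assuming the callee is found by scanning the call's operands for a global-address operand (the actual lookup may differ per target):

// Hypothetical helper (sketch): find the IR function a call targets by
// scanning the call's operands for a global-address operand.
static const Function *getCalledFunction(const MachineInstr &MI) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isGlobal())
      continue;
    if (const auto *F = dyn_cast<Function>(MO.getGlobal()))
      return F;
  }
  return nullptr;
}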
Example #5
LaneBitmask DetectDeadLanes::transferDefinedLanes(const MachineOperand &Def,
    unsigned OpNum, LaneBitmask DefinedLanes) const {
  const MachineInstr &MI = *Def.getParent();
  // Translate DefinedLanes if necessary.
  switch (MI.getOpcode()) {
  case TargetOpcode::REG_SEQUENCE: {
    unsigned SubIdx = MI.getOperand(OpNum + 1).getImm();
    DefinedLanes = TRI->composeSubRegIndexLaneMask(SubIdx, DefinedLanes);
    DefinedLanes &= TRI->getSubRegIndexLaneMask(SubIdx);
    break;
  }
  case TargetOpcode::INSERT_SUBREG: {
    unsigned SubIdx = MI.getOperand(3).getImm();
    if (OpNum == 2) {
      DefinedLanes = TRI->composeSubRegIndexLaneMask(SubIdx, DefinedLanes);
      DefinedLanes &= TRI->getSubRegIndexLaneMask(SubIdx);
    } else {
      assert(OpNum == 1 && "INSERT_SUBREG must have two operands");
      // Ignore lanes defined by operand 2.
      DefinedLanes &= ~TRI->getSubRegIndexLaneMask(SubIdx);
    }
    break;
  }
  case TargetOpcode::EXTRACT_SUBREG: {
    unsigned SubIdx = MI.getOperand(2).getImm();
    assert(OpNum == 1 && "EXTRACT_SUBREG must have one register operand only");
    DefinedLanes = TRI->reverseComposeSubRegIndexLaneMask(SubIdx, DefinedLanes);
    break;
  }
  case TargetOpcode::COPY:
  case TargetOpcode::PHI:
    break;
  default:
    llvm_unreachable("function must be called with COPY-like instruction");
  }

  assert(Def.getSubReg() == 0 &&
         "Should not have subregister defs in machine SSA phase");
  DefinedLanes &= MRI->getMaxLaneMaskForVReg(Def.getReg());
  return DefinedLanes;
}
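For the REG_SEQUENCE case, composing then clamping is two mask operations. A toy version with hand-picked lane positions (assumptions, not real lane masks) showing both steps:

#include <cassert>
#include <cstdint>

// Toy: sub0 occupies lane bit 0 and sub1 lane bit 1. "Composing" a source
// mask into sub1 shifts it to sub1's position; clamping then discards any
// lanes outside sub1.
constexpr uint64_t Sub1Mask = 0b10;
uint64_t composeIntoSub1(uint64_t SrcLanes) { return SrcLanes << 1; }

int main() {
  uint64_t Defined = composeIntoSub1(0b1); // Source fully defined.
  Defined &= Sub1Mask;                     // Clamp to sub1's lanes.
  assert(Defined == Sub1Mask);
}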
Example #6
/// Returns true if the given machine operand \p MO only reads undefined lanes.
/// The function only works for use operands with a subregister set.
bool VirtRegRewriter::readsUndefSubreg(const MachineOperand &MO) const {
  // Shortcut if the operand is already marked undef.
  if (MO.isUndef())
    return true;

  unsigned Reg = MO.getReg();
  const LiveInterval &LI = LIS->getInterval(Reg);
  const MachineInstr &MI = *MO.getParent();
  SlotIndex BaseIndex = LIS->getInstructionIndex(MI);
  // This code is only meant to handle reading undefined subregisters which
  // we couldn't properly detect before.
  assert(LI.liveAt(BaseIndex) &&
         "Reads of completely dead register should be marked undef already");
  unsigned SubRegIdx = MO.getSubReg();
  LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
  // See if any of the relevant subregister liveranges is defined at this point.
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    if ((SR.LaneMask & UseMask) != 0 && SR.liveAt(BaseIndex))
      return false;
  }
  return true;
}
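A sketch of the intended call site: when the rewriter visits a subregister use that reads no live lanes, it can mark the operand undef. The surrounding rewrite loop is assumed:

// Sketch: inside VirtRegRewriter's per-operand rewrite loop.
if (MO.isUse() && MO.getSubReg() != 0 && readsUndefSubreg(MO))
  MO.setIsUndef(true);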
Example #7
void MachineRegisterInfo::verifyUseList(unsigned Reg) const {
#ifndef NDEBUG
  bool Valid = true;
  for (MachineOperand &M : reg_operands(Reg)) {
    MachineOperand *MO = &M;
    MachineInstr *MI = MO->getParent();
    if (!MI) {
      errs() << PrintReg(Reg, getTargetRegisterInfo())
             << " use list MachineOperand " << MO
             << " has no parent instruction.\n";
      Valid = false;
      continue;
    }
    MachineOperand *MO0 = &MI->getOperand(0);
    unsigned NumOps = MI->getNumOperands();
    if (!(MO >= MO0 && MO < MO0+NumOps)) {
      errs() << PrintReg(Reg, getTargetRegisterInfo())
             << " use list MachineOperand " << MO
             << " doesn't belong to parent MI: " << *MI;
      Valid = false;
    }
    if (!MO->isReg()) {
      errs() << PrintReg(Reg, getTargetRegisterInfo())
             << " MachineOperand " << MO << ": " << *MO
             << " is not a register\n";
      Valid = false;
    }
    if (MO->getReg() != Reg) {
      errs() << PrintReg(Reg, getTargetRegisterInfo())
             << " use-list MachineOperand " << MO << ": "
             << *MO << " is the wrong register\n";
      Valid = false;
    }
  }
  assert(Valid && "Invalid use list");
#endif
}
Example #8
/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace the other uses with use of
  // the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend the live
      // range of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions down
    // stream. A PHI use is expected to be the kill of its source values.
    for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }
      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}
Example #9
static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
      UseOp.isImplicit())) {
    return;
  }

  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI.getRegClass(UseReg) :
      TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
        TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bits for folding.
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
        MRI.getRegClass(DestReg) :
        TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
         RSUse = MRI.use_begin(RegSeqDstReg),
         RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }
    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target independent nodes.  Target independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities. The shrink operands pass
  // already does this.
  return;
}
Example #10
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}
Example #11
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless it is a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    //  into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes.  Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}
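The sub0/sub1 split above simply takes the low and high 32-bit halves of the 64-bit immediate, matching the APInt::getLoBits/getHiBits calls. A standalone check of that arithmetic with plain integers:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Imm = 0x1122334455667788ULL;
  uint32_t Lo = static_cast<uint32_t>(Imm);       // Lanes read via sub0.
  uint32_t Hi = static_cast<uint32_t>(Imm >> 32); // Lanes read via sub1.
  assert(Lo == 0x55667788u && Hi == 0x11223344u);
}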
Example #12
static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}
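isUInt<16> is a pure range check. A minimal standalone equivalent of the immediate test (the inline-constant query is LLVM-specific and omitted here):

#include <cassert>
#include <cstdint>

// Equivalent of isUInt<16>: does the value fit an unsigned 16-bit field?
bool fitsInKImm16(int64_t Imm) { return Imm >= 0 && Imm <= 0xFFFF; }

int main() {
  assert(fitsInKImm16(0xFFFF));
  assert(!fitsInKImm16(0x10000));
  assert(!fitsInKImm16(-1));
}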
Example #13
/// Handle the direct use of a physical register.  Check that the register is
/// not used by a virtreg. Kill the physreg, marking it free. This may add
/// implicit kills to MO.getParent() and invalidate MO.
void RegAllocFast::usePhysReg(MachineOperand &MO) {
  // Ignore undef uses.
  if (MO.isUndef())
    return;

  unsigned PhysReg = MO.getReg();
  assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
         "Bad usePhysReg operand");

  markRegUsedInInstr(PhysReg);
  switch (PhysRegState[PhysReg]) {
  case regDisabled:
    break;
  case regReserved:
    PhysRegState[PhysReg] = regFree;
    LLVM_FALLTHROUGH;
  case regFree:
    MO.setIsKill();
    return;
  default:
    // The physreg was allocated to a virtual register. That means the value we
    // wanted has been clobbered.
    llvm_unreachable("Instruction uses an allocated register");
  }

  // Maybe a superregister is reserved?
  for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
    MCPhysReg Alias = *AI;
    switch (PhysRegState[Alias]) {
    case regDisabled:
      break;
    case regReserved:
      // Either PhysReg is a subregister of Alias and we mark the
      // whole register as free, or PhysReg is the superregister of
      // Alias and we mark all the aliases as disabled before freeing
      // PhysReg.
      // In the latter case, since PhysReg was disabled, this means that
      // its value is defined only by physical sub-registers. This check
      // is performed by the assert of the default case in this loop.
      // Note: The value of the superregister may be only partially
      // defined; that is why regDisabled is a valid state for aliases.
      assert((TRI->isSuperRegister(PhysReg, Alias) ||
              TRI->isSuperRegister(Alias, PhysReg)) &&
             "Instruction is not using a subregister of a reserved register");
      LLVM_FALLTHROUGH;
    case regFree:
      if (TRI->isSuperRegister(PhysReg, Alias)) {
        // Leave the superregister in the working set.
        setPhysRegState(Alias, regFree);
        MO.getParent()->addRegisterKilled(Alias, TRI, true);
        return;
      }
      // Some other alias was in the working set - clear it.
      setPhysRegState(Alias, regDisabled);
      break;
    default:
      llvm_unreachable("Instruction uses an alias of an allocated register");
    }
  }

  // All aliases are disabled, bring register into working set.
  setPhysRegState(PhysReg, regFree);
  MO.setIsKill();
}
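The switch above walks a small per-physreg state machine. A sketch of the states as this function uses them; the enum shape is an assumption, and real RegAllocFast additionally stores a virtual register number for allocated registers:

// Assumed shape of the per-physreg state consulted by usePhysReg():
enum {
  regDisabled = 0, // The value lives in an aliasing register, not here.
  regFree,         // No live value; a direct use simply becomes a kill.
  regReserved      // Reserved (e.g. the stack pointer); a use frees it.
  // Real RegAllocFast stores a virtual register number for any physreg
  // currently allocated to a virtreg, which is the unreachable case above.
};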
Example #14
bool StrongPHIElimination::runOnMachineFunction(MachineFunction &MF) {
  MRI = &MF.getRegInfo();
  TII = MF.getTarget().getInstrInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  LI = &getAnalysis<LiveIntervals>();

  for (MachineFunction::iterator I = MF.begin(), E = MF.end();
       I != E; ++I) {
    for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
         BBI != BBE && BBI->isPHI(); ++BBI) {
      unsigned DestReg = BBI->getOperand(0).getReg();
      addReg(DestReg);
      PHISrcDefs[I].push_back(BBI);

      for (unsigned i = 1; i < BBI->getNumOperands(); i += 2) {
        MachineOperand &SrcMO = BBI->getOperand(i);
        unsigned SrcReg = SrcMO.getReg();
        addReg(SrcReg);
        unionRegs(DestReg, SrcReg);

        MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
        if (DefMI)
          PHISrcDefs[DefMI->getParent()].push_back(DefMI);
      }
    }
  }

  // Perform a depth-first traversal of the dominator tree, splitting
  // interferences amongst PHI-congruence classes.
  DenseMap<unsigned, unsigned> CurrentDominatingParent;
  DenseMap<unsigned, unsigned> ImmediateDominatingParent;
  for (df_iterator<MachineDomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    SplitInterferencesForBasicBlock(*DI->getBlock(),
                                    CurrentDominatingParent,
                                    ImmediateDominatingParent);
  }

  // Insert copies for all PHI source and destination registers.
  for (MachineFunction::iterator I = MF.begin(), E = MF.end();
       I != E; ++I) {
    for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
         BBI != BBE && BBI->isPHI(); ++BBI) {
      InsertCopiesForPHI(BBI, I);
    }
  }

  // FIXME: Preserve the equivalence classes during copy insertion and use
  // the preserved equivalence classes instead of recomputing them.
  RegNodeMap.clear();
  for (MachineFunction::iterator I = MF.begin(), E = MF.end();
       I != E; ++I) {
    for (MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
         BBI != BBE && BBI->isPHI(); ++BBI) {
      unsigned DestReg = BBI->getOperand(0).getReg();
      addReg(DestReg);

      for (unsigned i = 1; i < BBI->getNumOperands(); i += 2) {
        unsigned SrcReg = BBI->getOperand(i).getReg();
        addReg(SrcReg);
        unionRegs(DestReg, SrcReg);
      }
    }
  }

  DenseMap<unsigned, unsigned> RegRenamingMap;
  bool Changed = false;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end();
       I != E; ++I) {
    MachineBasicBlock::iterator BBI = I->begin(), BBE = I->end();
    while (BBI != BBE && BBI->isPHI()) {
      MachineInstr *PHI = BBI;

      assert(PHI->getNumOperands() > 0);

      unsigned SrcReg = PHI->getOperand(1).getReg();
      unsigned SrcColor = getRegColor(SrcReg);
      unsigned NewReg = RegRenamingMap[SrcColor];
      if (!NewReg) {
        NewReg = SrcReg;
        RegRenamingMap[SrcColor] = SrcReg;
      }
      MergeLIsAndRename(SrcReg, NewReg);

      unsigned DestReg = PHI->getOperand(0).getReg();
      if (!InsertedDestCopies.count(DestReg))
        MergeLIsAndRename(DestReg, NewReg);

      for (unsigned i = 3; i < PHI->getNumOperands(); i += 2) {
        unsigned SrcReg = PHI->getOperand(i).getReg();
        MergeLIsAndRename(SrcReg, NewReg);
      }

      ++BBI;
      LI->RemoveMachineInstrFromMaps(PHI);
      PHI->eraseFromParent();
      Changed = true;
    }
  }

  // Due to the insertion of copies to split live ranges, the live intervals are
  // guaranteed to not overlap, except in one case: an original PHI source and a
  // PHI destination copy. In this case, they have the same value and thus don't
  // truly intersect, so we merge them into the value live at that point.
  // FIXME: Is there some better way we can handle this?
  for (DestCopyMap::iterator I = InsertedDestCopies.begin(),
       E = InsertedDestCopies.end(); I != E; ++I) {
    unsigned DestReg = I->first;
    unsigned DestColor = getRegColor(DestReg);
    unsigned NewReg = RegRenamingMap[DestColor];

    LiveInterval &DestLI = LI->getInterval(DestReg);
    LiveInterval &NewLI = LI->getInterval(NewReg);

    assert(DestLI.ranges.size() == 1
           && "PHI destination copy's live interval should be a single live "
               "range from the beginning of the BB to the copy instruction.");
    LiveRange *DestLR = DestLI.begin();
    VNInfo *NewVNI = NewLI.getVNInfoAt(DestLR->start);
    if (!NewVNI) {
      NewVNI = NewLI.createValueCopy(DestLR->valno, LI->getVNInfoAllocator());
      MachineInstr *CopyInstr = I->second;
      CopyInstr->getOperand(1).setIsKill(true);
    }

    LiveRange NewLR(DestLR->start, DestLR->end, NewVNI);
    NewLI.addRange(NewLR);

    LI->removeInterval(DestReg);
    MRI->replaceRegWith(DestReg, NewReg);
  }

  // Adjust the live intervals of all PHI source registers to handle the case
  // where the PHIs in successor blocks were the only later uses of the source
  // register.
  for (SrcCopySet::iterator I = InsertedSrcCopySet.begin(),
       E = InsertedSrcCopySet.end(); I != E; ++I) {
    MachineBasicBlock *MBB = I->first;
    unsigned SrcReg = I->second;
    if (unsigned RenamedRegister = RegRenamingMap[getRegColor(SrcReg)])
      SrcReg = RenamedRegister;

    LiveInterval &SrcLI = LI->getInterval(SrcReg);

    bool isLiveOut = false;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
      if (SrcLI.liveAt(LI->getMBBStartIdx(*SI))) {
        isLiveOut = true;
        break;
      }
    }

    if (isLiveOut)
      continue;

    MachineOperand *LastUse = findLastUse(MBB, SrcReg);
    assert(LastUse);
    SlotIndex LastUseIndex = LI->getInstructionIndex(LastUse->getParent());
    SrcLI.removeRange(LastUseIndex.getRegSlot(), LI->getMBBEndIdx(MBB));
    LastUse->setIsKill(true);
  }

  Allocator.Reset();
  RegNodeMap.clear();
  PHISrcDefs.clear();
  InsertedSrcCopySet.clear();
  InsertedSrcCopyMap.clear();
  InsertedDestCopies.clear();

  return Changed;
}
Example #15
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
  printTargetFlags(Op);
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    // TODO: Print the other register flags.
    if (Op.isImplicit())
      OS << (Op.isDef() ? "implicit-def " : "implicit ");
    if (Op.isDead())
      OS << "dead ";
    if (Op.isKill())
      OS << "killed ";
    if (Op.isUndef())
      OS << "undef ";
    if (Op.isEarlyClobber())
      OS << "early-clobber ";
    if (Op.isDebug())
      OS << "debug-use ";
    printReg(Op.getReg(), OS, TRI);
    // Print the sub register.
    if (Op.getSubReg() != 0)
      OS << ':' << TRI->getSubRegIndexName(Op.getSubReg());
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_CImmediate:
    Op.getCImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_FPImmediate:
    Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_MachineBasicBlock:
    printMBBReference(*Op.getMBB());
    break;
  case MachineOperand::MO_FrameIndex:
    printStackObjectReference(Op.getIndex());
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    OS << "%const." << Op.getIndex();
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_TargetIndex: {
    OS << "target-index(";
    if (const auto *Name = getTargetIndexName(
            *Op.getParent()->getParent()->getParent(), Op.getIndex()))
      OS << Name;
    else
      OS << "<unknown>";
    OS << ')';
    printOffset(Op.getOffset());
    break;
  }
  case MachineOperand::MO_JumpTableIndex:
    OS << "%jump-table." << Op.getIndex();
    break;
  case MachineOperand::MO_ExternalSymbol:
    OS << '$';
    printLLVMNameWithoutPrefix(OS, Op.getSymbolName());
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_GlobalAddress:
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_BlockAddress:
    OS << "blockaddress(";
    Op.getBlockAddress()->getFunction()->printAsOperand(OS, /*PrintType=*/false,
                                                        MST);
    OS << ", ";
    printIRBlockReference(*Op.getBlockAddress()->getBasicBlock());
    OS << ')';
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_RegisterMask: {
    auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
    if (RegMaskInfo != RegisterMaskIds.end())
      OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
    else
      llvm_unreachable("Can't print this machine register mask yet.");
    break;
  }
  case MachineOperand::MO_Metadata:
    Op.getMetadata()->printAsOperand(OS, MST);
    break;
  case MachineOperand::MO_CFIIndex: {
    const auto &MMI = Op.getParent()->getParent()->getParent()->getMMI();
    print(MMI.getFrameInstructions()[Op.getCFIIndex()], TRI);
    break;
  }
  default:
    // TODO: Print the other machine operands.
    llvm_unreachable("Can't print this machine operand at the moment");
  }
}
Example #16
// MI is known to be dead. Figure out what instructions
// are also made dead by this and mark them for removal.
void A15SDOptimizer::eraseInstrWithNoUses(MachineInstr *MI) {
  SmallVector<MachineInstr *, 8> Front;
  DeadInstr.insert(MI);

  DEBUG(dbgs() << "Deleting base instruction " << *MI << "\n");
  Front.push_back(MI);

  while (Front.size() != 0) {
    MI = Front.back();
    Front.pop_back();

    // MI is already known to be dead. We need to see
    // if other instructions can also be removed.
    for (unsigned int i = 0; i < MI->getNumOperands(); ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if ((!MO.isReg()) || (!MO.isUse()))
        continue;
      unsigned Reg = MO.getReg();
      if (!TRI->isVirtualRegister(Reg))
        continue;
      MachineOperand *Op = MI->findRegisterDefOperand(Reg);

      if (!Op)
        continue;

      MachineInstr *Def = Op->getParent();

      // We don't need to do anything if we have already marked
      // this instruction as being dead.
      if (DeadInstr.find(Def) != DeadInstr.end())
        continue;

      // Check if all the uses of this instruction are marked as
      // dead. If so, we can also mark this instruction as being
      // dead.
      bool IsDead = true;
      for (unsigned int j = 0; j < Def->getNumOperands(); ++j) {
        MachineOperand &MODef = Def->getOperand(j);
        if ((!MODef.isReg()) || (!MODef.isDef()))
          continue;
        unsigned DefReg = MODef.getReg();
        if (!TRI->isVirtualRegister(DefReg)) {
          IsDead = false;
          break;
        }
        for (MachineRegisterInfo::use_instr_iterator
             II = MRI->use_instr_begin(Reg), EE = MRI->use_instr_end();
             II != EE; ++II) {
          // We don't care about self references.
          if (&*II == Def)
            continue;
          if (DeadInstr.find(&*II) == DeadInstr.end()) {
            IsDead = false;
            break;
          }
        }
      }

      if (!IsDead) continue;

      DEBUG(dbgs() << "Deleting instruction " << *Def << "\n");
      DeadInstr.insert(Def);
    }
  }
}
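The traversal is a standard worklist algorithm: mark the root dead, then repeatedly pull in defs whose every user is already dead. A self-contained miniature over a toy def-use graph:

#include <cassert>
#include <set>
#include <vector>

// Toy: node i is an "instruction"; Defs[i] lists the defs whose values it
// uses (candidates that may die with it), Users[d] lists d's readers.
void markDead(int Root, const std::vector<std::vector<int>> &Defs,
              const std::vector<std::vector<int>> &Users,
              std::set<int> &Dead) {
  std::vector<int> Front{Root};
  Dead.insert(Root);
  while (!Front.empty()) {
    int N = Front.back();
    Front.pop_back();
    for (int D : Defs[N]) {
      if (Dead.count(D))
        continue;
      bool AllUsersDead = true;
      for (int U : Users[D]) {
        if (!Dead.count(U)) {
          AllUsersDead = false;
          break;
        }
      }
      if (AllUsersDead) {
        Dead.insert(D);
        Front.push_back(D);
      }
    }
  }
}

int main() {
  // Node 0 uses node 1's result; 1 has no other users, so killing 0
  // also kills 1.
  std::vector<std::vector<int>> Defs{{1}, {}};
  std::vector<std::vector<int>> Users{{}, {0}};
  std::set<int> Dead;
  markDead(0, Defs, Users, Dead);
  assert(Dead.count(1));
}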
Example #17
/// OptimizeInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify
/// the source, and if the source value is preserved as a sub-register of
/// the result, then replace all reachable uses of the source with the subreg
/// of the result.
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code.  Since this code does not currently share EXTRACTs, just
/// ignore all debug uses.
bool OptimizeExts::OptimizeInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  bool Changed = false;
  LocalMIs.insert(MI);

  unsigned SrcReg, DstReg, SubIdx;
  if (TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx)) {
    if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
        TargetRegisterInfo::isPhysicalRegister(SrcReg))
      return false;

    MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(SrcReg);
    if (++UI == MRI->use_nodbg_end())
      // No other uses.
      return false;

    // Ok, the source has other uses. See if we can replace the other uses
    // with use of the result of the extension.
    SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
    UI = MRI->use_nodbg_begin(DstReg);
    for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      ReachedBBs.insert(UI->getParent());

    bool ExtendLife = true;
    // Uses that are in the same BB as uses of the result of the instruction.
    SmallVector<MachineOperand*, 8> Uses;
    // Uses that the result of the instruction can reach.
    SmallVector<MachineOperand*, 8> ExtendedUses;

    UI = MRI->use_nodbg_begin(SrcReg);
    for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
         UI != UE; ++UI) {
      MachineOperand &UseMO = UI.getOperand();
      MachineInstr *UseMI = &*UI;
      if (UseMI == MI)
        continue;
      if (UseMI->isPHI()) {
        ExtendLife = false;
        continue;
      }

      // It's an error to translate this:
      //
      //    %reg1025 = <sext> %reg1024
      //     ...
      //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
      //
      // into this:
      //
      //    %reg1025 = <sext> %reg1024
      //     ...
      //    %reg1027 = COPY %reg1025:4
      //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
      //
      // The problem here is that SUBREG_TO_REG is there to assert that an
      // implicit zext occurs. It doesn't insert a zext instruction. If we allow
      // the COPY here, it will give us the value after the <sext>,
      // not the original value of %reg1024 before <sext>.
      if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
        continue;

      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (UseMBB == MBB) {
        // Local uses that come after the extension.
        if (!LocalMIs.count(UseMI))
          Uses.push_back(&UseMO);
      } else if (ReachedBBs.count(UseMBB))
        // Non-local uses where the result of the extension is used. Always
        // replace these unless it's a PHI.
        Uses.push_back(&UseMO);
      else if (Aggressive && DT->dominates(MBB, UseMBB))
        // We may want to extend the live range of the extension result in
        // to replace these uses.
        ExtendedUses.push_back(&UseMO);
      else {
        // Both will be live out of the def MBB anyway. Don't extend the live
        // range of the extension result.
        ExtendLife = false;
        break;
      }
    }

    if (ExtendLife && !ExtendedUses.empty())
      // Ok, we'll extend the liveness of the extension result.
      std::copy(ExtendedUses.begin(), ExtendedUses.end(),
                std::back_inserter(Uses));

    // Now replace all uses.
    if (!Uses.empty()) {
      SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;
      // Look for PHI uses of the extended result; we don't want to extend the
      // liveness of a PHI input. It breaks all kinds of assumptions down
      // stream. A PHI use is expected to be the kill of its source values.
      UI = MRI->use_nodbg_begin(DstReg);
      for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
           UI != UE; ++UI)
        if (UI->isPHI())
          PHIBBs.insert(UI->getParent());

      const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
      for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
        MachineOperand *UseMO = Uses[i];
        MachineInstr *UseMI = UseMO->getParent();
        MachineBasicBlock *UseMBB = UseMI->getParent();
        if (PHIBBs.count(UseMBB))
          continue;
        unsigned NewVR = MRI->createVirtualRegister(RC);
        BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                TII->get(TargetOpcode::COPY), NewVR)
          .addReg(DstReg, 0, SubIdx);
        UseMO->setReg(NewVR);
        ++NumReuse;
        Changed = true;
      }
    }
  }

  return Changed;
}
Example #18
static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
                              AsmPrinter &Printer, bool isDarwin) {
  MCContext &Ctx = Printer.OutContext;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  unsigned access = MO.getTargetFlags() & PPCII::MO_ACCESS_MASK;

  switch (access) {
    case PPCII::MO_TPREL_LO:
      RefKind = MCSymbolRefExpr::VK_PPC_TPREL_LO;
      break;
    case PPCII::MO_TPREL_HA:
      RefKind = MCSymbolRefExpr::VK_PPC_TPREL_HA;
      break;
    case PPCII::MO_DTPREL_LO:
      RefKind = MCSymbolRefExpr::VK_PPC_DTPREL_LO;
      break;
    case PPCII::MO_TLSLD_LO:
      RefKind = MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO;
      break;
    case PPCII::MO_TOC_LO:
      RefKind = MCSymbolRefExpr::VK_PPC_TOC_LO;
      break;
    case PPCII::MO_TLS:
      RefKind = MCSymbolRefExpr::VK_PPC_TLS;
      break;
    case PPCII::MO_TLSGD:
      RefKind = MCSymbolRefExpr::VK_PPC_TLSGD;
      break;
    case PPCII::MO_TLSLD:
      RefKind = MCSymbolRefExpr::VK_PPC_TLSLD;
      break;
  }

  if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB && !isDarwin)
    RefKind = MCSymbolRefExpr::VK_PLT;

  const MCExpr *Expr = MCSymbolRefExpr::Create(Symbol, RefKind, Ctx);

  if (!MO.isJTI() && MO.getOffset())
    Expr = MCBinaryExpr::CreateAdd(Expr,
                                   MCConstantExpr::Create(MO.getOffset(), Ctx),
                                   Ctx);

  // Subtract off the PIC base if required.
  if (MO.getTargetFlags() & PPCII::MO_PIC_FLAG) {
    const MachineFunction *MF = MO.getParent()->getParent()->getParent();
    
    const MCExpr *PB = MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
    Expr = MCBinaryExpr::CreateSub(Expr, PB, Ctx);
  }

  // Add ha16() / lo16() markers if required.
  switch (access) {
    case PPCII::MO_LO:
      Expr = PPCMCExpr::CreateLo(Expr, isDarwin, Ctx);
      break;
    case PPCII::MO_HA:
      Expr = PPCMCExpr::CreateHa(Expr, isDarwin, Ctx);
      break;
  }

  return MCOperand::CreateExpr(Expr);
}
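The ha16()/lo16() markers implement the usual PowerPC high-adjusted split: the low half is consumed sign-extended, so the high half must round up whenever bit 15 of the low half is set. A standalone check of that identity:

#include <cassert>
#include <cstdint>

// PowerPC-style split: Addr == (ha(Addr) << 16) + (int16_t)lo(Addr).
uint32_t ha(uint32_t X) { return (X + 0x8000) >> 16; }
uint16_t lo(uint32_t X) { return X & 0xFFFF; }

int main() {
  for (uint32_t X : {0x12340000u, 0x1234FFFFu, 0xDEAD8000u})
    assert((ha(X) << 16) + (int16_t)lo(X) == X);
}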
Example #19
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
                      unsigned I, bool ShouldPrintRegisterTies, bool IsDef) {
  printTargetFlags(Op);
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    if (Op.isImplicit())
      OS << (Op.isDef() ? "implicit-def " : "implicit ");
    else if (!IsDef && Op.isDef())
      // Print the 'def' flag only when the operand is defined after '='.
      OS << "def ";
    if (Op.isInternalRead())
      OS << "internal ";
    if (Op.isDead())
      OS << "dead ";
    if (Op.isKill())
      OS << "killed ";
    if (Op.isUndef())
      OS << "undef ";
    if (Op.isEarlyClobber())
      OS << "early-clobber ";
    if (Op.isDebug())
      OS << "debug-use ";
    printReg(Op.getReg(), OS, TRI);
    // Print the sub register.
    if (Op.getSubReg() != 0)
      OS << ':' << TRI->getSubRegIndexName(Op.getSubReg());
    if (ShouldPrintRegisterTies && Op.isTied() && !Op.isDef())
      OS << "(tied-def " << Op.getParent()->findTiedOperandIdx(I) << ")";
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_CImmediate:
    Op.getCImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_FPImmediate:
    Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_MachineBasicBlock:
    printMBBReference(*Op.getMBB());
    break;
  case MachineOperand::MO_FrameIndex:
    printStackObjectReference(Op.getIndex());
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    OS << "%const." << Op.getIndex();
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_TargetIndex: {
    OS << "target-index(";
    if (const auto *Name = getTargetIndexName(
            *Op.getParent()->getParent()->getParent(), Op.getIndex()))
      OS << Name;
    else
      OS << "<unknown>";
    OS << ')';
    printOffset(Op.getOffset());
    break;
  }
  case MachineOperand::MO_JumpTableIndex:
    OS << "%jump-table." << Op.getIndex();
    break;
  case MachineOperand::MO_ExternalSymbol:
    OS << '$';
    printLLVMNameWithoutPrefix(OS, Op.getSymbolName());
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_GlobalAddress:
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_BlockAddress:
    OS << "blockaddress(";
    Op.getBlockAddress()->getFunction()->printAsOperand(OS, /*PrintType=*/false,
                                                        MST);
    OS << ", ";
    printIRBlockReference(*Op.getBlockAddress()->getBasicBlock());
    OS << ')';
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_RegisterMask: {
    auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
    if (RegMaskInfo != RegisterMaskIds.end())
      OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
    else
      llvm_unreachable("Can't print this machine register mask yet.");
    break;
  }
  case MachineOperand::MO_RegisterLiveOut: {
    const uint32_t *RegMask = Op.getRegLiveOut();
    OS << "liveout(";
    bool IsCommaNeeded = false;
    for (unsigned Reg = 0, E = TRI->getNumRegs(); Reg < E; ++Reg) {
      if (RegMask[Reg / 32] & (1U << (Reg % 32))) {
        if (IsCommaNeeded)
          OS << ", ";
        printReg(Reg, OS, TRI);
        IsCommaNeeded = true;
      }
    }
    OS << ")";
    break;
  }
  case MachineOperand::MO_Metadata:
    Op.getMetadata()->printAsOperand(OS, MST);
    break;
  case MachineOperand::MO_MCSymbol:
    OS << "<mcsymbol " << *Op.getMCSymbol() << ">";
    break;
  case MachineOperand::MO_CFIIndex: {
    const auto &MMI = Op.getParent()->getParent()->getParent()->getMMI();
    print(MMI.getFrameInstructions()[Op.getCFIIndex()], TRI);
    break;
  }
  }
}
Example #20
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
                      unsigned I, bool ShouldPrintRegisterTies, LLT TypeToPrint,
                      bool IsDef) {
  printTargetFlags(Op);
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    if (Op.isImplicit())
      OS << (Op.isDef() ? "implicit-def " : "implicit ");
    else if (!IsDef && Op.isDef())
      // Print the 'def' flag only when the operand is defined after '='.
      OS << "def ";
    if (Op.isInternalRead())
      OS << "internal ";
    if (Op.isDead())
      OS << "dead ";
    if (Op.isKill())
      OS << "killed ";
    if (Op.isUndef())
      OS << "undef ";
    if (Op.isEarlyClobber())
      OS << "early-clobber ";
    if (Op.isDebug())
      OS << "debug-use ";
    printReg(Op.getReg(), OS, TRI);
    // Print the sub register.
    if (Op.getSubReg() != 0)
      OS << '.' << TRI->getSubRegIndexName(Op.getSubReg());
    if (ShouldPrintRegisterTies && Op.isTied() && !Op.isDef())
      OS << "(tied-def " << Op.getParent()->findTiedOperandIdx(I) << ")";
    if (TypeToPrint.isValid())
      OS << '(' << TypeToPrint << ')';
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_CImmediate:
    Op.getCImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_FPImmediate:
    Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST);
    break;
  case MachineOperand::MO_MachineBasicBlock:
    printMBBReference(*Op.getMBB());
    break;
  case MachineOperand::MO_FrameIndex:
    printStackObjectReference(Op.getIndex());
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    OS << "%const." << Op.getIndex();
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_TargetIndex:
    OS << "target-index(";
    if (const auto *Name = getTargetIndexName(
            *Op.getParent()->getParent()->getParent(), Op.getIndex()))
      OS << Name;
    else
      OS << "<unknown>";
    OS << ')';
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_JumpTableIndex:
    OS << "%jump-table." << Op.getIndex();
    break;
  case MachineOperand::MO_ExternalSymbol: {
    StringRef Name = Op.getSymbolName();
    OS << '$';
    if (Name.empty()) {
      OS << "\"\"";
    } else {
      printLLVMNameWithoutPrefix(OS, Name);
    }
    printOffset(Op.getOffset());
    break;
  }
  case MachineOperand::MO_GlobalAddress:
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_BlockAddress:
    OS << "blockaddress(";
    Op.getBlockAddress()->getFunction()->printAsOperand(OS, /*PrintType=*/false,
                                                        MST);
    OS << ", ";
    printIRBlockReference(*Op.getBlockAddress()->getBasicBlock());
    OS << ')';
    printOffset(Op.getOffset());
    break;
  case MachineOperand::MO_RegisterMask: {
    auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
    if (RegMaskInfo != RegisterMaskIds.end())
      OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
    else
      printCustomRegMask(Op.getRegMask(), OS, TRI);
    break;
  }
  case MachineOperand::MO_RegisterLiveOut: {
    const uint32_t *RegMask = Op.getRegLiveOut();
    OS << "liveout(";
    bool IsCommaNeeded = false;
    for (unsigned Reg = 0, E = TRI->getNumRegs(); Reg < E; ++Reg) {
      if (RegMask[Reg / 32] & (1U << (Reg % 32))) {
        if (IsCommaNeeded)
          OS << ", ";
        printReg(Reg, OS, TRI);
        IsCommaNeeded = true;
      }
    }
    OS << ")";
    break;
  }
  case MachineOperand::MO_Metadata:
    Op.getMetadata()->printAsOperand(OS, MST);
    break;
  case MachineOperand::MO_MCSymbol:
    OS << "<mcsymbol " << *Op.getMCSymbol() << ">";
    break;
  case MachineOperand::MO_CFIIndex: {
    const MachineFunction &MF = *Op.getParent()->getParent()->getParent();
    print(MF.getFrameInstructions()[Op.getCFIIndex()], TRI);
    break;
  }
  case MachineOperand::MO_IntrinsicID: {
    Intrinsic::ID ID = Op.getIntrinsicID();
    if (ID < Intrinsic::num_intrinsics)
      OS << "intrinsic(@" << Intrinsic::getName(ID, None) << ')';
    else {
      const MachineFunction &MF = *Op.getParent()->getParent()->getParent();
      const TargetIntrinsicInfo *TII = MF.getTarget().getIntrinsicInfo();
      OS << "intrinsic(@" << TII->getName(ID) << ')';
    }
    break;
  }
  case MachineOperand::MO_Predicate: {
    auto Pred = static_cast<CmpInst::Predicate>(Op.getPredicate());
    OS << (CmpInst::isIntPredicate(Pred) ? "int" : "float") << "pred("
       << CmpInst::getPredicateName(Pred) << ')';
    break;
  }
  }
}
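The liveout case decodes a packed bitvector: register Reg is live out when bit (Reg % 32) of word (Reg / 32) is set. A standalone round-trip of that encoding:

#include <cassert>
#include <cstdint>

bool isRegSet(const uint32_t *Mask, unsigned Reg) {
  return Mask[Reg / 32] & (1U << (Reg % 32));
}

int main() {
  uint32_t Mask[2] = {0, 0};
  unsigned Reg = 37;                 // Lives in word 1, bit 5.
  Mask[Reg / 32] |= 1U << (Reg % 32);
  assert(isRegSet(Mask, 37) && !isRegSet(Mask, 36));
}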