/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block.
bool MachineSinking::AllUsesDominatedByBlock(unsigned Reg, 
                                             MachineBasicBlock *MBB) const {
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Only makes sense for vregs");
  // Ignoring debug uses is necessary so debug info doesn't affect the code.
  // This may leave a referencing dbg_value in the original block, before
  // the definition of the vreg.  Dwarf generator handles this although the
  // user might not get the right info at runtime.
  for (MachineRegisterInfo::use_nodbg_iterator I = 
       RegInfo->use_nodbg_begin(Reg),
       E = RegInfo->use_nodbg_end(); I != E; ++I) {
    // Determine the block of the use.
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
    }
    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }
  return true;
}
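
The subtle case above is the PHI use: a machine PHI's operands after the def come in (value, predecessor-MBB) pairs, so the use effectively occurs in the incoming block, not in the PHI's own block. A minimal sketch of that lookup against the same LLVM 3.x API, where blockOfUse is a hypothetical helper, not part of the original pass:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Return the block a use at operand index OpNo belongs to. For a PHI,
// the MBB operand sits immediately after the value operand, so the use
// is attributed to the predecessor block.
static MachineBasicBlock *blockOfUse(MachineInstr *UseInst, unsigned OpNo) {
  if (UseInst->isPHI())
    return UseInst->getOperand(OpNo + 1).getMBB();
  return UseInst->getParent();
}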
void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) {
  DEBUG(dbgs() << "Processing " << *MI);
  unsigned Reg = MI->getOperand(0).getReg();

  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    // For virtual registers, mark all uses as <undef>, and convert users to
    // implicit-def when possible.
    for (MachineRegisterInfo::use_nodbg_iterator UI =
         MRI->use_nodbg_begin(Reg),
         UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
      MachineOperand &MO = UI.getOperand();
      MO.setIsUndef();
      MachineInstr *UserMI = MO.getParent();
      if (!canTurnIntoImplicitDef(UserMI))
        continue;
      DEBUG(dbgs() << "Converting to IMPLICIT_DEF: " << *UserMI);
      UserMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
      WorkList.insert(UserMI);
    }
    MI->eraseFromParent();
    return;
  }

  // This is a physreg implicit-def.
  // Look for the first instruction to use or define an alias.
  MachineBasicBlock::instr_iterator UserMI = MI;
  MachineBasicBlock::instr_iterator UserE = MI->getParent()->instr_end();
  bool Found = false;
  for (++UserMI; UserMI != UserE; ++UserMI) {
    for (MIOperands MO(UserMI); MO.isValid(); ++MO) {
      if (!MO->isReg())
        continue;
      unsigned UserReg = MO->getReg();
      if (!TargetRegisterInfo::isPhysicalRegister(UserReg) ||
          !TRI->regsOverlap(Reg, UserReg))
        continue;
      // UserMI uses or redefines Reg. Set <undef> flags on all uses.
      Found = true;
      if (MO->isUse())
        MO->setIsUndef();
    }
    if (Found)
      break;
  }

  // If we found the using MI, we can erase the IMPLICIT_DEF.
  if (Found) {
    DEBUG(dbgs() << "Physreg user: " << *UserMI);
    MI->eraseFromParent();
    return;
  }

  // Using instr wasn't found, it could be in another block.
  // Leave the physreg IMPLICIT_DEF, but trim any extra operands.
  for (unsigned i = MI->getNumOperands() - 1; i; --i)
    MI->RemoveOperand(i);

  DEBUG(dbgs() << "Keeping physreg: " << *MI);
}
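
The physreg path boils down to "find the first later instruction in this block whose register operands alias Reg". A compact sketch of just that scan, assuming the same LLVM 3.x MIOperands and ilist-iterator conventions as above (firstOverlappingUser is a hypothetical helper):

// Return the first instruction after MI that reads or writes a physreg
// aliasing Reg, or null if no such instruction exists in the block.
static MachineInstr *firstOverlappingUser(MachineInstr *MI, unsigned Reg,
                                          const TargetRegisterInfo *TRI) {
  MachineBasicBlock::instr_iterator I = MI, E = MI->getParent()->instr_end();
  for (++I; I != E; ++I)
    for (MIOperands MO(&*I); MO.isValid(); ++MO)
      if (MO->isReg() &&
          TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
          TRI->regsOverlap(Reg, MO->getReg()))
        return &*I;
  return 0;
}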
Example #3
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for "
                 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = UI.skipInstruction();) {
      if (!MI->isCopy() && !MI->mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
           LiveInterval &DstLI = LIS.getInterval(DstReg);
           VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
           assert(DstVNI && "Missing defined value");
           assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
           WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      // Erase spills.
      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
        ++NumSpillsRemoved;
        --NumSpills;
      }
    }
  } while (!WorkList.empty());
}
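
The function is shaped like a classic LLVM worklist traversal: seed with one (interval, value) pair, pop until empty, and push sibling values as they are discovered. Here termination is guaranteed by the isRegToSpill early-out rather than a visited set; the generic shape of the idiom, with Node as a placeholder type, looks like this:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"

// Generic worklist walk: Visit may append newly discovered nodes.
template <typename Node>
void worklistWalk(Node *Root,
                  void (*Visit)(Node *, llvm::SmallVectorImpl<Node *> &)) {
  llvm::SmallPtrSet<Node *, 16> Seen;
  llvm::SmallVector<Node *, 8> WorkList;
  WorkList.push_back(Root);
  do {
    Node *N = WorkList.pop_back_val();
    if (Seen.count(N))   // don't revisit
      continue;
    Seen.insert(N);
    Visit(N, WorkList);
  } while (!WorkList.empty());
}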
Example #4
/// Helper to find a vreg use between two indices [PriorUseIdx, NextUseIdx).
static bool findUseBetween(unsigned Reg,
                           SlotIndex PriorUseIdx, SlotIndex NextUseIdx,
                           const MachineRegisterInfo *MRI,
                           const LiveIntervals *LIS) {
  for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(Reg), UE = MRI->use_nodbg_end();
         UI != UE; UI.skipInstruction()) {
      const MachineInstr* MI = &*UI;
      SlotIndex InstSlot = LIS->getInstructionIndex(MI).getRegSlot();
      if (InstSlot >= PriorUseIdx && InstSlot < NextUseIdx)
        return true;
  }
  return false;
}
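
Note the half-open window: a use exactly at PriorUseIdx counts, one at NextUseIdx does not. A hypothetical call site, where all the names are placeholders:

// Is there any non-debug use of Reg in [PriorSlot, NextSlot)?
if (findUseBetween(Reg, PriorSlot, NextSlot, MRI, LIS)) {
  // The window is not free; some instruction reads Reg inside it.
}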
Example #5
/// analyzeUses - Count instructions, basic blocks, and loops using CurLI.
void SplitAnalysis::analyzeUses() {
  assert(UseSlots.empty() && "Call clear first");

  // First get all the defs from the interval values. This provides the correct
  // slots for early clobbers.
  for (LiveInterval::const_vni_iterator I = CurLI->vni_begin(),
       E = CurLI->vni_end(); I != E; ++I)
    if (!(*I)->isPHIDef() && !(*I)->isUnused())
      UseSlots.push_back((*I)->def);

  // Get use slots from the use-def chain.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineRegisterInfo::use_nodbg_iterator
       I = MRI.use_nodbg_begin(CurLI->reg), E = MRI.use_nodbg_end(); I != E;
       ++I)
    if (!I.getOperand().isUndef())
      UseSlots.push_back(LIS.getInstructionIndex(&*I).getDefIndex());

  array_pod_sort(UseSlots.begin(), UseSlots.end());

  // Remove duplicates, keeping the smaller slot for each instruction.
  // That is what we want for early clobbers.
  UseSlots.erase(std::unique(UseSlots.begin(), UseSlots.end(),
                             SlotIndex::isSameInstr),
                 UseSlots.end());

  // Compute per-live block info.
  if (!calcLiveBlockInfo()) {
    // FIXME: calcLiveBlockInfo found inconsistencies in the live range.
    // I am looking at you, RegisterCoalescer!
    DidRepairRange = true;
    ++NumRepairs;
    DEBUG(dbgs() << "*** Fixing inconsistent live interval! ***\n");
    const_cast<LiveIntervals&>(LIS)
      .shrinkToUses(const_cast<LiveInterval*>(CurLI));
    UseBlocks.clear();
    ThroughBlocks.clear();
    bool fixed = calcLiveBlockInfo();
    (void)fixed;
    assert(fixed && "Couldn't fix broken live interval");
  }

  DEBUG(dbgs() << "Analyze counted "
               << UseSlots.size() << " instrs in "
               << UseBlocks.size() << " blocks, through "
               << NumThroughBlocks << " blocks.\n");
}
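
The dedup step leans on std::unique keeping the first element of each run of equivalent elements, so sorting ascending beforehand guarantees the smallest slot per instruction survives, which is exactly what early clobbers need. The same idiom in miniature, with integers bucketed by tens standing in for SlotIndex::isSameInstr:

#include <algorithm>
#include <vector>

static bool sameBucket(int A, int B) { return A / 10 == B / 10; }

// {12, 10, 25, 23, 11} -> sort -> {10, 11, 12, 23, 25} -> unique ->
// {10, 23}: the smallest element of each bucket survives.
static void dedupKeepSmallest(std::vector<int> &V) {
  std::sort(V.begin(), V.end());
  V.erase(std::unique(V.begin(), V.end(), sameBucket), V.end());
}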
Example #6
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
  if (!Edit->anyRematerializable(AA))
    return;

  UsedValues.clear();

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::use_nodbg_iterator
         RI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = RI.skipBundle();)
      anyRemat |= reMaterializeFor(LI, MI);
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted here.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);

  // Get rid of deleted and empty intervals.
  unsigned ResultPos = 0;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    if (!LIS.hasInterval(Reg))
      continue;

    LiveInterval &LI = LIS.getInterval(Reg);
    if (LI.empty()) {
      Edit->eraseVirtReg(Reg);
      continue;
    }

    RegsToSpill[ResultPos++] = Reg;
  }
  RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}
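
The final loop is the stable in-place compaction idiom, a hand-rolled erase-remove: survivors are copied forward through ResultPos and the stale tail is chopped once at the end. In miniature:

#include <vector>

// Keep only nonzero entries, preserving relative order; O(n), one erase.
static void keepNonZero(std::vector<unsigned> &Regs) {
  unsigned ResultPos = 0;
  for (unsigned i = 0, e = Regs.size(); i != e; ++i)
    if (Regs[i] != 0)
      Regs[ResultPos++] = Regs[i];
  Regs.erase(Regs.begin() + ResultPos, Regs.end());
}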
Example #7
void
UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
                      const SmallVectorImpl<SlotIndex> &Kills,
                      SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs,
                      MachineRegisterInfo &MRI, LiveIntervals &LIS) {
  if (Kills.empty())
    return;
  // Don't track copies from physregs; there are too many uses.
  if (!TargetRegisterInfo::isVirtualRegister(LI->reg))
    return;

  // Collect all the (vreg, valno) pairs that are copies of LI.
  SmallVector<std::pair<LiveInterval*, const VNInfo*>, 8> CopyValues;
  for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI.use_nodbg_begin(LI->reg),
         UE = MRI.use_nodbg_end(); UI != UE; ++UI) {
    // Copies of the full value.
    if (UI.getOperand().getSubReg() || !UI->isCopy())
      continue;
    MachineInstr *MI = &*UI;
    unsigned DstReg = MI->getOperand(0).getReg();

    // Don't follow copies to physregs. These are usually setting up call
    // arguments, and the argument registers are always call clobbered. We are
    // better off in the source register which could be a callee-saved register,
    // or it could be spilled.
    if (!TargetRegisterInfo::isVirtualRegister(DstReg))
      continue;

    // Is LocNo extended to reach this copy? If not, another def may be blocking
    // it, or we are looking at the wrong value of LI.
    SlotIndex Idx = LIS.getInstructionIndex(MI);
    LocMap::iterator I = locInts.find(Idx.getRegSlot(true));
    if (!I.valid() || I.value() != LocNo)
      continue;

    if (!LIS.hasInterval(DstReg))
      continue;
    LiveInterval *DstLI = &LIS.getInterval(DstReg);
    const VNInfo *DstVNI = DstLI->getVNInfoAt(Idx.getRegSlot());
    assert(DstVNI && DstVNI->def == Idx.getRegSlot() && "Bad copy value");
    CopyValues.push_back(std::make_pair(DstLI, DstVNI));
  }

  if (CopyValues.empty())
    return;

  DEBUG(dbgs() << "Got " << CopyValues.size() << " copies of " << *LI << '\n');

  // Try to add defs of the copied values for each kill point.
  for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
    SlotIndex Idx = Kills[i];
    for (unsigned j = 0, e = CopyValues.size(); j != e; ++j) {
      LiveInterval *DstLI = CopyValues[j].first;
      const VNInfo *DstVNI = CopyValues[j].second;
      if (DstLI->getVNInfoAt(Idx) != DstVNI)
        continue;
      // Check that there isn't already a def at Idx
      LocMap::iterator I = locInts.find(Idx);
      if (I.valid() && I.start() <= Idx)
        continue;
      DEBUG(dbgs() << "Kill at " << Idx << " covered by valno #"
                   << DstVNI->id << " in " << *DstLI << '\n');
      MachineInstr *CopyMI = LIS.getInstructionFromIndex(DstVNI->def);
      assert(CopyMI && CopyMI->isCopy() && "Bad copy value");
      unsigned LocNo = getLocationNo(CopyMI->getOperand(0));
      I.insert(Idx, Idx.getNextSlot(), LocNo);
      NewDefs.push_back(std::make_pair(Idx, LocNo));
      break;
    }
  }
}
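
The locInts probes follow LLVM's IntervalMap iterator API: find(Idx) positions the iterator at the first mapped interval that ends at or after Idx, so valid() && start() <= Idx means Idx is already covered. A sketch of the probe-then-claim pattern used above, assuming locInts is the half-open IntervalMap from LiveDebugVariables:

// Is Idx already mapped to some location?
LocMap::iterator I = locInts.find(Idx);
if (!(I.valid() && I.start() <= Idx))
  // Not covered: claim just the single slot [Idx, Idx.getNextSlot()).
  I.insert(Idx, Idx.getNextSlot(), LocNo);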
Example #8
/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is Set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace the other uses with use of
  // the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend the live
      // range of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }
      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}
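
Stripped of the legality analysis, the rewrite itself is three steps: constrain DstReg to a class that supports SubIdx, materialize a COPY of DstReg:SubIdx into a fresh vreg right before the use, and repoint the use. The step in isolation, with all variables as set up in optimizeExtInstr above:

MRI->constrainRegClass(DstReg, DstRC);   // class must support SubIdx
unsigned NewVR = MRI->createVirtualRegister(RC);
BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
        TII->get(TargetOpcode::COPY), NewVR)
  .addReg(DstReg, 0, SubIdx);            // reads DstReg:SubIdx
UseMO->setReg(NewVR);                    // the use now sees the narrow value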
Example #9
/// OptimizeInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify
/// the source, and if the source value is preserved as a sub-register of
/// the result, then replace all reachable uses of the source with the subreg
/// of the result.
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code.  Since this code does not currently share EXTRACTs, just
/// ignore all debug uses.
bool OptimizeExts::OptimizeInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  bool Changed = false;
  LocalMIs.insert(MI);

  unsigned SrcReg, DstReg, SubIdx;
  if (TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx)) {
    if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
        TargetRegisterInfo::isPhysicalRegister(SrcReg))
      return false;

    MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(SrcReg);
    if (++UI == MRI->use_nodbg_end())
      // No other uses.
      return false;

    // Ok, the source has other uses. See if we can replace the other uses
    // with use of the result of the extension.
    SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
    UI = MRI->use_nodbg_begin(DstReg);
    for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      ReachedBBs.insert(UI->getParent());

    bool ExtendLife = true;
    // Uses that are in the same BB as uses of the result of the instruction.
    SmallVector<MachineOperand*, 8> Uses;
    // Uses that the result of the instruction can reach.
    SmallVector<MachineOperand*, 8> ExtendedUses;

    UI = MRI->use_nodbg_begin(SrcReg);
    for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
         UI != UE; ++UI) {
      MachineOperand &UseMO = UI.getOperand();
      MachineInstr *UseMI = &*UI;
      if (UseMI == MI)
        continue;
      if (UseMI->isPHI()) {
        ExtendLife = false;
        continue;
      }

      // It's an error to translate this:
      //
      //    %reg1025 = <sext> %reg1024
      //     ...
      //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
      //
      // into this:
      //
      //    %reg1025 = <sext> %reg1024
      //     ...
      //    %reg1027 = COPY %reg1025:4
      //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
      //
      // The problem here is that SUBREG_TO_REG is there to assert that an
      // implicit zext occurs. It doesn't insert a zext instruction. If we allow
      // the COPY here, it will give us the value after the <sext>,
      // not the original value of %reg1024 before <sext>.
      if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
        continue;

      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (UseMBB == MBB) {
        // Local uses that come after the extension.
        if (!LocalMIs.count(UseMI))
          Uses.push_back(&UseMO);
      } else if (ReachedBBs.count(UseMBB))
        // Non-local uses where the result of the extension is used. Always
        // replace these unless it's a PHI.
        Uses.push_back(&UseMO);
      else if (Aggressive && DT->dominates(MBB, UseMBB))
        // We may want to extend the live range of the extension result in
        // order to replace these uses.
        ExtendedUses.push_back(&UseMO);
      else {
        // Both will be live out of the def MBB anyway. Don't extend the
        // live range of the extension result.
        ExtendLife = false;
        break;
      }
    }

    if (ExtendLife && !ExtendedUses.empty())
      // Ok, we'll extend the liveness of the extension result.
      std::copy(ExtendedUses.begin(), ExtendedUses.end(),
                std::back_inserter(Uses));

    // Now replace all uses.
    if (!Uses.empty()) {
      SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;
      // Look for PHI uses of the extended result; we don't want to extend the
      // liveness of a PHI input. It breaks all kinds of assumptions downstream.
      // A PHI use is expected to be the kill of its source values.
      UI = MRI->use_nodbg_begin(DstReg);
      for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
           UI != UE; ++UI)
        if (UI->isPHI())
          PHIBBs.insert(UI->getParent());

      const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
      for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
        MachineOperand *UseMO = Uses[i];
        MachineInstr *UseMI = UseMO->getParent();
        MachineBasicBlock *UseMBB = UseMI->getParent();
        if (PHIBBs.count(UseMBB))
          continue;
        unsigned NewVR = MRI->createVirtualRegister(RC);
        BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                TII->get(TargetOpcode::COPY), NewVR)
          .addReg(DstReg, 0, SubIdx);
        UseMO->setReg(NewVR);
        ++NumReuse;
        Changed = true;
      }
    }
  }

  return Changed;
}
Example #10
/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool
MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
                                        MachineBasicBlock *MBB,
                                        MachineBasicBlock *DefMBB,
                                        bool &BreakPHIEdge,
                                        bool &LocalUse) const {
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // BB#1: derived from LLVM BB %bb4.preheader
  //   Predecessors according to CFG: BB#0
  //     ...
  //     %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
  //     ...
  //     JE_4 <BB#37>, %EFLAGS<imp-use>
  //   Successors according to CFG: BB#37 BB#2
  //
  // BB#2: derived from LLVM BB %bb.nph
  //   Predecessors according to CFG: BB#0 BB#1
  //     %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
  BreakPHIEdge = true;
  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (!(UseBlock == MBB && UseInst->isPHI() &&
          UseInst->getOperand(I.getOperandNo()+1).getMBB() == DefMBB)) {
      BreakPHIEdge = false;
      break;
    }
  }
  if (BreakPHIEdge)
    return true;

  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    // Determine the block of the use.
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}
Example #11
// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
bool ARM64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
  // If this instruction isn't eligible to be transformed (no SIMD equivalent),
  // early exit since that's the common case.
  if (!isTransformable(MI))
    return false;

  // Count the number of copies we'll need to add and approximate the number
  // of copies that a transform will enable us to remove.
  unsigned NumNewCopies = 3;
  unsigned NumRemovableCopies = 0;

  unsigned OrigSrc0 = MI->getOperand(1).getReg();
  unsigned OrigSrc1 = MI->getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0;
  unsigned Src1 = 0, SubReg1;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_iterator Def = MRI->def_begin(OrigSrc0);
    assert(llvm::next(Def) == MRI->def_end() && "Multiple def in SSA!");
    Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If the source was from a copy, we don't need to insert a new copy.
    if (Src0)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0))
      ++NumRemovableCopies;
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_iterator Def = MRI->def_begin(OrigSrc1);
    assert(llvm::next(Def) == MRI->def_end() && "Multiple def in SSA!");
    Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    if (Src1)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that instruction.
    if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1))
      ++NumRemovableCopies;
  }

  // If any use of the original instruction is a cross-class copy, that's a
  // copy that will be removable if we transform. Likewise, if any of the
  // uses is a transformable instruction, it's likely the transforms will
  // chain, enabling us to save a copy there, too. This is an aggressive
  // heuristic that approximates the graph-based cost analysis described above.
  unsigned Dst = MI->getOperand(0).getReg();
  bool AllUsesAreCopies = true;
  for (MachineRegisterInfo::use_nodbg_iterator Use = MRI->use_nodbg_begin(Dst),
       E = MRI->use_nodbg_end(); Use != E; ++Use) {
    unsigned SubReg;
    if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(&*Use))
      ++NumRemovableCopies;
    // If the use is an INSERT_SUBREG, that's still something that can
    // directly use the FPR64, so we don't invalidate AllUsesAreCopies. It's
    // preferable to have it use the FPR64 in most cases, as if the source
    // vector is an IMPLICIT_DEF, the INSERT_SUBREG just goes away entirely.
    // Ditto for a lane insert.
    else if (Use->getOpcode() == ARM64::INSERT_SUBREG ||
             Use->getOpcode() == ARM64::INSvi64gpr)
      ;
    else
      AllUsesAreCopies = false;
  }
  // If all of the uses of the original destination register are copies to
  // FPR64, then we won't end up having a new copy back to GPR64 either.
  if (AllUsesAreCopies)
    --NumNewCopies;

  // If a transform will not increase the number of cross-class copies
  // required, return true.
  if (NumNewCopies <= NumRemovableCopies)
    return true;

  // Finally, even if we otherwise wouldn't transform, check if we're forcing
  // transformation of everything.
  return TransformAll;
}
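
A worked instance of the count heuristic, with hypothetical numbers: suppose both sources were produced by copies that have no other users, and every use of Dst is itself a copy.

unsigned NumNewCopies = 3;        // pessimistic start: two inputs + one result
NumNewCopies -= 2;                // Src0 and Src1 both came from copies
unsigned NumRemovableCopies = 2;  // ...and those two copies become dead
bool AllUsesAreCopies = true;
if (AllUsesAreCopies)
  --NumNewCopies;                 // no copy back from the FPR64 result needed
// NumNewCopies == 0 <= NumRemovableCopies == 2, so the transform pays off.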
Example #12
/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
     anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->setIsDefAccurate(false);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      VNInfo *VNI = li_->getVNInfoAt(lis_.getInstructionIndex(NextMI));
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}
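
Both loops above lean on skipInstruction(): it advances the use iterator past every operand of the current instruction before handing the instruction back, so the body may erase MI without invalidating the iterator. The loop shape in isolation, against the same LLVM 3.x API used throughout these examples:

for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(Reg);
     MachineInstr *MI = RI.skipInstruction();) {
  // MI may be modified or even erased here; RI already points past it.
}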