MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator NextI = I;
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (++NextI == Update)
    ++NextI;

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  if (!isPairedLdSt(I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value);
  } else {
    // Paired instruction.
    int Scale = getMemScale(I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .addOperand(getLdStRegOp(Update))
              .addOperand(getLdStRegOp(I, 0))
              .addOperand(getLdStRegOp(I, 1))
              .addOperand(getLdStBaseOp(I))
              .addImm(Value / Scale);
  }
  (void)MIB;

  if (IsPreIdx)
    DEBUG(dbgs() << "Creating pre-indexed load/store.");
  else
    DEBUG(dbgs() << "Creating post-indexed load/store.");
  DEBUG(dbgs() << "    Replacing instructions:\n    ");
  DEBUG(I->print(dbgs()));
  DEBUG(dbgs() << "    ");
  DEBUG(Update->print(dbgs()));
  DEBUG(dbgs() << "  with instruction:\n    ");
  DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}
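A note on the immediate above: the merged pre-/post-indexed instruction does not always encode the raw add/sub amount. It is negated for SUBXri, and for paired instructions it is divided by the access size, since paired load/store immediates are scaled. A minimal standalone sketch of just that computation (plain C++; the function name is invented here, not LLVM API):

#include <cassert>

// Mirrors the Value computation in mergeUpdateInsn above.
int encodeUpdateImm(int Value, bool IsSub, bool IsPaired, int MemScaleBytes) {
  if (IsSub)
    Value = -Value;                       // SUBXri decrements the base register.
  return IsPaired ? Value / MemScaleBytes // Paired immediates are scaled units.
                  : Value;                // Non-paired immediates are bytes.
}

int main() {
  // add x1, x1, #16 merged into a 64-bit LDP (scale 8) encodes imm #2.
  assert(encodeUpdateImm(16, /*IsSub=*/false, /*IsPaired=*/true, 8) == 2);
  // sub x1, x1, #4 merged into a plain LDR encodes imm #-4.
  assert(encodeUpdateImm(4, /*IsSub=*/true, /*IsPaired=*/false, 8) == -4);
}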
Example #2
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {

  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRS");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(IsLoad))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }
}
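The isUInt<12> test above checks Offset + Size rather than Offset because the loop emits offsets up to Offset + (NumSubRegs - 1) * 4; if the largest one would not fit the 12-bit immediate field, the whole offset is folded into the scavenged base register and the immediate restarts at zero. A standalone sketch of that split (plain C++, assuming the 12-bit unsigned field used above):

#include <cassert>
#include <cstdint>

bool fitsInUInt12(int64_t V) { return V >= 0 && V < (1 << 12); }

// Returns the residual immediate; BaseAdd receives the amount that must be
// added to the base register first (0 if nothing), as S_ADD_U32 does above.
int64_t splitScratchOffset(int64_t Offset, unsigned SizeBytes, int64_t &BaseAdd) {
  if (fitsInUInt12(Offset + SizeBytes)) {
    BaseAdd = 0;
    return Offset;
  }
  BaseAdd = Offset;
  return 0;
}

int main() {
  int64_t BaseAdd;
  assert(splitScratchOffset(100, 16, BaseAdd) == 100 && BaseAdd == 0);
  assert(splitScratchOffset(5000, 16, BaseAdd) == 0 && BaseAdd == 5000);
}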
MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
  --MBBI;
  for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}
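The backward scan above and the forward scan later share one idiom: skip debug values, test for a match first, then record the scanned instruction's defs and uses, and give up as soon as the base register is touched. The same idiom in miniature (plain C++; the Insn record is a stand-in for MachineInstr, not LLVM API):

#include <cassert>
#include <vector>

struct Insn {
  std::vector<int> Defs, Uses; // registers as small integers
  bool IsMatch = false;        // stand-in for isMatchingUpdateInsn()
};

// Returns the index of the matching insn, or -1 if the scan must stop.
int scanBackward(const std::vector<Insn> &Block, int From, int BaseReg) {
  std::vector<bool> Modified(64, false), Used(64, false);
  for (int I = From; I >= 0; --I) {
    if (Block[I].IsMatch)                           // check for a match first
      return I;
    for (int R : Block[I].Defs) Modified[R] = true; // then track defs/uses
    for (int R : Block[I].Uses) Used[R] = true;
    if (Modified[BaseReg] || Used[BaseReg])
      return -1;                // base register touched: no safe merge
  }
  return -1;
}

int main() {
  std::vector<Insn> Block(3);
  Block[0].IsMatch = true;      // candidate update instruction
  Block[1].Defs = {5};          // clobbers an unrelated register
  assert(scanBackward(Block, 1, /*BaseReg=*/7) == 0);
  Block[1].Defs = {7};          // now clobbers the base register
  assert(scanBackward(Block, 1, /*BaseReg=*/7) == -1);
}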
MachineBasicBlock::iterator
SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
                                         unsigned EltSize) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  ++MBBI;

  // Guard against walking off the end of the block before dereferencing.
  if (MBBI == E || MBBI->getOpcode() != I->getOpcode())
    return E;

  // Don't merge volatiles.
  if (MBBI->hasOrderedMemoryRef())
    return E;

  int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
                                           AMDGPU::OpName::addr);
  const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
  const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

  // Check same base pointer. Be careful of subregisters, which can occur with
  // vectors of pointers.
  if (AddrReg0.getReg() == AddrReg1.getReg() &&
      AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
    int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
                                               AMDGPU::OpName::offset);
    unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
    unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;

    // Check both offsets fit in the reduced range.
    if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
      return MBBI;
  }

  return E;
}
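offsetsCanBeCombined() is not shown here, so the following is only a hedged sketch of what "fit in the reduced range" plausibly means: DS_READ2/DS_WRITE2 encode two 8-bit offset fields counted in EltSize units, which suggests distinct, element-aligned offsets whose scaled values each fit in a u8. The real rules live in SILoadStoreOptimizer and may differ.

#include <cassert>

// Assumed check, not the actual LLVM implementation.
bool offsetsCanBeCombinedSketch(unsigned Off0, unsigned Off1, unsigned EltSize) {
  if (Off0 == Off1)
    return false;                         // need two distinct elements
  if (Off0 % EltSize || Off1 % EltSize)
    return false;                         // offsets must be element-aligned
  return Off0 / EltSize < 256 && Off1 / EltSize < 256; // each fits a u8 field
}

int main() {
  assert(offsetsCanBeCombinedSketch(0, 4, 4));     // adjacent dwords combine
  assert(!offsetsCanBeCombinedSketch(0, 4096, 4)); // 1024 doesn't fit in a u8
}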
Example #5
/// convertLoopInstr - Convert a loop instruction to a sequence of instructions
/// that set the LC and SA registers explicitly.
void HexagonFixupHwLoops::convertLoopInstr(MachineFunction &MF,
                                           MachineBasicBlock::iterator &MII,
                                           RegScavenger &RS) {
  const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
  MachineBasicBlock *MBB = MII->getParent();
  DebugLoc DL = MII->getDebugLoc();
  unsigned Scratch = RS.scavengeRegister(Hexagon::IntRegsRegisterClass, MII, 0);

  // First, set the LC0 with the trip count.
  if (MII->getOperand(1).isReg()) {
    // Trip count is a register
    BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::LC0)
      .addReg(MII->getOperand(1).getReg());
  } else {
    // Trip count is an immediate.
    BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFRI), Scratch)
      .addImm(MII->getOperand(1).getImm());
    BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::LC0)
      .addReg(Scratch);
  }
  // Then, set the SA0 with the loop start address.
  BuildMI(*MBB, MII, DL, TII->get(Hexagon::CONST32_Label), Scratch)
    .addMBB(MII->getOperand(0).getMBB());
  BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::SA0).addReg(Scratch);
}
Example #6
void GCMachineCodeAnalysis::VisitCallPoint(MachineBasicBlock::iterator CI) {
  // Find the return address (next instruction), too, so as to bracket the call
  // instruction.
  MachineBasicBlock::iterator RAI = CI;
  ++RAI;

  if (FI->getStrategy().needsSafePoint(GC::PreCall)) {
    MCSymbol *Label = InsertLabel(*CI->getParent(), CI, CI->getDebugLoc());
    FI->addSafePoint(GC::PreCall, Label, CI->getDebugLoc());
  }

  if (FI->getStrategy().needsSafePoint(GC::PostCall)) {
    MCSymbol *Label = InsertLabel(*CI->getParent(), RAI, CI->getDebugLoc());
    FI->addSafePoint(GC::PostCall, Label, CI->getDebugLoc());
  }
}
Example #7
unsigned RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC,
                                                 MachineBasicBlock::iterator To,
                                                 bool RestoreAfter, int SPAdj) {
  const MachineBasicBlock &MBB = *To->getParent();
  const MachineFunction &MF = *MBB.getParent();

  // Find the register whose use is furthest away.
  MachineBasicBlock::iterator UseMI;
  ArrayRef<MCPhysReg> AllocationOrder = RC.getRawAllocationOrder(MF);
  std::pair<MCPhysReg, MachineBasicBlock::iterator> P =
      findSurvivorBackwards(*MRI, MBBI, To, LiveUnits, AllocationOrder,
                            RestoreAfter);
  MCPhysReg Reg = P.first;
  MachineBasicBlock::iterator SpillBefore = P.second;
  assert(Reg != 0 && "No register left to scavenge!");
  // Found an available register?
  if (SpillBefore != MBB.end()) {
    MachineBasicBlock::iterator ReloadAfter =
      RestoreAfter ? std::next(MBBI) : MBBI;
    MachineBasicBlock::iterator ReloadBefore = std::next(ReloadAfter);
    if (ReloadBefore != MBB.end())
      LLVM_DEBUG(dbgs() << "Reload before: " << *ReloadBefore << '\n');
    ScavengedInfo &Scavenged = spill(Reg, RC, SPAdj, SpillBefore, ReloadBefore);
    Scavenged.Restore = &*std::prev(SpillBefore);
    LiveUnits.removeReg(Reg);
    LLVM_DEBUG(dbgs() << "Scavenged register with spill: " << printReg(Reg, TRI)
                      << " until " << *SpillBefore);
  } else {
    LLVM_DEBUG(dbgs() << "Scavenged free register: " << printReg(Reg, TRI)
                      << '\n');
  }
  return Reg;
}
/// \brief Replace loop instructions with the constant extended version.
void HexagonFixupHwLoops::useExtLoopInstr(MachineFunction &MF,
                                          MachineBasicBlock::iterator &MII) {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  MachineBasicBlock *MBB = MII->getParent();
  DebugLoc DL = MII->getDebugLoc();
  MachineInstrBuilder MIB;
  unsigned newOp;
  switch (MII->getOpcode()) {
  case Hexagon::J2_loop0r:
    newOp = Hexagon::J2_loop0rext;
    break;
  case Hexagon::J2_loop0i:
    newOp = Hexagon::J2_loop0iext;
    break;
  case Hexagon::J2_loop1r:
    newOp = Hexagon::J2_loop1rext;
    break;
  case Hexagon::J2_loop1i:
    newOp = Hexagon::J2_loop1iext;
    break;
  default:
    llvm_unreachable("Invalid Hardware Loop Instruction.");
  }
  MIB = BuildMI(*MBB, MII, DL, TII->get(newOp));

  for (unsigned i = 0; i < MII->getNumOperands(); ++i)
    MIB.add(MII->getOperand(i));
}
Example #9
// precedes - Helper function to determine whether MachineInstr A
// precedes MachineInstr B within the same MBB.
static bool precedes(MachineBasicBlock::iterator A,
                     MachineBasicBlock::iterator B) {
  if (A == B)
    return false;
  
  MachineBasicBlock::iterator I = A->getParent()->begin();
  while (I != A->getParent()->end()) {
    if (I == A)
      return true;
    else if (I == B)
      return false;
    
    ++I;
  }
  
  return false;
}
Example #10
bool VmkitGC::findCustomSafePoints(GCFunctionInfo& FI, MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(),
                                 BBE = MF.end(); BBI != BBE; ++BBI) {
    for (MachineBasicBlock::iterator MI = BBI->begin(),
                                     ME = BBI->end(); MI != ME; ++MI) {
      if (MI->getDesc().isCall()) {
        MachineBasicBlock::iterator RAI = MI; ++RAI;                                
        MCSymbol* Label = InsertLabel(*MI->getParent(), RAI, MI->getDebugLoc());
        FI.addSafePoint(GC::PostCall, Label, MI->getDebugLoc());
      } else if (MI->getDebugLoc().getCol() == 1) {
        MCSymbol* Label = InsertLabel(*MI->getParent(), MI, MI->getDebugLoc());
        FI.addSafePoint(GC::Loop, Label, MI->getDebugLoc());
      }
    }
  }
  return false;
}
void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}
Example #12
static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr*> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}
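moveInstsAfter() preserves the relative order of InstsToMove because every instruction is re-inserted before the same fixed iterator, the one just past I. The same property demonstrated on a std::list (plain C++, not LLVM):

#include <cassert>
#include <iterator>
#include <list>
#include <vector>

int main() {
  std::list<int> Block = {1, 2, 3, 4, 5};
  auto It = std::next(Block.begin()); // "I" points at 2
  ++It;                               // insertion point: just past "I"
  std::vector<int> ToMove = {4, 5};
  for (int V : ToMove) {
    Block.remove(V);                  // stand-in for removeFromParent()
    Block.insert(It, V);              // stand-in for MBB->insert(I, MI)
  }
  // 4 and 5 now follow 2, in their original relative order.
  assert((Block == std::list<int>{1, 2, 4, 5, 3}));
}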
MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, unsigned Limit, int UnscaledOffset) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr *MemMI = I;
  MachineBasicBlock::iterator MBBI = I;

  unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
  int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);

  // Scan forward looking for post-index opportunities.  Updating instructions
  // can't be formed if the memory instruction doesn't have the offset we're
  // looking for.
  if (MIUnscaledOffset != UnscaledOffset)
    return E;

  // If the base register overlaps a destination register, we can't
  // merge the update.
  bool IsPairedInsn = isPairedLdSt(MemMI);
  for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
    unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
    if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
      return E;
  }

  // Track which registers have been modified and used between the first insn
  // (inclusive) and the second insn.
  BitVector ModifiedRegs, UsedRegs;
  ModifiedRegs.resize(TRI->getNumRegs());
  UsedRegs.resize(TRI->getNumRegs());
  ++MBBI;
  for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
    MachineInstr *MI = MBBI;
    // Skip DBG_VALUE instructions. Otherwise debug info can affect the
    // optimization by changing how far we scan.
    if (MI->isDebugValue())
      continue;

    // Now that we know this is a real instruction, count it.
    ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);

    // Otherwise, if the base register is used or modified, we have no match, so
    // return early.
    if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
      return E;
  }
  return E;
}
/// InsertCopies - insert copies into MBB and all of its successors
void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
                                 SmallPtrSet<MachineBasicBlock*, 16>& visited) {
  MachineBasicBlock* MBB = MDTN->getBlock();
  visited.insert(MBB);
  
  std::set<unsigned> pushed;
  
  LiveIntervals& LI = getAnalysis<LiveIntervals>();
  // Rewrite register uses from Stacks
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
      I != E; ++I) {
    if (I->isPHI())
      continue;
    
    for (unsigned i = 0; i < I->getNumOperands(); ++i)
      if (I->getOperand(i).isReg() &&
          Stacks[I->getOperand(i).getReg()].size()) {
        // Remove the live range for the old vreg.
        LiveInterval& OldInt = LI.getInterval(I->getOperand(i).getReg());
        LiveInterval::iterator OldLR =
          OldInt.FindLiveRangeContaining(LI.getInstructionIndex(I).getUseIndex());
        if (OldLR != OldInt.end())
          OldInt.removeRange(*OldLR, true);
        
        // Change the register
        I->getOperand(i).setReg(Stacks[I->getOperand(i).getReg()].back());
        
        // Add a live range for the new vreg
        LiveInterval& Int = LI.getInterval(I->getOperand(i).getReg());
        VNInfo* FirstVN = *Int.vni_begin();
        FirstVN->setHasPHIKill(false);
        LiveRange LR (LI.getMBBStartIdx(I->getParent()),
                      LI.getInstructionIndex(I).getUseIndex().getNextSlot(),
                      FirstVN);
        
        Int.addRange(LR);
      }
  }    
  
  // Schedule the copies for this block
  ScheduleCopies(MBB, pushed);
  
  // Recur down the dominator tree.
  for (MachineDomTreeNode::iterator I = MDTN->begin(),
       E = MDTN->end(); I != E; ++I)
    if (!visited.count((*I)->getBlock()))
      InsertCopies(*I, visited);
  
  // As we exit this block, pop the names we pushed while processing it
  for (std::set<unsigned>::iterator I = pushed.begin(), 
       E = pushed.end(); I != E; ++I)
    Stacks[*I].pop_back();
}
Example #15
/// calculateCallsInformation - Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallsInformation(MachineFunction &Fn) {
  const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
  const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
  MachineFrameInfo *MFI = Fn.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI->adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode
  int FrameSetupOpcode   = TII.getCallFrameSetupOpcode();
  int FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == -1 && FrameDestroyOpcode == -1)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
      if (I->getOpcode() == FrameSetupOpcode ||
          I->getOpcode() == FrameDestroyOpcode) {
        assert(I->getNumOperands() >= 1 && "Call Frame Setup/Destroy Pseudo"
               " instructions should have a single immediate argument!");
        unsigned Size = I->getOperand(0).getImm();
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  MFI->setAdjustsStack(AdjustsStack);
  MFI->setMaxCallFrameSize(MaxCallFrameSize);

  for (std::vector<MachineBasicBlock::iterator>::iterator
         i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
    MachineBasicBlock::iterator I = *i;

    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(Fn))
      RegInfo->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
  }
}
void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      if (MBBI->isDebugValue()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach here if branch folding is run before the IT
    // block formation pass.
  }
}
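The mask arithmetic in the t2IT fixup deserves unpacking: reading the code above, the low bits of the IT mask operand encode the block length via a terminating set bit, so (Mask & MaskOff) | MaskOn clears everything below position Count and plants a new terminator there, shortening the block to exclude the replaced tail. A standalone check of the bit manipulation (the encoding interpretation is inferred from this code, not from the ARM reference manual):

#include <cassert>

unsigned truncateITMask(unsigned Mask, unsigned Count) {
  unsigned MaskOn  = 1u << Count;   // new terminator bit
  unsigned MaskOff = ~(MaskOn - 1); // clear all bits below it
  return (Mask & MaskOff) | MaskOn;
}

int main() {
  // A full four-instruction block terminates at bit 0; dropping the last
  // instruction (Count == 1) moves the terminator up to bit 1.
  assert(truncateITMask(0x1, 1) == 0x2);
  assert(truncateITMask(0x1, 2) == 0x4);
}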
Example #17
bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();

  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Mips::RetRA16:
    ExpandRetRA16(MBB, MI, Mips::JrRa16);
    break;
  }

  MBB.erase(MI);
  return true;
}
bool CoffeeInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();

  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Coffee::RetLR:
    ExpandRetLR(MBB, MI, Coffee::RET);
    break;
  }

  MBB.erase(MI);
  return true;
}
Example #19
void HexagonCopyToCombine::emitCombineRI(MachineBasicBlock::iterator &InsertPt,
                                         unsigned DoubleDestReg,
                                         MachineOperand &HiOperand,
                                         MachineOperand &LoOperand) {
  unsigned HiRegKillFlag = getKillRegState(HiOperand.isKill());
  unsigned HiReg = HiOperand.getReg();

  DebugLoc DL = InsertPt->getDebugLoc();
  MachineBasicBlock *BB = InsertPt->getParent();

  // Handle global.
  if (LoOperand.isGlobal()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
      .addReg(HiReg, HiRegKillFlag)
      .addGlobalAddress(LoOperand.getGlobal(), LoOperand.getOffset(),
                        LoOperand.getTargetFlags());
    return;
  }
  // Handle block addresses.
  if (LoOperand.isBlockAddress()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
      .addReg(HiReg, HiRegKillFlag)
      .addBlockAddress(LoOperand.getBlockAddress(), LoOperand.getOffset(),
                       LoOperand.getTargetFlags());
    return;
  }
  // Handle jump tables.
  if (LoOperand.isJTI()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
      .addReg(HiOperand.getReg(), HiRegKillFlag)
      .addJumpTableIndex(LoOperand.getIndex(), LoOperand.getTargetFlags());
    return;
  }
  // Handle constant pools.
  if (LoOperand.isCPI()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
      .addReg(HiOperand.getReg(), HiRegKillFlag)
      .addConstantPoolIndex(LoOperand.getIndex(), LoOperand.getOffset(),
                            LoOperand.getTargetFlags());
    return;
  }

  // Insert new combine instruction.
  //  DoubleRegDest = combine HiReg, #LoImm
  BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
    .addReg(HiReg, HiRegKillFlag)
    .addImm(LoOperand.getImm());
}
Example #20
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::chan);

  if (isRegisterLoad(*MI)) {
    int DstOpIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         Address, OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}
void HexagonCopyToCombine::emitConst64(MachineBasicBlock::iterator &InsertPt,
                                       unsigned DoubleDestReg,
                                       MachineOperand &HiOperand,
                                       MachineOperand &LoOperand) {
  DEBUG(dbgs() << "Found a CONST64\n");

  DebugLoc DL = InsertPt->getDebugLoc();
  MachineBasicBlock *BB = InsertPt->getParent();
  assert(LoOperand.isImm() && HiOperand.isImm() &&
         "Both operands must be immediate");

  int64_t V = HiOperand.getImm();
  V = (V << 32) | (0x0ffffffffLL & LoOperand.getImm());
  BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::CONST64_Int_Real),
    DoubleDestReg)
    .addImm(V);
}
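The masking in emitConst64 matters: without 0x0ffffffffLL, a negative LoOperand would sign-extend across the high half when OR-ed in. The same packing, standalone, using an unsigned shift to keep the arithmetic well-defined:

#include <cassert>
#include <cstdint>

uint64_t packConst64(int64_t Hi, int64_t Lo) {
  return ((uint64_t)Hi << 32) | (0xffffffffULL & (uint64_t)Lo);
}

int main() {
  assert(packConst64(1, -1) == 0x1ffffffffULL); // Lo's sign bits are masked off
  assert(packConst64(0, -1) == 0xffffffffULL);  // Hi stays zero
}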
Example #22
bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();

  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Mips::RetRA:
    expandRetRA(MBB, MI, Mips::RET);
    break;
  case Mips::PseudoCVT_S_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_S_W, Mips::MTC1, false);
    break;
  case Mips::PseudoCVT_D32_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_D32_W, Mips::MTC1, false);
    break;
  case Mips::PseudoCVT_S_L:
    expandCvtFPInt(MBB, MI, Mips::CVT_S_L, Mips::DMTC1, true);
    break;
  case Mips::PseudoCVT_D64_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_D64_W, Mips::MTC1, true);
    break;
  case Mips::PseudoCVT_D64_L:
    expandCvtFPInt(MBB, MI, Mips::CVT_D64_L, Mips::DMTC1, true);
    break;
  case Mips::BuildPairF64:
    expandBuildPairF64(MBB, MI);
    break;
  case Mips::ExtractElementF64:
    expandExtractElementF64(MBB, MI);
    break;
  case Mips::PseudoLDC1:
    expandDPLoadStore(MBB, MI, Mips::LDC1, Mips::LWC1);
    break;
  case Mips::PseudoSDC1:
    expandDPLoadStore(MBB, MI, Mips::SDC1, Mips::SWC1);
    break;
  case Mips::MIPSeh_return32:
  case Mips::MIPSeh_return64:
    expandEhReturn(MBB, MI);
    break;
  }

  MBB.erase(MI);
  return true;
}
Example #23
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}
Example #24
void HexagonCopyToCombine::emitCombineRR(MachineBasicBlock::iterator &InsertPt,
                                         unsigned DoubleDestReg,
                                         MachineOperand &HiOperand,
                                         MachineOperand &LoOperand) {
  unsigned LoRegKillFlag = getKillRegState(LoOperand.isKill());
  unsigned HiRegKillFlag = getKillRegState(HiOperand.isKill());
  unsigned LoReg = LoOperand.getReg();
  unsigned HiReg = HiOperand.getReg();

  DebugLoc DL = InsertPt->getDebugLoc();
  MachineBasicBlock *BB = InsertPt->getParent();

  // Insert new combine instruction.
  //  DoubleRegDest = combine HiReg, LoReg
  BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combinew), DoubleDestReg)
    .addReg(HiReg, HiRegKillFlag)
    .addReg(LoReg, LoRegKillFlag);
}
Example #25
bool ErlangGC::findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE;
       ++BBI)
    for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end();
         MI != ME; ++MI)

      if (MI->getDesc().isCall()) {

        // Do not treat tail call sites as safe points.
        if (MI->getDesc().isTerminator())
          continue;

        /* Code copied from VisitCallPoint(...) */
        MachineBasicBlock::iterator RAI = MI; ++RAI;
        MCSymbol* Label = InsertLabel(*MI->getParent(), RAI, MI->getDebugLoc());
        FI.addSafePoint(GC::PostCall, Label, MI->getDebugLoc());
      }

  return false;
}
/// ScheduleCopies - Insert copies into predecessor blocks, scheduling
/// them properly so as to avoid the 'lost copy' and the 'virtual swap'
/// problems.
///
/// Based on "Practical Improvements to the Construction and Destruction
/// of Static Single Assignment Form" by Briggs, et al.
void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
                                          std::set<unsigned>& pushed) {
  // FIXME: This function needs to update LiveIntervals
  std::multimap<unsigned, unsigned>& copy_set= Waiting[MBB];
  
  std::multimap<unsigned, unsigned> worklist;
  std::map<unsigned, unsigned> map;
  
  // Setup worklist of initial copies
  for (std::multimap<unsigned, unsigned>::iterator I = copy_set.begin(),
       E = copy_set.end(); I != E; ) {
    map.insert(std::make_pair(I->first, I->first));
    map.insert(std::make_pair(I->second, I->second));
         
    if (!UsedByAnother.count(I->second)) {
      worklist.insert(*I);
      
      // Avoid iterator invalidation
      std::multimap<unsigned, unsigned>::iterator OI = I;
      ++I;
      copy_set.erase(OI);
    } else {
      ++I;
    }
  }
  
  LiveIntervals& LI = getAnalysis<LiveIntervals>();
  MachineFunction* MF = MBB->getParent();
  MachineRegisterInfo& MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
  
  SmallVector<std::pair<unsigned, MachineInstr*>, 4> InsertedPHIDests;
  
  // Iterate over the worklist, inserting copies
  while (!worklist.empty() || !copy_set.empty()) {
    while (!worklist.empty()) {
      std::multimap<unsigned, unsigned>::iterator WI = worklist.begin();
      std::pair<unsigned, unsigned> curr = *WI;
      worklist.erase(WI);
      
      const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(curr.first);
      
      if (isLiveOut(curr.second, MBB, LI)) {
        // Create a temporary
        unsigned t = MF->getRegInfo().createVirtualRegister(RC);
        
        // Insert copy from curr.second to a temporary at
        // the Phi defining curr.second
        MachineBasicBlock::iterator PI = MRI.getVRegDef(curr.second);
        TII->copyRegToReg(*PI->getParent(), PI, t,
                          curr.second, RC, RC, DebugLoc());
        
        DEBUG(dbgs() << "Inserted copy from " << curr.second << " to " << t
                     << "\n");
        
        // Push temporary on Stacks
        Stacks[curr.second].push_back(t);
        
        // Insert curr.second in pushed
        pushed.insert(curr.second);
        
        // Create a live interval for this temporary
        InsertedPHIDests.push_back(std::make_pair(t, --PI));
      }
      
      // Insert copy from map[curr.first] to curr.second
      TII->copyRegToReg(*MBB, MBB->getFirstTerminator(), curr.second,
                        map[curr.first], RC, RC, DebugLoc());
      map[curr.first] = curr.second;
      DEBUG(dbgs() << "Inserted copy from " << curr.first << " to "
                   << curr.second << "\n");
      
      // Push this copy onto InsertedPHICopies so we can
      // update LiveIntervals with it.
      MachineBasicBlock::iterator MI = MBB->getFirstTerminator();
      InsertedPHIDests.push_back(std::make_pair(curr.second, --MI));
      
      // If curr.first is a destination in copy_set...
      for (std::multimap<unsigned, unsigned>::iterator I = copy_set.begin(),
           E = copy_set.end(); I != E; )
        if (curr.first == I->second) {
          std::pair<unsigned, unsigned> temp = *I;
          worklist.insert(temp);
          
          // Avoid iterator invalidation
          std::multimap<unsigned, unsigned>::iterator OI = I;
          ++I;
          copy_set.erase(OI);
          
          break;
        } else {
          ++I;
        }
    }
    
    if (!copy_set.empty()) {
      std::multimap<unsigned, unsigned>::iterator CI = copy_set.begin();
      std::pair<unsigned, unsigned> curr = *CI;
      worklist.insert(curr);
      copy_set.erase(CI);
      
      LiveInterval& I = LI.getInterval(curr.second);
      MachineBasicBlock::iterator term = MBB->getFirstTerminator();
      SlotIndex endIdx = SlotIndex();
      if (term != MBB->end())
        endIdx = LI.getInstructionIndex(term);
      else
        endIdx = LI.getMBBEndIdx(MBB);
      
      if (I.liveAt(endIdx)) {
        const TargetRegisterClass *RC =
                                       MF->getRegInfo().getRegClass(curr.first);
        
        // Insert a copy from dest to a new temporary t at the end of b
        unsigned t = MF->getRegInfo().createVirtualRegister(RC);
        TII->copyRegToReg(*MBB, MBB->getFirstTerminator(), t,
                          curr.second, RC, RC, DebugLoc());
        map[curr.second] = t;
        
        MachineBasicBlock::iterator TI = MBB->getFirstTerminator();
        InsertedPHIDests.push_back(std::make_pair(t, --TI));
      }
    }
  }
  
  // Renumber the instructions so that we can perform the index computations
  // needed to create new live intervals.
  LI.renumber();
  
  // For copies that we inserted at the ends of predecessors, we construct
  // live intervals.  This is pretty easy, since we know that the destination
// register cannot have been live at that point previously.  We just have
  // to make sure that, for registers that serve as inputs to more than one
  // PHI, we don't create multiple overlapping live intervals.
  std::set<unsigned> RegHandled;
  for (SmallVector<std::pair<unsigned, MachineInstr*>, 4>::iterator I =
       InsertedPHIDests.begin(), E = InsertedPHIDests.end(); I != E; ++I) {
    if (RegHandled.insert(I->first).second) {
      LiveInterval& Int = LI.getOrCreateInterval(I->first);
      SlotIndex instrIdx = LI.getInstructionIndex(I->second);
      if (Int.liveAt(instrIdx.getDefIndex()))
        Int.removeRange(instrIdx.getDefIndex(),
                        LI.getMBBEndIdx(I->second->getParent()).getNextSlot(),
                        true);
      
      LiveRange R = LI.addLiveRangeToEndOfBlock(I->first, I->second);
      R.valno->setCopy(I->second);
      R.valno->def = LI.getInstructionIndex(I->second).getDefIndex();
    }
  }
}
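The worklist structure of ScheduleCopies is easier to see without the LiveIntervals bookkeeping: a copy whose destination is not read by any pending copy can be emitted immediately; when only cycles remain, one source is parked in a temporary, which is the 'virtual swap' case. A self-contained miniature of that scheduling core (registers as integers; a sketch of the idea, not the LLVM pass):

#include <cassert>
#include <map>
#include <utility>
#include <vector>

// Copies are Dst <- Src; returns a serialized (Dst, Src) sequence that is
// equivalent to performing all copies in parallel. Tmp must be unused.
std::vector<std::pair<int, int>>
serializeParallelCopies(std::map<int, int> Copies, int Tmp) {
  std::vector<std::pair<int, int>> Out;
  while (!Copies.empty()) {
    bool Progress = false;
    for (auto It = Copies.begin(); It != Copies.end();) {
      bool DstStillRead = false;
      for (auto &C : Copies)
        if (C.second == It->first)
          DstStillRead = true;
      if (!DstStillRead) {          // safe: nothing pending reads Dst
        Out.push_back({It->first, It->second});
        It = Copies.erase(It);
        Progress = true;
      } else {
        ++It;
      }
    }
    if (!Progress && !Copies.empty()) { // pure cycle: break it with Tmp
      int Saved = Copies.begin()->second;
      Out.push_back({Tmp, Saved});      // save one source in the temporary
      for (auto &C : Copies)
        if (C.second == Saved)
          C.second = Tmp;               // pending readers use Tmp instead
    }
  }
  return Out;
}

int main() {
  // Swapping r1 and r2 in parallel needs the temporary.
  auto Seq = serializeParallelCopies({{1, 2}, {2, 1}}, /*Tmp=*/99);
  std::vector<std::pair<int, int>> Expect = {{99, 2}, {2, 1}, {1, 99}};
  assert(Seq == Expect);
}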
Example #27
MachineInstrBuilder
MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
                                  MachineBasicBlock::iterator I) const {
  MachineInstrBuilder MIB;

  // Certain branches have two forms: e.g. beq $1, $zero, dest vs beqz $1, dest
  // Pick the zero form of the branch for readable assembly and for greater
  // branch distance in non-microMIPS mode.
  // FIXME: Certain atomic sequences on mips64 generate 32bit references to
  // Mips::ZERO, which is incorrect. This test should be updated to use
  // Subtarget.getABI().GetZeroReg() when those atomic sequences and others
  // are fixed.
  bool BranchWithZeroOperand =
      (I->isBranch() && !I->isPseudo() && I->getOperand(1).isReg() &&
       (I->getOperand(1).getReg() == Mips::ZERO ||
        I->getOperand(1).getReg() == Mips::ZERO_64));

  if (BranchWithZeroOperand) {
    switch (NewOpc) {
    case Mips::BEQC:
      NewOpc = Mips::BEQZC;
      break;
    case Mips::BNEC:
      NewOpc = Mips::BNEZC;
      break;
    case Mips::BGEC:
      NewOpc = Mips::BGEZC;
      break;
    case Mips::BLTC:
      NewOpc = Mips::BLTZC;
      break;
    }
  }

  MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));

  // For MIPSR6, JI*C requires an immediate 0 as an operand, and JIALC(64)
  // additionally requires the removal of its %RA<imp-def> implicit operand,
  // since copying the implicit operands of the instruction we're looking at
  // gives us the correct flags.
  if (NewOpc == Mips::JIC || NewOpc == Mips::JIALC || NewOpc == Mips::JIC64 ||
      NewOpc == Mips::JIALC64) {

    if (NewOpc == Mips::JIALC || NewOpc == Mips::JIALC64)
      MIB->RemoveOperand(0);

    for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
      MIB.addOperand(I->getOperand(J));
    }

    MIB.addImm(0);

  } else if (BranchWithZeroOperand) {
    // For MIPSR6 and microMIPS branches with an explicit zero operand, copy
    // everything after the zero.
    MIB.addOperand(I->getOperand(0));

    for (unsigned J = 2, E = I->getDesc().getNumOperands(); J < E; ++J) {
      MIB.addOperand(I->getOperand(J));
    }
  } else {
    // All other cases copy all other operands.
    for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
      MIB.addOperand(I->getOperand(J));
    }
  }

  MIB.copyImplicitOps(*I);

  MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
  return MIB;
}
Example #28
MachineInstrBuilder
MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
                                  MachineBasicBlock::iterator I) const {
  MachineInstrBuilder MIB;

  // Certain branches have two forms: e.g beq $1, $zero, dest vs beqz $1, dest
  // Pick the zero form of the branch for readable assembly and for greater
  // branch distance in non-microMIPS mode.
  // Additionally, MIPSR6 does not permit the use of register $zero for compact
  // branches.
  // FIXME: Certain atomic sequences on mips64 generate 32bit references to
  // Mips::ZERO, which is incorrect. This test should be updated to use
  // Subtarget.getABI().GetZeroReg() when those atomic sequences and others
  // are fixed.
  int ZeroOperandPosition = -1;
  bool BranchWithZeroOperand = false;
  if (I->isBranch() && !I->isPseudo()) {
    auto TRI = I->getParent()->getParent()->getSubtarget().getRegisterInfo();
    ZeroOperandPosition = I->findRegisterUseOperandIdx(Mips::ZERO, false, TRI);
    BranchWithZeroOperand = ZeroOperandPosition != -1;
  }

  if (BranchWithZeroOperand) {
    switch (NewOpc) {
    case Mips::BEQC:
      NewOpc = Mips::BEQZC;
      break;
    case Mips::BNEC:
      NewOpc = Mips::BNEZC;
      break;
    case Mips::BGEC:
      NewOpc = Mips::BGEZC;
      break;
    case Mips::BLTC:
      NewOpc = Mips::BLTZC;
      break;
    case Mips::BEQC64:
      NewOpc = Mips::BEQZC64;
      break;
    case Mips::BNEC64:
      NewOpc = Mips::BNEZC64;
      break;
    }
  }

  MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));

  // For MIPSR6, JI*C requires an immediate 0 as an operand, and JIALC(64)
  // additionally requires the removal of its %RA<imp-def> implicit operand,
  // since copying the implicit operands of the instruction we're looking at
  // gives us the correct flags.
  if (NewOpc == Mips::JIC || NewOpc == Mips::JIALC || NewOpc == Mips::JIC64 ||
      NewOpc == Mips::JIALC64) {

    if (NewOpc == Mips::JIALC || NewOpc == Mips::JIALC64)
      MIB->RemoveOperand(0);

    for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
      MIB.add(I->getOperand(J));
    }

    MIB.addImm(0);

  } else {
    for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
      if (BranchWithZeroOperand && (unsigned)ZeroOperandPosition == J)
        continue;

      MIB.add(I->getOperand(J));
    }
  }

  MIB.copyImplicitOps(*I);

  MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
  return MIB;
}
Example #29
void HexagonCopyToCombine::emitCombineII(MachineBasicBlock::iterator &InsertPt,
                                         unsigned DoubleDestReg,
                                         MachineOperand &HiOperand,
                                         MachineOperand &LoOperand) {
  DebugLoc DL = InsertPt->getDebugLoc();
  MachineBasicBlock *BB = InsertPt->getParent();

  // Handle globals.
  if (HiOperand.isGlobal()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
      .addGlobalAddress(HiOperand.getGlobal(), HiOperand.getOffset(),
                        HiOperand.getTargetFlags())
      .addImm(LoOperand.getImm());
    return;
  }
  if (LoOperand.isGlobal()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineii), DoubleDestReg)
      .addImm(HiOperand.getImm())
      .addGlobalAddress(LoOperand.getGlobal(), LoOperand.getOffset(),
                        LoOperand.getTargetFlags());
    return;
  }

  // Handle block addresses.
  if (HiOperand.isBlockAddress()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
      .addBlockAddress(HiOperand.getBlockAddress(), HiOperand.getOffset(),
                       HiOperand.getTargetFlags())
      .addImm(LoOperand.getImm());
    return;
  }
  if (LoOperand.isBlockAddress()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineii), DoubleDestReg)
      .addImm(HiOperand.getImm())
      .addBlockAddress(LoOperand.getBlockAddress(), LoOperand.getOffset(),
                       LoOperand.getTargetFlags());
    return;
  }

  // Handle jump tables.
  if (HiOperand.isJTI()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
      .addJumpTableIndex(HiOperand.getIndex(), HiOperand.getTargetFlags())
      .addImm(LoOperand.getImm());
    return;
  }
  if (LoOperand.isJTI()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineii), DoubleDestReg)
      .addImm(HiOperand.getImm())
      .addJumpTableIndex(LoOperand.getIndex(), LoOperand.getTargetFlags());
    return;
  }

  // Handle constant pools.
  if (HiOperand.isCPI()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
      .addConstantPoolIndex(HiOperand.getIndex(), HiOperand.getOffset(),
                            HiOperand.getTargetFlags())
      .addImm(LoOperand.getImm());
    return;
  }
  if (LoOperand.isCPI()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineii), DoubleDestReg)
      .addImm(HiOperand.getImm())
      .addConstantPoolIndex(LoOperand.getIndex(), LoOperand.getOffset(),
                            LoOperand.getTargetFlags());
    return;
  }

  // Prefer the Hexagon::A2_combineii instruction, since it also covers the
  // U6 range handled by Hexagon::A4_combineii.
  // In this form, HiOperand is constant-extended if required.
  if (isInt<8>(LoOperand.getImm())) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
      .addImm(HiOperand.getImm())
      .addImm(LoOperand.getImm());
    return;
  }

  // In this form, LoOperand is constant-extended if required.
  if (isInt<8>(HiOperand.getImm())) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineii), DoubleDestReg)
      .addImm(HiOperand.getImm())
      .addImm(LoOperand.getImm());
    return;
  }

  // Insert new combine instruction.
  //  DoubleRegDest = combine #HiImm, #LoImm
  BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A2_combineii), DoubleDestReg)
    .addImm(HiOperand.getImm())
    .addImm(LoOperand.getImm());
}
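The tail of emitCombineII selects between the two immediate forms by which operand fits a signed 8-bit field, falling back to A2_combineii otherwise. Just that selection, standalone (the constant-extension reading is taken from the comments above, not from the Hexagon manual):

#include <cassert>
#include <cstdint>
#include <string>

bool isInt8(int64_t V) { return V >= -128 && V <= 127; }

std::string pickCombineII(int64_t Hi, int64_t Lo) {
  if (isInt8(Lo))
    return "A2_combineii"; // HiOperand constant-extended if required
  if (isInt8(Hi))
    return "A4_combineii"; // LoOperand constant-extended if required
  return "A2_combineii";   // fallback, as in the function above
}

int main() {
  assert(pickCombineII(100000, 5) == "A2_combineii");
  assert(pickCombineII(5, 100000) == "A4_combineii");
}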
Example #30
/// If \p MBBI is a pseudo instruction, this method expands
/// it to the corresponding (sequence of) actual instruction(s).
/// \returns true if \p MBBI has been expanded.
bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI) {
  MachineInstr &MI = *MBBI;
  unsigned Opcode = MI.getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  switch (Opcode) {
  default:
    return false;
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64: {
    bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();

    if (StackAdj) {
      // Check for possible merge with preceding ADD instruction.
      StackAdj += X86FL->mergeSPUpdates(MBB, MBBI, true);
      X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
    }

    // Jump to label or value in register.
    bool IsWin64 = STI->isTargetWin64();
    if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
      unsigned Op = (Opcode == X86::TCRETURNdi)
                        ? X86::TAILJMPd
                        : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
      unsigned Op = (Opcode == X86::TCRETURNmi)
                        ? X86::TAILJMPm
                        : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (Opcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL,
              TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
          .addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPr))
          .addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = std::prev(MBBI);
    NewMI->copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);

    return true;
  }
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    const bool Uses64BitFramePtr =
        STI->isTarget64BitLP64() || STI->isTargetNaCl64();
    unsigned StackPtr = TRI->getStackRegister();
    BuildMI(MBB, MBBI, DL,
            TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)
        .addReg(DestAddr.getReg());
    // The EH_RETURN pseudo is actually removed during MC lowering.
    return true;
  }
  case X86::IRET: {
    // Adjust stack to erase error code
    int64_t StackAdj = MBBI->getOperand(0).getImm();
    X86FL->emitSPUpdate(MBB, MBBI, StackAdj, true);
    // Replace pseudo with machine iret
    BuildMI(MBB, MBBI, DL,
            TII->get(STI->is64Bit() ? X86::IRET64 : X86::IRET32));
    MBB.erase(MBBI);
    return true;
  }
  case X86::RET: {
    // Adjust stack to erase error code
    int64_t StackAdj = MBBI->getOperand(0).getImm();
    MachineInstrBuilder MIB;
    if (StackAdj == 0) {
      MIB = BuildMI(MBB, MBBI, DL,
                    TII->get(STI->is64Bit() ? X86::RETQ : X86::RETL));
    } else if (isUInt<16>(StackAdj)) {
      MIB = BuildMI(MBB, MBBI, DL,
                    TII->get(STI->is64Bit() ? X86::RETIQ : X86::RETIL))
                .addImm(StackAdj);
    } else {
      assert(!STI->is64Bit() &&
             "shouldn't need to do this for x86_64 targets!");
      // A ret can only handle immediates as big as 2**16-1.  If we need to pop
      // off bytes before the return address, we must do it manually.
      BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r))
          .addReg(X86::ECX, RegState::Define);
      X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
      BuildMI(MBB, MBBI, DL, TII->get(X86::PUSH32r)).addReg(X86::ECX);
      MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RETL));
    }
    for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)
      MIB.addOperand(MBBI->getOperand(I));
    MBB.erase(MBBI);
    return true;
  }
  case X86::EH_RESTORE: {
    // Restore ESP and EBP, and optionally ESI if required.
    bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
        MBB.getParent()->getFunction()->getPersonalityFn()));
    X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
    MBBI->eraseFromParent();
    return true;
  }
  }
  llvm_unreachable("Previous switch has a fallthrough?");
}
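The X86::RET path for a stack adjustment above 2**16-1 is the least obvious case above: it frees the immediate form by popping the return address into ECX, discarding the extra bytes with an SP update, pushing the return address back, and issuing a plain RETL. The net effect, simulated on a toy stack:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Toy stack: push_back() is PUSH, back()/pop_back() is POP; 4-byte slots.
  std::vector<uint32_t> Stack = {0xAAAA, 0xBBBB, 0xCCCC, /*ret addr*/ 0x1234};
  unsigned StackAdjSlots = 2;                        // StackAdj / 4, > imm16
  uint32_t Ecx = Stack.back(); Stack.pop_back();     // pop %ecx
  Stack.resize(Stack.size() - StackAdjSlots);        // add $StackAdj, %esp
  Stack.push_back(Ecx);                              // push %ecx
  uint32_t RetAddr = Stack.back(); Stack.pop_back(); // ret
  assert(RetAddr == 0x1234 && Stack.size() == 1 && Stack[0] == 0xAAAA);
}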