Example #1
void SIInsertWaits::handleExistingWait(MachineBasicBlock::iterator I) {
  assert(I->getOpcode() == AMDGPU::S_WAITCNT);

  unsigned Imm = I->getOperand(0).getImm();
  Counters Counts, WaitOn;

  // Unpack the S_WAITCNT immediate into its VM, EXP and LGKM count fields.
  Counts.Named.VM = Imm & 0xF;
  Counts.Named.EXP = (Imm >> 4) & 0x7;
  Counts.Named.LGKM = (Imm >> 8) & 0xF;

  for (unsigned i = 0; i < 3; ++i) {
    if (Counts.Array[i] <= LastIssued.Array[i])
      WaitOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
    else
      WaitOn.Array[i] = 0;
  }

  increaseCounters(DelayedWaitOn, WaitOn);
}
Example #2
MachineBasicBlock::iterator Z80FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  //if (!hasReservedCallFrame(MF)) {
    unsigned Amount = TII.getFrameSize(*I);
    unsigned ScratchReg = I->getOperand(I->getNumOperands() - 1).getReg();
    assert((Z80::A24RegClass.contains(ScratchReg) ||
            Z80::A16RegClass.contains(ScratchReg)) &&
           "Expected last operand to be the scratch reg.");
    if (I->getOpcode() == TII.getCallFrameDestroyOpcode()) {
      Amount -= TII.getFramePoppedByCallee(*I);
      assert(TargetRegisterInfo::isPhysicalRegister(ScratchReg) &&
             "Reg alloc should have already happened.");
      BuildStackAdjustment(MF, MBB, I, I->getDebugLoc(), ScratchReg, Amount);
    }
    //}

  return MBB.erase(I);
}
Example #3
bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
{
  if (!I->isCall())
    return false;

  // The operand holding the returned-struct size depends on the call form.
  unsigned structSizeOpNum = 0;
  switch (I->getOpcode()) {
  default: llvm_unreachable("Unknown call opcode.");
  case SP::CALL: structSizeOpNum = 1; break;
  case SP::CALLrr:
  case SP::CALLri: structSizeOpNum = 2; break;
  case SP::TLS_CALL: return false;
  }

  const MachineOperand &MO = I->getOperand(structSizeOpNum);
  if (!MO.isImm())
    return false;
  StructSize = MO.getImm();
  return true;
}
Example #4
void AlphaRegisterInfo::emitEpilogue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  assert((MBBI->getOpcode() == Alpha::RETDAG ||
          MBBI->getOpcode() == Alpha::RETDAGp)
         && "Can only insert epilog into returning blocks");
  DebugLoc dl = MBBI->getDebugLoc();

  bool FP = hasFP(MF);

  // Get the number of bytes allocated from the FrameInfo...
  long NumBytes = MFI->getStackSize();

  // Now, if we need to, restore the old FP
  if (FP) {
    // Copy the FP into the SP (discards allocas)
    BuildMI(MBB, MBBI, dl, TII.get(Alpha::BISr), Alpha::R30).addReg(Alpha::R15)
      .addReg(Alpha::R15);
    // Restore the FP
    BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDQ), Alpha::R15)
      .addImm(0).addReg(Alpha::R15);
  }

  if (NumBytes != 0) {
    if (NumBytes <= IMM_HIGH) {
      BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30).addImm(NumBytes)
        .addReg(Alpha::R30);
    } else if (getUpper16(NumBytes) <= IMM_HIGH) {
      BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAH), Alpha::R30)
        .addImm(getUpper16(NumBytes)).addReg(Alpha::R30);
      BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30)
        .addImm(getLower16(NumBytes)).addReg(Alpha::R30);
    } else {
      std::string msg;
      raw_string_ostream Msg(msg);
      Msg << "Too big a stack frame at " << NumBytes;
      llvm_report_error(Msg.str());
    }
  }
}
Example #5
/// mergeSPUpdatesUp - If the instruction just above MBBI adds to or subtracts
/// from the stack pointer, fold its amount into *NumBytes (if given) and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}
Example #6
  bool runOnMachineFunction(MachineFunction &MF) override {
    const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
    TII = ST.getInstrInfo();

    for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                    BB != BB_E; ++BB) {
      MachineBasicBlock &MBB = *BB;
      MachineBasicBlock::iterator I = MBB.begin();
      if (I != MBB.end() && I->getOpcode() == R600::CF_ALU)
        continue; // BB was already parsed
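      // Group each run of ALU instructions into a clause; MakeALUClause
      // returns the iterator just past the clause it formed.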
      for (MachineBasicBlock::iterator E = MBB.end(); I != E;) {
        if (isALU(*I)) {
          auto next = MakeALUClause(MBB, I);
          assert(next != I);
          I = next;
        } else
          ++I;
      }
    }
    return false;
  }
Example #7
unsigned BPFInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (I->getOpcode() != BPF::JMP)
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}
Example #8
bool PIC16InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return true;

  // Get the terminator instruction.
  --I;
  // Handle unconditional branches. If the unconditional branch's target is
  // the successor basic block, then remove the unconditional branch.
  if (I->getOpcode() == PIC16::br_uncond  && AllowModify) {
    if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
      TBB = 0;
      I->eraseFromParent();
    }
  }
  return true;
}
Example #9
unsigned LanaiInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator Instruction = MBB.end();
  unsigned Count = 0;

  while (Instruction != MBB.begin()) {
    --Instruction;
    if (Instruction->isDebugValue())
      continue;
    if (Instruction->getOpcode() != Lanai::BT &&
        Instruction->getOpcode() != Lanai::BRCC) {
      break;
    }

    // Remove the branch.
    Instruction->eraseFromParent();
    Instruction = MBB.end();
    ++Count;
  }

  return Count;
}
Example #10
void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  auto *ZII =
    static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();

  // Skip the return instruction.
  assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks");

  uint64_t StackSize = getAllocatedStackSize(MF);
  if (ZFI->getLowSavedGPR()) {
    --MBBI;
    unsigned Opcode = MBBI->getOpcode();
    if (Opcode != SystemZ::LMG)
      llvm_unreachable("Expected to see callee-save register restore code");

    unsigned AddrOpNo = 2;
    DebugLoc DL = MBBI->getDebugLoc();
    uint64_t Offset = StackSize + MBBI->getOperand(AddrOpNo + 1).getImm();
    unsigned NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);

    // If the offset is too large, use the largest stack-aligned offset
    // and add the rest to the base register (the stack or frame pointer).
    if (!NewOpcode) {
      uint64_t NumBytes = Offset - 0x7fff8;
      emitIncrement(MBB, MBBI, DL, MBBI->getOperand(AddrOpNo).getReg(),
                    NumBytes, ZII);
      Offset -= NumBytes;
      NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);
      assert(NewOpcode && "No restore instruction available");
    }

    MBBI->setDesc(ZII->get(NewOpcode));
    MBBI->getOperand(AddrOpNo + 1).ChangeToImmediate(Offset);
  } else if (StackSize) {
    DebugLoc DL = MBBI->getDebugLoc();
    emitIncrement(MBB, MBBI, DL, SystemZ::R15D, StackSize, ZII);
  }
}
Example #11
bool DelJmp::
runOnMachineBasicBlock(MachineBasicBlock &MBB, MachineBasicBlock &MBBN) {
  bool Changed = false;

  MachineBasicBlock::iterator I = MBB.end();
  if (I != MBB.begin())
    I--;	// set I to the last instruction
  else
    return Changed;
    
  if (I->getOpcode() == Cpu0::JMP && I->getOperand(0).getMBB() == &MBBN) {
    // I is the instruction of "jmp #offset=0", as follows,
    //     jmp	$BB0_3
    // $BB0_3:
    //     ld	$4, 28($sp)
    ++NumDelJmp;
    MBB.erase(I);   // Delete the redundant jump to the fall-through block.
    Changed = true; // Notify the caller that the block changed.
  }
  return Changed;
}
Example #12
static bool combineRestoreSETHIi(MachineBasicBlock::iterator RestoreMI,
                                 MachineBasicBlock::iterator SetHiMI,
                                 const TargetInstrInfo *TII)
{
  // Before:  sethi imm3, %i[0-7]
  //          restore %g0, %g0, %g0
  //
  // After :  restore %g0, (imm3<<10), %o[0-7]

  unsigned reg = SetHiMI->getOperand(0).getReg();
  if (reg < SP::I0 || reg > SP::I7)
    return false;

  if (!SetHiMI->getOperand(1).isImm())
    return false;

  int64_t imm = SetHiMI->getOperand(1).getImm();

  // Is it a 3 bit immediate?
  if (!isInt<3>(imm))
    return false;

  // Make it a 13 bit immediate.
  imm = (imm << 10) & 0x1FFF;

  assert(RestoreMI->getOpcode() == SP::RESTORErr);

  RestoreMI->setDesc(TII->get(SP::RESTOREri));

  RestoreMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
  RestoreMI->getOperand(1).setReg(SP::G0);
  RestoreMI->getOperand(2).ChangeToImmediate(imm);


  // Erase the original SETHI.
  SetHiMI->eraseFromParent();

  return true;
}
Example #13
void
AArch64FrameLowering::eliminateCallFramePseudoInstr(
                                MachineFunction &MF,
                                MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI) const {
  const AArch64InstrInfo &TII =
    *static_cast<const AArch64InstrInfo *>(MF.getTarget().getInstrInfo());
  DebugLoc dl = MI->getDebugLoc();
  int Opcode = MI->getOpcode();
  bool IsDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? MI->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    int64_t Amount = MI->getOperand(0).getImm();
    Amount = RoundUpToAlignment(Amount, Align);
    if (!IsDestroy) Amount = -Amount;

    // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
    // doesn't have to pop anything), then the first operand will be zero too so
    // this adjustment is a no-op.
    if (CalleePopAmount == 0) {
      // FIXME: in-function stack adjustment for calls is limited to 12-bits
      // because there's no guaranteed temporary register available. Mostly call
      // frames will be allocated at the start of a function so this is OK, but
      // it is a limitation that needs dealing with.
      assert(Amount > -0xfff && Amount < 0xfff && "call frame too large");
      emitSPUpdate(MBB, MI, dl, TII, AArch64::NoRegister, Amount);
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from the
    // stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xfff && "call frame too large");
    emitSPUpdate(MBB, MI, dl, TII, AArch64::NoRegister, -CalleePopAmount);
  }

  MBB.erase(MI);
}
Example #14
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();

  // Expand pseudo register loads/stores: direct accesses become plain moves,
  // indirect ones become indirect read/write instructions.
  switch(MI->getOpcode()) {
  default:
    if (isRegisterLoad(*MI)) {
      unsigned RegIndex = MI->getOperand(2).getImm();
      unsigned Channel = MI->getOperand(3).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(1).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI->getOperand(0).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI->getOperand(0).getReg(),
                          Address, OffsetReg);
      }
    } else if (isRegisterStore(*MI)) {
      unsigned RegIndex = MI->getOperand(2).getImm();
      unsigned Channel = MI->getOperand(3).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(1).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI->getOperand(0).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI->getOperand(0).getReg(),
                         calculateIndirectAddress(RegIndex, Channel),
                         OffsetReg);
      }
    } else {
      return false;
    }
  }

  MBB->erase(MI);
  return true;
}
Example #15
MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
    MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
    // Do an extremely restricted form of load folding.
    // ISel will often create patterns like:
    // movl    4(%edi), %eax
    // movl    8(%edi), %ecx
    // movl    12(%edi), %edx
    // movl    %edx, 8(%esp)
    // movl    %ecx, 4(%esp)
    // movl    %eax, (%esp)
    // call
    // Get rid of those with prejudice.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
        return nullptr;

    // Make sure this is the only use of Reg.
    if (!MRI->hasOneNonDBGUse(Reg))
        return nullptr;

    MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);

    // Make sure the def is a MOV from memory.
    // If the def is in another block, give up.
    if (DefMI->getOpcode() != X86::MOV32rm ||
            DefMI->getParent() != FrameSetup->getParent())
        return nullptr;

    // Now, make sure everything else up until the ADJCALLSTACK is a sequence
    // of MOVs. To be less conservative would require duplicating a lot of the
    // logic from PeepholeOptimizer.
    // FIXME: A possibly better approach would be to teach the PeepholeOptimizer
    // to be smarter about folding into pushes.
    for (auto I = DefMI; I != FrameSetup; ++I)
        if (I->getOpcode() != X86::MOV32rm)
            return nullptr;

    return DefMI;
}
Example #16
void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  if (ST->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
    return;

  // There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG.
  if (LastInstWritesM0 && I->getOpcode() == AMDGPU::S_SENDMSG) {
    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0);
    LastInstWritesM0 = false;
    return;
  }

  // Set whether this instruction sets M0
  LastInstWritesM0 = false;

  unsigned NumOperands = I->getNumOperands();
  for (unsigned i = 0; i < NumOperands; i++) {
    const MachineOperand &Op = I->getOperand(i);

    if (Op.isReg() && Op.isDef() && Op.getReg() == AMDGPU::M0)
      LastInstWritesM0 = true;
  }
}
Example #17
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegDefs,
                            SmallSet<unsigned, 32>& RegUses)
{
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;

    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;
    if (MO.isDef())
      RegDefs.insert(Reg);
    if (MO.isUse()) {
      // Implicit register uses of retl are return values and
      // retl does not use them.
      if (MO.isImplicit() && MI->getOpcode() == SP::RETL)
        continue;
      RegUses.insert(Reg);
    }
  }
}
Example #18
bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
  MDT = &getAnalysis<MachineDominatorTree>();

  // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
  // any other instructions that might clobber the ctr register.
  for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
       I != IE; ++I) {
    MachineBasicBlock *MBB = I;
    if (!MDT->isReachableFromEntry(MBB))
      continue;

    for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
      MIIE = MBB->end(); MII != MIIE; ++MII) {
      unsigned Opc = MII->getOpcode();
      if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
          Opc == PPC::BDZ8  || Opc == PPC::BDZ)
        if (!verifyCTRBranch(MBB, MII))
          llvm_unreachable("Invalid PPC CTR loop!");
    }
  }

  return false;
}
Example #19
void SparcFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
    SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
    MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
    const SparcInstrInfo &TII =
        *static_cast<const SparcInstrInfo*>(MF.getTarget().getInstrInfo());
    DebugLoc dl = MBBI->getDebugLoc();
    assert(MBBI->getOpcode() == SP::RETL &&
           "Can only put epilog before 'retl' instruction!");
    if (!FuncInfo->isLeafProc()) {
        BuildMI(MBB, MBBI, dl, TII.get(SP::RESTORErr), SP::G0).addReg(SP::G0)
        .addReg(SP::G0);
        return;
    }
    MachineFrameInfo *MFI = MF.getFrameInfo();

    int NumBytes = (int) MFI->getStackSize();
    if (NumBytes == 0)
        return;

    NumBytes = SubTarget.getAdjustedFrameSize(NumBytes);
    emitSPAdjustment(MF, MBB, MBBI, NumBytes, SP::ADDrr, SP::ADDri);
}
Example #20
bool AlphaBSel::runOnMachineFunction(MachineFunction &Fn) {

  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    MachineBasicBlock *MBB = MFI;
    
    for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
         MBBI != EE; ++MBBI) {
      if (MBBI->getOpcode() == Alpha::COND_BRANCH_I ||
          MBBI->getOpcode() == Alpha::COND_BRANCH_F) {
        
        // condbranch operands:
        // 0. bc opcode
        // 1. reg
        // 2. target MBB
        const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
        MBBI->setDesc(TII->get(MBBI->getOperand(0).getImm()));
      }
    }
  }
  
  return true;
}
Example #21
// FIXME: Insert waits listed in Table 4.2 "Required User-Inserted Wait States"
// around other non-memory instructions.
bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
    bool Changes = false;

    TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
    TRI =
        static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());

    MRI = &MF.getRegInfo();

    WaitedOn = ZeroCounts;
    LastIssued = ZeroCounts;
    LastOpcodeType = OTHER;

    memset(&UsedRegs, 0, sizeof(UsedRegs));
    memset(&DefinedRegs, 0, sizeof(DefinedRegs));

    for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
            BI != BE; ++BI) {

        MachineBasicBlock &MBB = *BI;
        for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
                I != E; ++I) {

            // Wait for everything before a barrier.
            if (I->getOpcode() == AMDGPU::S_BARRIER)
                Changes |= insertWait(MBB, I, LastIssued);
            else
                Changes |= insertWait(MBB, I, handleOperands(*I));
            pushInstruction(MBB, I);
        }

        // Wait for everything at the end of the MBB
        Changes |= insertWait(MBB, MBB.getFirstTerminator(), LastIssued);
    }

    return Changes;
}
Example #22
static bool MaybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
                                      const MachineFunction &MF,
                                      WebAssemblyFunctionInfo &MFI,
                                      MachineRegisterInfo &MRI,
                                      const WebAssemblyInstrInfo &TII,
                                      unsigned FallthroughOpc,
                                      unsigned CopyLocalOpc) {
  if (DisableWebAssemblyFallthroughReturnOpt)
    return false;
  if (&MBB != &MF.back())
    return false;

  MachineBasicBlock::iterator End = MBB.end();
  --End;
  assert(End->getOpcode() == WebAssembly::END_FUNCTION);
  --End;
  if (&MI != &*End)
    return false;

  if (FallthroughOpc != WebAssembly::FALLTHROUGH_RETURN_VOID) {
    // If the operand isn't stackified, insert a COPY to read the operand and
    // stackify it.
    MachineOperand &MO = MI.getOperand(0);
    unsigned Reg = MO.getReg();
    if (!MFI.isVRegStackified(Reg)) {
      unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
      BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(CopyLocalOpc), NewReg)
          .addReg(Reg);
      MO.setReg(NewReg);
      MFI.stackifyVReg(NewReg);
    }
  }

  // Rewrite the return.
  MI.setDesc(TII.get(FallthroughOpc));
  return true;
}
Example #23
bool SIMemoryLegalizer::expandAtomicFence(const SIMemOpInfo &MOI,
                                          MachineBasicBlock::iterator &MI) {
  assert(MI->getOpcode() == AMDGPU::ATOMIC_FENCE);

  AtomicPseudoMIs.push_back(MI);
  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::Release ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      /// TODO: This relies on a barrier always generating a waitcnt
      /// for LDS to ensure it is not reordered with the completion of
      /// the preceding LDS operations. If barrier had a memory
      /// ordering and memory scope, then library does not need to
      /// generate a fence. Could add support in this file for
      /// barrier. SIInsertWaitcnt.cpp could then stop unconditionally
      /// adding waitcnt before a S_BARRIER.
      Changed |= CC->insertWait(MI, MOI.getScope(),
                                MOI.getOrderingAddrSpace(),
                                SIMemOp::LOAD | SIMemOp::STORE,
                                MOI.getIsCrossAddressSpaceOrdering(),
                                Position::BEFORE);

    if (MOI.getOrdering() == AtomicOrdering::Acquire ||
        MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
        MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
      Changed |= CC->insertCacheInvalidate(MI, MOI.getScope(),
                                           MOI.getOrderingAddrSpace(),
                                           Position::BEFORE);

    return Changed;
  }

  return Changed;
}
Example #24
MachineBasicBlock::iterator
X86InstrInfo::reverseBranchCondition(MachineBasicBlock::iterator MI) const {
  unsigned Opcode = MI->getOpcode();
  assert(isBranch(Opcode) && "MachineInstr must be a branch");
  unsigned ROpcode;
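  // Map each conditional jump to the jump with the inverted condition.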
  switch (Opcode) {
  default: assert(0 && "Cannot reverse unconditional branches!");
  case X86::JB:  ROpcode = X86::JAE; break;
  case X86::JAE: ROpcode = X86::JB;  break;
  case X86::JE:  ROpcode = X86::JNE; break;
  case X86::JNE: ROpcode = X86::JE;  break;
  case X86::JBE: ROpcode = X86::JA;  break;
  case X86::JA:  ROpcode = X86::JBE; break;
  case X86::JS:  ROpcode = X86::JNS; break;
  case X86::JNS: ROpcode = X86::JS;  break;
  case X86::JL:  ROpcode = X86::JGE; break;
  case X86::JGE: ROpcode = X86::JL;  break;
  case X86::JLE: ROpcode = X86::JG;  break;
  case X86::JG:  ROpcode = X86::JLE; break;
  }
  MachineBasicBlock* MBB = MI->getParent();
  MachineBasicBlock* TMBB = MI->getOperand(0).getMachineBasicBlock();
  return BuildMI(*MBB, MBB->erase(MI), ROpcode, 1).addMBB(TMBB);
}
Example #25
// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
// TODO: In this case we should really first try to eliminate this
// instruction entirely, since it is a plain copy.
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
  MachineInstr &MI = *II;
  unsigned Opc = II->getOpcode();
  // Check if this is a LEA of the form 'lea (%esp), %ebx'
  if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
      MI.getOperand(2).getImm() != 1 ||
      MI.getOperand(3).getReg() != X86::NoRegister ||
      MI.getOperand(4).getImm() != 0 ||
      MI.getOperand(5).getReg() != X86::NoRegister)
    return false;
  unsigned BasePtr = MI.getOperand(1).getReg();
  // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
  // be replaced with a 32-bit operand MOV which will zero extend the upper
  // 32-bits of the super register.
  if (Opc == X86::LEA64_32r)
    BasePtr = getX86SubSuperRegister(BasePtr, 32);
  unsigned NewDestReg = MI.getOperand(0).getReg();
  const X86InstrInfo *TII =
      MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
  TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
                   MI.getOperand(1).isKill());
  MI.eraseFromParent();
  return true;
}
Example #26
void Filler::insertCallUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegUses)
{

  switch(MI->getOpcode()) {
  default: llvm_unreachable("Unknown opcode.");
  case SP::CALL: break;
  case SP::JMPLrr:
  case SP::JMPLri:
    assert(MI->getNumOperands() >= 2);
    const MachineOperand &Reg = MI->getOperand(0);
    assert(Reg.isReg() && "JMPL first operand is not a register.");
    assert(Reg.isUse() && "JMPL first operand is not a use.");
    RegUses.insert(Reg.getReg());

    const MachineOperand &RegOrImm = MI->getOperand(1);
    if (RegOrImm.isImm())
        break;
    assert(RegOrImm.isReg() && "JMPLrr second operand is not a register.");
    assert(RegOrImm.isUse() && "JMPLrr second operand is not a use.");
    RegUses.insert(RegOrImm.getReg());
    break;
  }
}
Example #27
unsigned LanaiInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator Instruction = MBB.end();
  unsigned Count = 0;

  while (Instruction != MBB.begin()) {
    --Instruction;
    if (Instruction->isDebugValue())
      continue;
    if (Instruction->getOpcode() != Lanai::BT &&
        Instruction->getOpcode() != Lanai::BRCC) {
      break;
    }

    // Remove the branch.
    Instruction->eraseFromParent();
    Instruction = MBB.end();
    ++Count;
  }

  return Count;
}
Example #28
void GucRegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB
  MachineFrameInfo *MFI = MF.getFrameInfo();
  GucMachineFunctionInfo *GucFI = MF.getInfo<GucMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();

  uint64_t NumBytes = StackSize - GucFI->getCalleeSavedFrameSize();

  // Skip the callee-saved push instructions.
  while (MBBI != MBB.end() && (MBBI->getOpcode() == Guc::PUSHr))
    ++MBBI;

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  if (NumBytes) { // adjust stack pointer: SP -= numbytes
    // If there is an SUBri of SP immediately before this instruction, merge
    // the two.
    //NumBytes -= mergeSPUpdates(MBB, MBBI, true);
    // If there is an ADDri or SUBri of SP immediately after this
    // instruction, merge the two instructions.
    // mergeSPUpdatesDown(MBB, MBBI, &NumBytes);

    if (NumBytes) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL, TII.get(Guc::SUBsi), Guc::SP)
          .addReg(Guc::SP).addImm(NumBytes);
      // The FLG implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  }
}
Example #29
MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
    MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
  // Do an extremely restricted form of load folding.
  // ISel will often create patterns like:
  // movl    4(%edi), %eax
  // movl    8(%edi), %ecx
  // movl    12(%edi), %edx
  // movl    %edx, 8(%esp)
  // movl    %ecx, 4(%esp)
  // movl    %eax, (%esp)
  // call
  // Get rid of those with prejudice.
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return nullptr;

  // Make sure this is the only use of Reg.
  if (!MRI->hasOneNonDBGUse(Reg))
    return nullptr;

  MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);

  // Make sure the def is a MOV from memory.
  // If the def is in another block, give up.
  if ((DefMI->getOpcode() != X86::MOV32rm &&
       DefMI->getOpcode() != X86::MOV64rm) ||
      DefMI->getParent() != FrameSetup->getParent())
    return nullptr;

  // Make sure we don't have any instructions between DefMI and the
  // push that make folding the load illegal.
  for (auto I = DefMI; I != FrameSetup; ++I)
    if (I->isLoadFoldBarrier())
      return nullptr;

  return DefMI;
}
Example #30
// These are the common checks that need to be performed
// to determine if
// 1. compare instruction can be moved before jump.
// 2. feeder to the compare instruction can be moved before jump.
static bool commonChecksToProhibitNewValueJump(bool afterRA,
                          MachineBasicBlock::iterator MII) {

  // If store in path, bail out.
  if (MII->getDesc().mayStore())
    return false;

  // If call in path, bail out.
  if (MII->getOpcode() == Hexagon::CALLv3)
    return false;

  // if NVJ is running prior to RA, do the following checks.
  if (!afterRA) {
    // The following Target Opcode instructions are spurious
    // to new value jump. If they are in the path, bail out.
    // KILL sets kill flag on the opcode. It also sets up a
    // single register, out of pair.
    //    %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
    //    %R0<def> = KILL %R0, %D0<imp-use,kill>
    //    %P0<def> = CMPEQri %R0<kill>, 0
    // PHI can be anything after RA.
    // COPY can rematerialize things in between feeder, compare and nvj.
    if (MII->getOpcode() == TargetOpcode::KILL ||
        MII->getOpcode() == TargetOpcode::PHI  ||
        MII->getOpcode() == TargetOpcode::COPY)
      return false;

    // The following pseudo Hexagon instructions set "use" and "def"
    // of registers by individual passes in the backend. At this time,
    // we don't know the scope of usage and definitions of these
    // instructions.
    if (MII->getOpcode() == Hexagon::TFR_condset_rr ||
        MII->getOpcode() == Hexagon::TFR_condset_ii ||
        MII->getOpcode() == Hexagon::TFR_condset_ri ||
        MII->getOpcode() == Hexagon::TFR_condset_ir ||
        MII->getOpcode() == Hexagon::LDriw_pred     ||
        MII->getOpcode() == Hexagon::STriw_pred)
      return false;
  }

  return true;
}