Example 1
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val, unsigned VReg) const {
  MachineInstr *Return = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
  assert(Return && "Unable to build a return instruction?!");

  assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
  if (VReg) {
    assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy()) &&
           "Type not supported yet");
    const Function &F = *MIRBuilder.getMF().getFunction();
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned Size = DL.getTypeSizeInBits(Val->getType());
    assert((Size == 64 || Size == 32) && "Size not supported yet");
    unsigned ResReg = (Size == 32) ? AArch64::W0 : AArch64::X0;
    // Set the insertion point to be right before Return.
    MIRBuilder.setInstr(*Return, /* Before */ true);
    MachineInstr *Copy =
        MIRBuilder.buildInstr(TargetOpcode::COPY, ResReg, VReg);
    (void)Copy;
    assert(Copy->getNextNode() == Return &&
           "The insertion did not happen where we expected");
    MachineInstrBuilder(MIRBuilder.getMF(), Return)
        .addReg(ResReg, RegState::Implicit);
  }
  return true;
}
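For context, this hook is driven by GlobalISel's IRTranslator when it reaches a `ret`. A minimal sketch of such a call site follows; the surrounding class, `CLI`, `MIRBuilder`, and `getOrCreateVReg` are assumptions about the caller, not part of the excerpt:

// Sketch: how the IR translator would invoke the target's return lowering.
// CLI is the target's CallLowering implementation; getOrCreateVReg maps an
// IR Value to its virtual register.
bool IRTranslator::translateReturn(const Instruction &Inst) {
  assert(isa<ReturnInst>(Inst) && "expected a return");
  const Value *Ret = cast<ReturnInst>(Inst).getReturnValue();
  // A `ret` is the last instruction of its block, so the target is free to
  // move the insertion point around while lowering it.
  return CLI->lowerReturn(MIRBuilder, Ret, Ret ? getOrCreateVReg(*Ret) : 0);
}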
Example 2
MachineInstr *
ReloadIPPass::addReloadIPInstr(MachineFunction::iterator &MFI,
                               MachineBasicBlock::iterator &MBBI) const {
  MachineInstr *MI = MBBI;
  StringRef JTPrefix(JumpInstrTables::jump_func_prefix);
  switch (MI->getOpcode()) {
  case X86::MOV32rm:
    if (MI->getNumOperands() > 4 && MI->getOperand(4).isGlobal() &&
        MI->getOperand(4).getGlobal()->getName().startswith(JTPrefix)) {
      // Guard against MI being the last instruction in its block.
      const MachineInstr *Next = MI->getNextNode();
      if (Next &&
          ((TM->Options.CFIType == CFIntegrity::Add &&
            Next->getOpcode() == X86::AND32rr) ||
           (TM->Options.CFIType == CFIntegrity::Sub &&
            Next->getOpcode() == X86::SUB32rr) ||
           // The original checked X86::SUB32rr here too, which looks like a
           // copy-paste slip; a rotate opcode such as X86::ROR32ri is
           // presumably what was intended.
           (TM->Options.CFIType == CFIntegrity::Ror &&
            Next->getOpcode() == X86::ROR32ri))) {
        // MI->dump(); // For debugging
        DEBUG(dbgs() << TM->getSubtargetImpl()->getInstrInfo()->getName(
                            MI->getOpcode())
                     << ": Op: "
                     << MI->getOperand(4).getGlobal()->getName().str()
                     << "\n");
        return MI;
      }
    }
    // (Truncated in the source; presumably the non-matching cases fall
    // through to the null return below.)
    break;
  default:
    break;
  }
  return nullptr;
}
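The CFIType checks above branch on a small enum carried in TargetOptions. Its definition is not in the excerpt, so treat the following as an assumption about its shape only:

// Assumed shape of the CFI flavor option referenced above; the three values
// correspond to the masking instruction the pass expects to follow the load.
enum class CFIntegrity { Add, Sub, Ror };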
Example 3
/// Returns \p New if it is dominated by \p Old, otherwise returns \p Old.
/// \p M maps each instruction to its dominating order, such that
/// M[A] > M[B] guarantees that A is dominated by B.
/// If \p New is not in \p M, returns \p Old. Otherwise, if \p Old is null,
/// returns \p New.
static MachineInstr *FindDominatedInstruction(MachineInstr &New,
                                              MachineInstr *Old,
                                              const InstOrderMap &M) {
  auto NewIter = M.find(&New);
  if (NewIter == M.end())
    return Old;
  if (Old == nullptr)
    return &New;
  unsigned OrderOld = M.find(Old)->second;
  unsigned OrderNew = NewIter->second;
  if (OrderOld != OrderNew)
    return OrderOld < OrderNew ? &New : Old;
  // OrderOld == OrderNew, so iterate down from Old to see if it can reach
  // New; if it can, New is dominated by Old. Stop when we leave the run of
  // instructions sharing this order, or when we walk off the end of the
  // block (getNextNode() may return null).
  for (MachineInstr *I = Old->getNextNode(); I; I = I->getNextNode()) {
    auto It = M.find(I);
    if (It == M.end() || It->second != OrderNew)
      break;
    if (I == &New)
      return &New;
  }
  return Old;
}
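The order map consumed here is produced by a small helper elsewhere in the pass. A sketch of its likely shape follows; the InstOrderMap alias matches how the map is used above, and the body is the obvious linear numbering:

// Sketch: assign each instruction from Start to the end of its block a
// strictly increasing order number. Duplicate orders only appear later,
// when the pass reassigns a hoisted instruction the order of its insertion
// point.
using InstOrderMap = DenseMap<MachineInstr *, unsigned>;

static void BuildInstOrderMap(MachineBasicBlock::iterator Start,
                              InstOrderMap &M) {
  M.clear();
  unsigned i = 0;
  for (MachineInstr &I : make_range(Start, Start->getParent()->end()))
    M[&I] = i++;
}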
Example 4
MachineInstr *ARCOptAddrMode::tryToCombine(MachineInstr &Ldst) {
  assert((Ldst.mayLoad() || Ldst.mayStore()) && "LD/ST instruction expected");

  unsigned BasePos, OffsetPos;

  LLVM_DEBUG(dbgs() << "[ABAW] tryToCombine " << Ldst);
  if (!AII->getBaseAndOffsetPosition(Ldst, BasePos, OffsetPos)) {
    LLVM_DEBUG(dbgs() << "[ABAW] Not a recognized load/store\n");
    return nullptr;
  }

  MachineOperand &Base = Ldst.getOperand(BasePos);
  MachineOperand &Offset = Ldst.getOperand(OffsetPos);

  assert(Base.isReg() && "Base operand must be register");
  if (!Offset.isImm()) {
    LLVM_DEBUG(dbgs() << "[ABAW] Offset is not immediate\n");
    return nullptr;
  }

  unsigned B = Base.getReg();
  if (TargetRegisterInfo::isStackSlot(B) ||
      !TargetRegisterInfo::isVirtualRegister(B)) {
    LLVM_DEBUG(dbgs() << "[ABAW] Base is not VReg\n");
    return nullptr;
  }

  // TODO: try to generate address preincrement
  if (Offset.getImm() != 0) {
    LLVM_DEBUG(dbgs() << "[ABAW] Non-zero offset\n");
    return nullptr;
  }

  for (auto &Add : MRI->use_nodbg_instructions(B)) {
    int64_t Incr;
    if (!isAddConstantOp(Add, Incr))
      continue;
    if (!isValidLoadStoreOffset(Incr))
      continue;

    SmallVector<MachineInstr *, 8> Uses;
    MachineInstr *MoveTo = canJoinInstructions(&Ldst, &Add, &Uses);

    if (!MoveTo)
      continue;

    if (!canFixPastUses(Uses, Add.getOperand(2), B))
      continue;

    LLVM_DEBUG(MachineInstr *First = &Ldst; MachineInstr *Last = &Add;
               if (MDT->dominates(Last, First)) std::swap(First, Last);
               dbgs() << "[ABAW] Instructions " << *First << " and " << *Last
                      << " combined\n";);

    MachineInstr *Result = Ldst.getNextNode();
    if (MoveTo == &Add) {
      Ldst.removeFromParent();
      Add.getParent()->insertAfter(Add.getIterator(), &Ldst);
    }
    if (Result == &Add)
      Result = Result->getNextNode();

    fixPastUses(Uses, B, Incr);

    int NewOpcode = ARC::getPostIncOpcode(Ldst.getOpcode());
    assert(NewOpcode > 0 && "No postincrement form found");
    unsigned NewBaseReg = Add.getOperand(0).getReg();
    changeToAddrMode(Ldst, NewOpcode, NewBaseReg, Add.getOperand(2));
    Add.eraseFromParent();

    return Result;
  }

  return nullptr;
}
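The loop above leans on helpers the excerpt does not show. Here is a sketch of the shape isAddConstantOp plausibly has; the ARC opcode names are assumptions based on the backend's add/sub immediate forms:

// Sketch: match "Dst = ADD/SUB Base, imm" and report the signed increment.
static bool isAddConstantOp(const MachineInstr &MI, int64_t &Amount) {
  int64_t Sign = 1;
  switch (MI.getOpcode()) {
  case ARC::SUB_rru6:
  case ARC::SUB_rrlimm:
    Sign = -1;
    LLVM_FALLTHROUGH;
  case ARC::ADD_rru6:
  case ARC::ADD_rrlimm:
    assert(MI.getOperand(2).isImm() && "Expected immediate operand");
    Amount = Sign * MI.getOperand(2).getImm();
    return true;
  default:
    return false;
  }
}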
Example 5
bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();

  LLVM_DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');

  InstOrderMap IOM;
  // Map from register to the instruction order (the value in IOM) at which
  // the register was last used. When moving an instruction up, we must make
  // sure that none of its defs (including dead defs) cross the last use of
  // the registers they define.
  DenseMap<unsigned, std::pair<unsigned, MachineInstr *>> UseMap;

  for (MachineBasicBlock &MBB : MF) {
    if (MBB.empty())
      continue;
    bool SawStore = false;
    BuildInstOrderMap(MBB.begin(), IOM);
    UseMap.clear();

    for (MachineBasicBlock::iterator Next = MBB.begin(); Next != MBB.end();) {
      MachineInstr &MI = *Next;
      ++Next;
      if (MI.isPHI() || MI.isDebugInstr())
        continue;
      if (MI.mayStore())
        SawStore = true;

      unsigned CurrentOrder = IOM[&MI];
      unsigned Barrier = 0;
      MachineInstr *BarrierMI = nullptr;
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isReg() || MO.isDebug())
          continue;
        if (MO.isUse())
          UseMap[MO.getReg()] = std::make_pair(CurrentOrder, &MI);
        else if (MO.isDead() && UseMap.count(MO.getReg()))
          // Barrier is the last instruction where MO gets used. MI should
          // not be moved above Barrier.
          if (Barrier < UseMap[MO.getReg()].first) {
            Barrier = UseMap[MO.getReg()].first;
            BarrierMI = UseMap[MO.getReg()].second;
          }
      }

      if (!MI.isSafeToMove(nullptr, SawStore)) {
        // If MI has side effects, it should become a barrier for code motion.
        // IOM is rebuilt from the next instruction to prevent later
        // instructions from being moved before this MI.
        if (MI.hasUnmodeledSideEffects() && Next != MBB.end()) {
          BuildInstOrderMap(Next, IOM);
          SawStore = false;
        }
        continue;
      }

      const MachineOperand *DefMO = nullptr;
      MachineInstr *Insert = nullptr;

      // Number of live ranges that will be shortened. We do not count live
      // ranges defined by a COPY, as they could be coalesced later.
      unsigned NumEligibleUse = 0;

      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isReg() || MO.isDead() || MO.isDebug())
          continue;
        unsigned Reg = MO.getReg();
        // Do not move the instruction if it def/uses a physical register,
        // unless it is a constant physical register or a noreg.
        if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
          if (!Reg || MRI.isConstantPhysReg(Reg))
            continue;
          Insert = nullptr;
          break;
        }
        if (MO.isDef()) {
          // Do not move if there is more than one def.
          if (DefMO) {
            Insert = nullptr;
            break;
          }
          DefMO = &MO;
        } else if (MRI.hasOneNonDBGUse(Reg) && MRI.hasOneDef(Reg) && DefMO &&
                   MRI.getRegClass(DefMO->getReg()) ==
                       MRI.getRegClass(MO.getReg())) {
          // The heuristic does not handle different register classes yet
          // (registers of different sizes, looser/tighter constraints),
          // because that would require a more accurate model of register
          // pressure.
          MachineInstr &DefInstr = *MRI.def_instr_begin(Reg);
          if (!DefInstr.isCopy())
            NumEligibleUse++;
          Insert = FindDominatedInstruction(DefInstr, Insert, IOM);
        } else {
          Insert = nullptr;
          break;
        }
      }

      // If Barrier equals IOM[Insert], traverse forward from Insert to check
      // whether BarrierMI comes after it; if so, we should not hoist.
      for (MachineInstr *I = Insert; I && IOM[I] == Barrier;
           I = I->getNextNode())
        if (I == BarrierMI) {
          Insert = nullptr;
          break;
        }
      // Move the instruction when the number of shrunk live ranges is > 1.
      if (DefMO && Insert && NumEligibleUse > 1 && Barrier <= IOM[Insert]) {
        MachineBasicBlock::iterator I = std::next(Insert->getIterator());
        // Skip all the PHI and debug instructions.
        while (I != MBB.end() && (I->isPHI() || I->isDebugInstr()))
          I = std::next(I);
        if (I == MI.getIterator())
          continue;

        // Update the dominator order to be the same as the insertion point's.
        // We do this to maintain a non-decreasing order without needing to
        // update all instruction orders after the insertion point.
        unsigned NewOrder = IOM[&*I];
        IOM[&MI] = NewOrder;
        NumInstrsHoistedToShrinkLiveRange++;

        // Find MI's debug value following MI.
        MachineBasicBlock::iterator EndIter = std::next(MI.getIterator());
        if (MI.getOperand(0).isReg())
          for (; EndIter != MBB.end() && EndIter->isDebugValue() &&
                 EndIter->getOperand(0).isReg() &&
                 EndIter->getOperand(0).getReg() == MI.getOperand(0).getReg();
               ++EndIter, ++Next)
            IOM[&*EndIter] = NewOrder;
        MBB.splice(I, &MBB, MI.getIterator(), EndIter);
      }
    }
  }
  return false;
}
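The body above also references file-level scaffolding the excerpt omits (DEBUG_TYPE, the statistic it bumps, the pass class itself). A sketch of the standard LLVM pass shape, offered as an assumption rather than the verbatim file:

#define DEBUG_TYPE "lrshrink"

// The counter bumped when an instruction is hoisted (used above).
STATISTIC(NumInstrsHoistedToShrinkLiveRange,
          "Number of insts hoisted to shrink live range");

namespace {
class LiveRangeShrink : public MachineFunctionPass {
public:
  static char ID;
  LiveRangeShrink() : MachineFunctionPass(ID) {
    initializeLiveRangeShrinkPass(*PassRegistry::getPassRegistry());
  }
  bool runOnMachineFunction(MachineFunction &MF) override;
};
} // end anonymous namespace

char LiveRangeShrink::ID = 0;
INITIALIZE_PASS(LiveRangeShrink, "lrshrink", "Live Range Shrink Pass", false,
                false)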