Example 1
void llvm::scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS) {
  // FIXME: Iterating over the instruction stream is unnecessary. We can simply
  // iterate over the vreg use list, which at this point only contains machine
  // operands for which eliminateFrameIndex needs a new scratch reg.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  // Shortcut.
  if (MRI.getNumVirtRegs() == 0) {
    MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
    return;
  }

  // Run through the instructions and find any virtual registers.
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.empty())
      continue;

    bool Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
    if (Again) {
      DEBUG(dbgs() << "Warning: Required two scavenging passes for block "
            << MBB.getName() << '\n');
      Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
      // The target required a 2nd run (because it created new vregs while
      // spilling). Refuse to do another pass to keep compile time in check.
      if (Again)
        report_fatal_error("Incomplete scavenging after 2nd pass");
    }
  }

  MRI.clearVirtRegs();
  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
}
Example 2
void MIRPrinter::print(const MachineFunction &MF) {
  initRegisterMaskIds(MF);

  yaml::MachineFunction YamlMF;
  YamlMF.Name = MF.getName();
  YamlMF.Alignment = MF.getAlignment();
  YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
  YamlMF.HasInlineAsm = MF.hasInlineAsm();
  YamlMF.AllVRegsAllocated = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::AllVRegsAllocated);

  convert(YamlMF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
  ModuleSlotTracker MST(MF.getFunction()->getParent());
  MST.incorporateFunction(*MF.getFunction());
  convert(MST, YamlMF.FrameInfo, *MF.getFrameInfo());
  convertStackObjects(YamlMF, *MF.getFrameInfo(), MF.getMMI(), MST,
                      MF.getSubtarget().getRegisterInfo());
  if (const auto *ConstantPool = MF.getConstantPool())
    convert(YamlMF, *ConstantPool);
  if (const auto *JumpTableInfo = MF.getJumpTableInfo())
    convert(MST, YamlMF.JumpTableInfo, *JumpTableInfo);
  raw_string_ostream StrOS(YamlMF.Body.Value.Value);
  bool IsNewlineNeeded = false;
  for (const auto &MBB : MF) {
    if (IsNewlineNeeded)
      StrOS << "\n";
    MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping)
        .print(MBB);
    IsNewlineNeeded = true;
  }
  StrOS.flush();
  yaml::Output Out(OS);
  Out << YamlMF;
}
Example 3
void MIRParserImpl::computeFunctionProperties(MachineFunction &MF) {
  MachineFunctionProperties &Properties = MF.getProperties();

  bool HasPHI = false;
  bool HasInlineAsm = false;
  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (MI.isPHI())
        HasPHI = true;
      if (MI.isInlineAsm())
        HasInlineAsm = true;
    }
  }
  if (!HasPHI)
    Properties.set(MachineFunctionProperties::Property::NoPHIs);
  MF.setHasInlineAsm(HasInlineAsm);

  if (isSSA(MF))
    Properties.set(MachineFunctionProperties::Property::IsSSA);
  else
    Properties.reset(MachineFunctionProperties::Property::IsSSA);

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  if (MRI.getNumVirtRegs() == 0)
    Properties.set(MachineFunctionProperties::Property::NoVRegs);
}
Example 4
bool MIRParserImpl::initializeMachineFunction(MachineFunction &MF) {
  auto It = Functions.find(MF.getName());
  if (It == Functions.end())
    return error(Twine("no machine function information for function '") +
                 MF.getName() + "' in the MIR file");
  // TODO: Recreate the machine function.
  const yaml::MachineFunction &YamlMF = *It->getValue();
  if (YamlMF.Alignment)
    MF.setAlignment(YamlMF.Alignment);
  MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
  MF.setHasInlineAsm(YamlMF.HasInlineAsm);
  if (YamlMF.AllVRegsAllocated)
    MF.getProperties().set(MachineFunctionProperties::Property::AllVRegsAllocated);
  PerFunctionMIParsingState PFS;
  if (initializeRegisterInfo(MF, YamlMF, PFS))
    return true;
  if (!YamlMF.Constants.empty()) {
    auto *ConstantPool = MF.getConstantPool();
    assert(ConstantPool && "Constant pool must be created");
    if (initializeConstantPool(*ConstantPool, YamlMF, MF,
                               PFS.ConstantPoolSlots))
      return true;
  }

  SMDiagnostic Error;
  if (parseMachineBasicBlockDefinitions(MF, YamlMF.Body.Value.Value, PFS,
                                        IRSlots, Error)) {
    reportDiagnostic(
        diagFromBlockStringDiag(Error, YamlMF.Body.Value.SourceRange));
    return true;
  }

  if (MF.empty())
    return error(Twine("machine function '") + Twine(MF.getName()) +
                 "' requires at least one machine basic block in its body");
  // Initialize the frame information after creating all the MBBs so that the
  // MBB references in the frame information can be resolved.
  if (initializeFrameInfo(MF, YamlMF, PFS))
    return true;
  // Initialize the jump table after creating all the MBBs so that the MBB
  // references can be resolved.
  if (!YamlMF.JumpTableInfo.Entries.empty() &&
      initializeJumpTableInfo(MF, YamlMF.JumpTableInfo, PFS))
    return true;
  // Parse the machine instructions after creating all of the MBBs so that the
  // parser can resolve the MBB references.
  if (parseMachineInstructions(MF, YamlMF.Body.Value.Value, PFS, IRSlots,
                               Error)) {
    reportDiagnostic(
        diagFromBlockStringDiag(Error, YamlMF.Body.Value.SourceRange));
    return true;
  }
  inferRegisterInfo(MF, YamlMF);
  // FIXME: This is a temporary workaround until the reserved registers can be
  // serialized.
  MF.getRegInfo().freezeReservedRegs(MF);
  MF.verify();
  return false;
}
Example 5
bool AArch64PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  auto *TPC = &getAnalysis<TargetPassConfig>();
  AArch64PreLegalizerCombinerInfo PCInfo;
  Combiner C(PCInfo, TPC);
  return C.combineMachineInstrs(MF, /*CSEInfo*/ nullptr);
}
Example 6
void MIRPrinter::print(const MachineFunction &MF) {
  initRegisterMaskIds(MF);

  yaml::MachineFunction YamlMF;
  YamlMF.Name = MF.getName();
  YamlMF.Alignment = MF.getAlignment();
  YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
  YamlMF.HasWinCFI = MF.hasWinCFI();

  YamlMF.Legalized = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Legalized);
  YamlMF.RegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  YamlMF.Selected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  YamlMF.FailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  convert(YamlMF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
  ModuleSlotTracker MST(MF.getFunction().getParent());
  MST.incorporateFunction(MF.getFunction());
  convert(MST, YamlMF.FrameInfo, MF.getFrameInfo());
  convertStackObjects(YamlMF, MF, MST);
  if (const auto *ConstantPool = MF.getConstantPool())
    convert(YamlMF, *ConstantPool);
  if (const auto *JumpTableInfo = MF.getJumpTableInfo())
    convert(MST, YamlMF.JumpTableInfo, *JumpTableInfo);
  raw_string_ostream StrOS(YamlMF.Body.Value.Value);
  bool IsNewlineNeeded = false;
  for (const auto &MBB : MF) {
    if (IsNewlineNeeded)
      StrOS << "\n";
    MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping)
        .print(MBB);
    IsNewlineNeeded = true;
  }
  StrOS.flush();
  yaml::Output Out(OS);
  if (!SimplifyMIR)
      Out.setWriteDefaultValues(true);
  Out << YamlMF;
}
Example 7
void X86RetpolineThunks::populateThunk(MachineFunction &MF,
                                       unsigned Reg) {
  // Set MF properties. We never use vregs...
  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);

  // Grab the entry MBB and erase any other blocks. O0 codegen appears to
  // generate two bbs for the entry block.
  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();
  while (MF.size() > 1)
    MF.erase(std::next(MF.begin()));

  MachineBasicBlock *CaptureSpec = MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MachineBasicBlock *CallTarget = MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MCSymbol *TargetSym = MF.getContext().createTempSymbol();
  MF.push_back(CaptureSpec);
  MF.push_back(CallTarget);

  const unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
  const unsigned RetOpc = Is64Bit ? X86::RETQ : X86::RETL;

  Entry->addLiveIn(Reg);
  BuildMI(Entry, DebugLoc(), TII->get(CallOpc)).addSym(TargetSym);

  // The MIR verifier thinks that the CALL in the entry block will fall through
  // to CaptureSpec, so mark it as the successor. Technically, CallTarget is
  // the successor, but the MIR verifier doesn't know how to cope with that.
  Entry->addSuccessor(CaptureSpec);

  // In the capture loop for speculation, we want to stop the processor from
  // speculating as fast as possible. On Intel processors, the PAUSE instruction
  // will block speculation without consuming any execution resources. On AMD
  // processors, the PAUSE instruction is (essentially) a nop, so we also use an
  // LFENCE instruction which they have advised will stop speculation as well
  // with minimal resource utilization. We still end the capture with a jump to
  // form an infinite loop, to fully guarantee that no matter which
  // implementation of the x86 ISA is running, speculation down this code path
  // never escapes.
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::PAUSE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::LFENCE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::JMP_1)).addMBB(CaptureSpec);
  CaptureSpec->setHasAddressTaken();
  CaptureSpec->addSuccessor(CaptureSpec);

  CallTarget->addLiveIn(Reg);
  CallTarget->setHasAddressTaken();
  CallTarget->setAlignment(4);
  insertRegReturnAddrClobber(*CallTarget, Reg);
  CallTarget->back().setPreInstrSymbol(MF, TargetSym);
  BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
}
Example 8
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}
Example 9
bool Combiner::combineMachineInstrs(MachineFunction &MF) {
  // If the ISel pipeline failed, do not bother running this pass.
  // FIXME: Should this be here or in individual combiner passes?
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;

  MRI = &MF.getRegInfo();
  Builder.setMF(MF);

  DEBUG(dbgs() << "Generic MI Combiner for: " << MF.getName() << '\n');

  MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);

  bool MFChanged = false;
  bool Changed;

  do {
    // Collect all instructions. Do a post-order traversal of the basic blocks
    // and insert into the list bottom-up, so that popping with pop_back_val
    // traverses the blocks in top-down RPO order.
    Changed = false;
    GISelWorkList<512> WorkList;
    for (MachineBasicBlock *MBB : post_order(&MF)) {
      if (MBB->empty())
        continue;
      for (auto MII = MBB->rbegin(), MIE = MBB->rend(); MII != MIE;) {
        MachineInstr *CurMI = &*MII;
        ++MII;
        // Erase dead insts before even adding to the list.
        if (isTriviallyDead(*CurMI, *MRI)) {
          DEBUG(dbgs() << *CurMI << "Is dead; erasing.\n");
          CurMI->eraseFromParentAndMarkDBGValuesForRemoval();
          continue;
        }
        WorkList.insert(CurMI);
      }
    }
    // Main Loop. Process the instructions here.
    while (!WorkList.empty()) {
      MachineInstr *CurrInst = WorkList.pop_back_val();
      DEBUG(dbgs() << "Try combining " << *CurrInst << "\n";);
      Changed |= CInfo.combine(*CurrInst, Builder);
    }
    MFChanged |= Changed;
  } while (Changed);
  return MFChanged;
}
Example 10
void X86RetpolineThunks::populateThunk(MachineFunction &MF,
                                       Optional<unsigned> Reg) {
  // Set MF properties. We never use vregs...
  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);

  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  MachineBasicBlock *CaptureSpec = MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MachineBasicBlock *CallTarget = MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MF.push_back(CaptureSpec);
  MF.push_back(CallTarget);

  const unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
  const unsigned RetOpc = Is64Bit ? X86::RETQ : X86::RETL;

  BuildMI(Entry, DebugLoc(), TII->get(CallOpc)).addMBB(CallTarget);
  Entry->addSuccessor(CallTarget);
  Entry->addSuccessor(CaptureSpec);
  CallTarget->setHasAddressTaken();

  // In the capture loop for speculation, we want to stop the processor from
  // speculating as fast as possible. On Intel processors, the PAUSE instruction
  // will block speculation without consuming any execution resources. On AMD
  // processors, the PAUSE instruction is (essentially) a nop, so we also use an
  // LFENCE instruction which they have advised will stop speculation as well
  // with minimal resource utilization. We still end the capture with a jump to
  // form an infinite loop, to fully guarantee that no matter which
  // implementation of the x86 ISA is running, speculation down this code path
  // never escapes.
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::PAUSE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::LFENCE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::JMP_1)).addMBB(CaptureSpec);
  CaptureSpec->setHasAddressTaken();
  CaptureSpec->addSuccessor(CaptureSpec);

  CallTarget->setAlignment(4);
  insertRegReturnAddrClobber(*CallTarget, *Reg);
  BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
}
Example 11
void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      for (MachineBasicBlock *SaveBlock : SaveBlocks) {
        insertCSRSaves(*SaveBlock, CSI);
        // Update the live-in information of all the blocks up to the save
        // point.
        updateLiveness(MF);
      }
      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}
Example 12
bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
  // If the ISel pipeline failed, do not bother running that pass.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  DEBUG(dbgs() << "Legalize Machine IR for: " << MF.getName() << '\n');
  init(MF);
  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
  LegalizerHelper Helper(MF);

  // FIXME: an instruction may need more than one pass before it is legal. For
  // example on most architectures <3 x i3> is doubly-illegal. It would
  // typically proceed along a path like: <3 x i3> -> <3 x i8> -> <8 x i8>. We
  // probably want a worklist of instructions rather than naive iterate until
  // convergence for performance reasons.
  bool Changed = false;
  MachineBasicBlock::iterator NextMI;
  using VecType = SmallSetVector<MachineInstr *, 8>;
  VecType WorkList;
  VecType CombineList;
  for (auto &MBB : MF) {
    for (auto MI = MBB.begin(); MI != MBB.end(); MI = NextMI) {
      // Get the next Instruction before we try to legalize, because there's a
      // good chance MI will be deleted.
      NextMI = std::next(MI);

      // Only legalize pre-isel generic instructions: others don't have types
      // and are assumed to be legal.
      if (!isPreISelGenericOpcode(MI->getOpcode()))
        continue;
      unsigned NumNewInsns = 0;
      WorkList.clear();
      CombineList.clear();
      Helper.MIRBuilder.recordInsertions([&](MachineInstr *MI) {
        // Only legalize pre-isel generic instructions.
        // The legalization process could generate target-specific pseudo
        // instructions with generic types. Don't record them.
        if (isPreISelGenericOpcode(MI->getOpcode())) {
          ++NumNewInsns;
          WorkList.insert(MI);
          CombineList.insert(MI);
        }
      });
      WorkList.insert(&*MI);
      LegalizerCombiner C(Helper.MIRBuilder, MF.getRegInfo(),
                          Helper.getLegalizerInfo());
      bool Changed = false;
      LegalizerHelper::LegalizeResult Res;
      do {
        assert(!WorkList.empty() && "Expecting illegal ops");
        while (!WorkList.empty()) {
          NumNewInsns = 0;
          MachineInstr *CurrInst = WorkList.pop_back_val();
          Res = Helper.legalizeInstrStep(*CurrInst);
          // Error out if we couldn't legalize this instruction. We may want to
          // fall back to DAG ISel instead in the future.
          if (Res == LegalizerHelper::UnableToLegalize) {
            Helper.MIRBuilder.stopRecordingInsertions();
            reportGISelFailure(MF, TPC, MORE, "gisel-legalize",
                               "unable to legalize instruction", *CurrInst);
            return false;
          }
          Changed |= Res == LegalizerHelper::Legalized;
          // If CurrInst was legalized, there's a good chance that it might have
          // been erased. So remove it from the Combine List.
          if (Res == LegalizerHelper::Legalized)
            CombineList.remove(CurrInst);

#ifndef NDEBUG
          if (NumNewInsns)
            for (unsigned I = WorkList.size() - NumNewInsns,
                          E = WorkList.size();
                 I != E; ++I)
              DEBUG(dbgs() << ".. .. New MI: " << *WorkList[I];);
#endif
        }
        // Do the combines.
        while (!CombineList.empty()) {
          NumNewInsns = 0;
          MachineInstr *CurrInst = CombineList.pop_back_val();
          SmallVector<MachineInstr *, 4> DeadInstructions;
          Changed |= C.tryCombineInstruction(*CurrInst, DeadInstructions);
          for (auto *DeadMI : DeadInstructions) {
            DEBUG(dbgs() << ".. Erasing Dead Instruction " << *DeadMI);
            CombineList.remove(DeadMI);
            WorkList.remove(DeadMI);
            DeadMI->eraseFromParent();
          }
#ifndef NDEBUG
          if (NumNewInsns)
            for (unsigned I = CombineList.size() - NumNewInsns,
                          E = CombineList.size();
                 I != E; ++I)
              DEBUG(dbgs() << ".. .. Combine New MI: " << *CombineList[I];);
#endif
        }
      } while (!WorkList.empty());

      Helper.MIRBuilder.stopRecordingInsertions();
    }
Example 13
void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const ThumbRegisterInfo *RegInfo =
      static_cast<const ThumbRegisterInfo *>(STI.getRegisterInfo());
  const Thumb1InstrInfo &TII =
      *static_cast<const Thumb1InstrInfo *>(STI.getInstrInfo());

  unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
  unsigned NumBytes = MFI.getStackSize();
  assert(NumBytes >= ArgRegsSaveSize &&
         "ArgRegsSaveSize is included in NumBytes");
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc dl;
  
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned BasePtr = RegInfo->getBaseRegister();
  int CFAOffset = 0;

  // Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
  NumBytes = (NumBytes + 3) & ~3;
  MFI.setStackSize(NumBytes);

  // Determine the size of each callee-save spill area and record which frame
  // index belongs to which spill area.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;

  if (ArgRegsSaveSize) {
    emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -ArgRegsSaveSize,
                 MachineInstr::FrameSetup);
    CFAOffset -= ArgRegsSaveSize;
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }

  if (!AFI->hasStackFrame()) {
    if (NumBytes - ArgRegsSaveSize != 0) {
      emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -(NumBytes - ArgRegsSaveSize),
                   MachineInstr::FrameSetup);
      CFAOffset -= NumBytes - ArgRegsSaveSize;
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
    return;
  }

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
    switch (Reg) {
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
      if (STI.splitFramePushPop(MF)) {
        GPRCS2Size += 4;
        break;
      }
      LLVM_FALLTHROUGH;
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      GPRCS1Size += 4;
      break;
    default:
      DPRCSSize += 8;
    }
  }

  if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH) {
    ++MBBI;
  }

  // Determine starting offsets of spill areas.
  unsigned DPRCSOffset  = NumBytes - ArgRegsSaveSize - (GPRCS1Size + GPRCS2Size + DPRCSSize);
  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
  bool HasFP = hasFP(MF);
  if (HasFP)
    AFI->setFramePtrSpillOffset(MFI.getObjectOffset(FramePtrSpillFI) +
                                NumBytes);
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
  NumBytes = DPRCSOffset;

  int FramePtrOffsetInBlock = 0;
  unsigned adjustedGPRCS1Size = GPRCS1Size;
  if (GPRCS1Size > 0 && GPRCS2Size == 0 &&
      tryFoldSPUpdateIntoPushPop(STI, MF, &*std::prev(MBBI), NumBytes)) {
    FramePtrOffsetInBlock = NumBytes;
    adjustedGPRCS1Size += NumBytes;
    NumBytes = 0;
  }

  if (adjustedGPRCS1Size) {
    CFAOffset -= adjustedGPRCS1Size;
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
  for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
         E = CSI.end(); I != E; ++I) {
    unsigned Reg = I->getReg();
    int FI = I->getFrameIdx();
    switch (Reg) {
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
    case ARM::R12:
      if (STI.splitFramePushPop(MF))
        break;
      LLVM_FALLTHROUGH;
    case ARM::R0:
    case ARM::R1:
    case ARM::R2:
    case ARM::R3:
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
          nullptr, MRI->getDwarfRegNum(Reg, true), MFI.getObjectOffset(FI)));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
      break;
    }
  }

  // Adjust FP so it points to the stack slot that contains the previous FP.
  if (HasFP) {
    FramePtrOffsetInBlock +=
        MFI.getObjectOffset(FramePtrSpillFI) + GPRCS1Size + ArgRegsSaveSize;
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
        .addReg(ARM::SP)
        .addImm(FramePtrOffsetInBlock / 4)
        .setMIFlags(MachineInstr::FrameSetup)
        .add(predOps(ARMCC::AL));
    if (FramePtrOffsetInBlock) {
      CFAOffset += FramePtrOffsetInBlock;
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
          nullptr, MRI->getDwarfRegNum(FramePtr, true), CFAOffset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      unsigned CFIIndex =
          MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
              nullptr, MRI->getDwarfRegNum(FramePtr, true)));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
    if (NumBytes > 508)
      // If offset is > 508 then sp cannot be adjusted in a single instruction,
      // try restoring from fp instead.
      AFI->setShouldRestoreSPFromFP(true);
  }

  // Skip past the spilling of r8-r11, which could consist of multiple tPUSH
  // and tMOVr instructions. We don't need to add any call frame information
  // in-between these instructions, because they do not modify the high
  // registers.
  while (true) {
    MachineBasicBlock::iterator OldMBBI = MBBI;
    // Skip a run of tMOVr instructions
    while (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tMOVr)
      MBBI++;
    if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH) {
      MBBI++;
    } else {
      // We have reached an instruction which is not a push, so the previous
      // run of tMOVr instructions (which may have been empty) was not part of
      // the prologue. Reset MBBI back to the last PUSH of the prologue.
      MBBI = OldMBBI;
      break;
    }
  }

  // Emit call frame information for the callee-saved high registers.
  for (auto &I : CSI) {
    unsigned Reg = I.getReg();
    int FI = I.getFrameIdx();
    switch (Reg) {
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
    case ARM::R12: {
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
          nullptr, MRI->getDwarfRegNum(Reg, true), MFI.getObjectOffset(FI)));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
      break;
    }
    default:
      break;
    }
  }

  if (NumBytes) {
    // Insert it after all the callee-save spills.
    emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, -NumBytes,
                 MachineInstr::FrameSetup);
    if (!HasFP) {
      CFAOffset -= NumBytes;
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
  }

  if (STI.isTargetELF() && HasFP)
    MFI.setOffsetAdjustment(MFI.getOffsetAdjustment() -
                            AFI->getFramePtrSpillOffset());

  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);

  if (RegInfo->needsStackRealignment(MF)) {
    const unsigned NrBitsToZero = countTrailingZeros(MFI.getMaxAlignment());
    // Emit the following sequence, using R4 as a temporary, since we cannot use
    // SP as a source or destination register for the shifts:
    // mov  r4, sp
    // lsrs r4, r4, #NrBitsToZero
    // lsls r4, r4, #NrBitsToZero
    // mov  sp, r4
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R4)
      .addReg(ARM::SP, RegState::Kill)
      .add(predOps(ARMCC::AL));

    BuildMI(MBB, MBBI, dl, TII.get(ARM::tLSRri), ARM::R4)
      .addDef(ARM::CPSR)
      .addReg(ARM::R4, RegState::Kill)
      .addImm(NrBitsToZero)
      .add(predOps(ARMCC::AL));

    BuildMI(MBB, MBBI, dl, TII.get(ARM::tLSLri), ARM::R4)
      .addDef(ARM::CPSR)
      .addReg(ARM::R4, RegState::Kill)
      .addImm(NrBitsToZero)
      .add(predOps(ARMCC::AL));

    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
      .addReg(ARM::R4, RegState::Kill)
      .add(predOps(ARMCC::AL));

    AFI->setShouldRestoreSPFromFP(true);
  }

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (RegInfo->hasBasePointer(MF))
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), BasePtr)
        .addReg(ARM::SP)
        .add(predOps(ARMCC::AL));

  // If the frame has variable sized objects then the epilogue must restore
  // the sp from fp. We can assume there's an FP here since hasFP already
  // checks for hasVarSizedObjects.
  if (MFI.hasVarSizedObjects())
    AFI->setShouldRestoreSPFromFP(true);

  // In some cases, virtual registers have been introduced, e.g. by uses of
  // emitThumbRegPlusImmInReg.
  MF.getProperties().reset(MachineFunctionProperties::Property::NoVRegs);
}
Example 14
bool IRTranslator::runOnMachineFunction(MachineFunction &MF) {
  const Function &F = *MF.getFunction();
  if (F.empty())
    return false;
  CLI = MF.getSubtarget().getCallLowering();
  MIRBuilder.setMF(MF);
  EntryBuilder.setMF(MF);
  MRI = &MF.getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Setup the arguments.
  MachineBasicBlock &MBB = getOrCreateBB(F.front());
  MIRBuilder.setMBB(MBB);
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(MIRBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MIRBuilder.getMF().getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // Now that we've got the ABI handling code, it's safe to set a location for
  // any Constants we find in the IR.
  if (MBB.empty())
    EntryBuilder.setMBB(MBB);
  else
    EntryBuilder.setInstr(MBB.back(), /* Before */ false);

  for (const BasicBlock &BB: F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    MIRBuilder.setMBB(MBB);

    for (const Instruction &Inst: BB) {
      bool Succeeded = translate(Inst);
      if (!Succeeded) {
        if (TPC->isGlobalISelAbortEnabled())
          reportTranslationError(Inst, "unable to translate instruction");
        MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  finalizeFunction();

  // Now that the MachineFrameInfo has been configured, no further changes to
  // the reserved registers are possible.
  MRI->freezeReservedRegs(MF);

  return false;
}
Example 15
/// doScavengeFrameVirtualRegs - Replace all frame index virtual registers
/// with physical registers. Use the register scavenger to find an
/// appropriate register to use.
///
/// FIXME: Iterating over the instruction stream is unnecessary. We can simply
/// iterate over the vreg use list, which at this point only contains machine
/// operands for which eliminateFrameIndex needs a new scratch reg.
static void
doScavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger *RS) {
  // Run through the instructions and find any virtual registers.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (MachineBasicBlock &MBB : MF) {
    RS->enterBasicBlock(MBB);

    int SPAdj = 0;

    // The instruction stream may change in the loop, so check MBB.end()
    // directly.
    for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
      // We might end up here again with a NULL iterator if we scavenged a
      // register for which we inserted spill code for definition by what was
      // originally the first instruction in MBB.
      if (I == MachineBasicBlock::iterator(nullptr))
        I = MBB.begin();

      const MachineInstr &MI = *I;
      MachineBasicBlock::iterator J = std::next(I);
      MachineBasicBlock::iterator P =
                         I == MBB.begin() ? MachineBasicBlock::iterator(nullptr)
                                          : std::prev(I);

      // RS should process this instruction before we might scavenge at this
      // location. This is because we might be replacing a virtual register
      // defined by this instruction, and if so, registers killed by this
      // instruction are available, and defined registers are not.
      RS->forward(I);

      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isReg())
          continue;
        unsigned Reg = MO.getReg();
        if (!TargetRegisterInfo::isVirtualRegister(Reg))
          continue;

        // When we first encounter a new virtual register, it
        // must be a definition.
        assert(MO.isDef() && "frame index virtual missing def!");
        // Scavenge a new scratch register
        const TargetRegisterClass *RC = MRI.getRegClass(Reg);
        unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj);

        ++NumScavengedRegs;

        // Replace this reference to the virtual register with the
        // scratch register.
        assert(ScratchReg && "Missing scratch register!");
        MRI.replaceRegWith(Reg, ScratchReg);

        // Because this instruction was processed by the RS before this
        // register was allocated, make sure that the RS now records the
        // register as being used.
        RS->setRegUsed(ScratchReg);
      }

      // If the scavenger needed to use one of its spill slots, the
      // spill code will have been inserted in between I and J. This is a
      // problem because we need the spill code before I: Move I to just
      // prior to J.
      if (I != std::prev(J)) {
        MBB.splice(J, &MBB, I);

        // Before we move I, we need to prepare the RS to visit I again.
        // Specifically, RS will assert if it sees uses of registers that
        // it believes are undefined. Because we have already processed
        // register kills in I, when it visits I again, it will believe that
        // those registers are undefined. To avoid this situation, unprocess
        // the instruction I.
        assert(RS->getCurrentPosition() == I &&
          "The register scavenger has an unexpected position");
        I = P;
        RS->unprocess(P);
      } else
        ++I;
    }
  }

  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
}
Example 16
bool
MIRParserImpl::initializeMachineFunction(const yaml::MachineFunction &YamlMF,
                                         MachineFunction &MF) {
  // TODO: Recreate the machine function.
  initNames2RegClasses(MF);
  initNames2RegBanks(MF);
  if (YamlMF.Alignment)
    MF.setAlignment(YamlMF.Alignment);
  MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);

  if (YamlMF.Legalized)
    MF.getProperties().set(MachineFunctionProperties::Property::Legalized);
  if (YamlMF.RegBankSelected)
    MF.getProperties().set(
        MachineFunctionProperties::Property::RegBankSelected);
  if (YamlMF.Selected)
    MF.getProperties().set(MachineFunctionProperties::Property::Selected);

  PerFunctionMIParsingState PFS(MF, SM, IRSlots, Names2RegClasses,
                                Names2RegBanks);
  if (parseRegisterInfo(PFS, YamlMF))
    return true;
  if (!YamlMF.Constants.empty()) {
    auto *ConstantPool = MF.getConstantPool();
    assert(ConstantPool && "Constant pool must be created");
    if (initializeConstantPool(PFS, *ConstantPool, YamlMF))
      return true;
  }

  StringRef BlockStr = YamlMF.Body.Value.Value;
  SMDiagnostic Error;
  SourceMgr BlockSM;
  BlockSM.AddNewSourceBuffer(
      MemoryBuffer::getMemBuffer(BlockStr, "",/*RequiresNullTerminator=*/false),
      SMLoc());
  PFS.SM = &BlockSM;
  if (parseMachineBasicBlockDefinitions(PFS, BlockStr, Error)) {
    reportDiagnostic(
        diagFromBlockStringDiag(Error, YamlMF.Body.Value.SourceRange));
    return true;
  }
  PFS.SM = &SM;

  // Initialize the frame information after creating all the MBBs so that the
  // MBB references in the frame information can be resolved.
  if (initializeFrameInfo(PFS, YamlMF))
    return true;
  // Initialize the jump table after creating all the MBBs so that the MBB
  // references can be resolved.
  if (!YamlMF.JumpTableInfo.Entries.empty() &&
      initializeJumpTableInfo(PFS, YamlMF.JumpTableInfo))
    return true;
  // Parse the machine instructions after creating all of the MBBs so that the
  // parser can resolve the MBB references.
  StringRef InsnStr = YamlMF.Body.Value.Value;
  SourceMgr InsnSM;
  InsnSM.AddNewSourceBuffer(
      MemoryBuffer::getMemBuffer(InsnStr, "", /*RequiresNullTerminator=*/false),
      SMLoc());
  PFS.SM = &InsnSM;
  if (parseMachineInstructions(PFS, InsnStr, Error)) {
    reportDiagnostic(
        diagFromBlockStringDiag(Error, YamlMF.Body.Value.SourceRange));
    return true;
  }
  PFS.SM = &SM;

  if (setupRegisterInfo(PFS, YamlMF))
    return true;

  computeFunctionProperties(MF);

  MF.verify();
  return false;
}
Example 17
bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // No matter what happens, whether we successfully select the function or not,
  // nothing is going to use the vreg types after us.  Make sure they disappear.
  auto ClearVRegTypesOnReturn =
      make_scope_exit([&]() { MRI.getVRegToType().clear(); });

  // If the ISel pipeline failed, do not bother running that pass.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;

  DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n');

  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const InstructionSelector *ISel = MF.getSubtarget().getInstructionSelector();
  assert(ISel && "Cannot work without InstructionSelector");

  // An optimization remark emitter. Used to report failures.
  MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);

  // FIXME: freezeReservedRegs is now done in IRTranslator, but there are many
  // other MF/MFI fields we need to initialize.

#ifndef NDEBUG
  // Check that our input is fully legal: we require the function to have the
  // Legalized property, so it should be.
  // FIXME: This should be in the MachineVerifier, but it can't use the
  // LegalizerInfo as it's currently in the separate GlobalISel library.
  // The RegBankSelected property is already checked in the verifier. Note
  // that it has the same layering problem, but we only use inline methods so
  // end up not needing to link against the GlobalISel library.
  if (const LegalizerInfo *MLI = MF.getSubtarget().getLegalizerInfo())
    for (MachineBasicBlock &MBB : MF)
      for (MachineInstr &MI : MBB)
        if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI)) {
          reportGISelFailure(MF, TPC, MORE, "gisel-select",
                             "instruction is not legal", MI);
          return false;
        }

#endif
  // FIXME: We could introduce new blocks and will need to fix the outer loop.
  // Until then, keep track of the number of blocks to assert that we don't.
  const size_t NumBlocks = MF.size();

  for (MachineBasicBlock *MBB : post_order(&MF)) {
    if (MBB->empty())
      continue;

    // Select instructions in reverse block order. We permit erasing so have
    // to resort to manually iterating and recognizing the begin (rend) case.
    bool ReachedBegin = false;
    for (auto MII = std::prev(MBB->end()), Begin = MBB->begin();
         !ReachedBegin;) {
#ifndef NDEBUG
      // Keep track of the insertion range for debug printing.
      const auto AfterIt = std::next(MII);
#endif
      // Select this instruction.
      MachineInstr &MI = *MII;

      // And have our iterator point to the next instruction, if there is one.
      if (MII == Begin)
        ReachedBegin = true;
      else
        --MII;

      DEBUG(dbgs() << "Selecting: \n  " << MI);

      if (!ISel->select(MI)) {
        // FIXME: It would be nice to dump all inserted instructions.  It's
        // not obvious how, esp. considering select() can insert after MI.
        reportGISelFailure(MF, TPC, MORE, "gisel-select", "cannot select", MI);
        return false;
      }

      // Dump the range of instructions that MI expanded into.
      DEBUG({
        auto InsertedBegin = ReachedBegin ? MBB->begin() : std::next(MII);
        dbgs() << "Into:\n";
        for (auto &InsertedMI : make_range(InsertedBegin, AfterIt))
          dbgs() << "  " << InsertedMI;
        dbgs() << '\n';
      });
    }
  }
Example 18
bool MIRParserImpl::initializeMachineFunction(MachineFunction &MF) {
  auto It = Functions.find(MF.getName());
  if (It == Functions.end())
    return error(Twine("no machine function information for function '") +
                 MF.getName() + "' in the MIR file");
  // TODO: Recreate the machine function.
  const yaml::MachineFunction &YamlMF = *It->getValue();
  if (YamlMF.Alignment)
    MF.setAlignment(YamlMF.Alignment);
  MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);

  if (YamlMF.Legalized)
    MF.getProperties().set(MachineFunctionProperties::Property::Legalized);
  if (YamlMF.RegBankSelected)
    MF.getProperties().set(
        MachineFunctionProperties::Property::RegBankSelected);
  if (YamlMF.Selected)
    MF.getProperties().set(MachineFunctionProperties::Property::Selected);

  PerFunctionMIParsingState PFS(MF, SM, IRSlots);
  if (initializeRegisterInfo(PFS, YamlMF))
    return true;
  if (!YamlMF.Constants.empty()) {
    auto *ConstantPool = MF.getConstantPool();
    assert(ConstantPool && "Constant pool must be created");
    if (initializeConstantPool(PFS, *ConstantPool, YamlMF))
      return true;
  }

  StringRef BlockStr = YamlMF.Body.Value.Value;
  SMDiagnostic Error;
  SourceMgr BlockSM;
  BlockSM.AddNewSourceBuffer(
      MemoryBuffer::getMemBuffer(BlockStr, "",/*RequiresNullTerminator=*/false),
      SMLoc());
  PFS.SM = &BlockSM;
  if (parseMachineBasicBlockDefinitions(PFS, BlockStr, Error)) {
    reportDiagnostic(
        diagFromBlockStringDiag(Error, YamlMF.Body.Value.SourceRange));
    return true;
  }
  PFS.SM = &SM;

  if (MF.empty())
    return error(Twine("machine function '") + Twine(MF.getName()) +
                 "' requires at least one machine basic block in its body");
  // Initialize the frame information after creating all the MBBs so that the
  // MBB references in the frame information can be resolved.
  if (initializeFrameInfo(PFS, YamlMF))
    return true;
  // Initialize the jump table after creating all the MBBs so that the MBB
  // references can be resolved.
  if (!YamlMF.JumpTableInfo.Entries.empty() &&
      initializeJumpTableInfo(PFS, YamlMF.JumpTableInfo))
    return true;
  // Parse the machine instructions after creating all of the MBBs so that the
  // parser can resolve the MBB references.
  StringRef InsnStr = YamlMF.Body.Value.Value;
  SourceMgr InsnSM;
  InsnSM.AddNewSourceBuffer(
      MemoryBuffer::getMemBuffer(InsnStr, "", /*RequiresNullTerminator=*/false),
      SMLoc());
  PFS.SM = &InsnSM;
  if (parseMachineInstructions(PFS, InsnStr, Error)) {
    reportDiagnostic(
        diagFromBlockStringDiag(Error, YamlMF.Body.Value.SourceRange));
    return true;
  }
  PFS.SM = &SM;

  inferRegisterInfo(PFS, YamlMF);

  computeFunctionProperties(MF);

  // FIXME: This is a temporary workaround until the reserved registers can be
  // serialized.
  MF.getRegInfo().freezeReservedRegs(MF);
  MF.verify();
  return false;
}
Example 19
bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
  // If the ISel pipeline failed, do not bother running that pass.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;

  LLVM_DEBUG(dbgs() << "Selecting function: " << MF.getName() << '\n');

  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const InstructionSelector *ISel = MF.getSubtarget().getInstructionSelector();
  CodeGenCoverage CoverageInfo;
  assert(ISel && "Cannot work without InstructionSelector");

  // An optimization remark emitter. Used to report failures.
  MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);

  // FIXME: There are many other MF/MFI fields we need to initialize.

  MachineRegisterInfo &MRI = MF.getRegInfo();
#ifndef NDEBUG
  // Check that our input is fully legal: we require the function to have the
  // Legalized property, so it should be.
  // FIXME: This should be in the MachineVerifier, as the RegBankSelected
  // property check already is.
  if (!DisableGISelLegalityCheck)
    if (const MachineInstr *MI = machineFunctionIsIllegal(MF)) {
      reportGISelFailure(MF, TPC, MORE, "gisel-select",
                         "instruction is not legal", *MI);
      return false;
    }
#endif
  // FIXME: We could introduce new blocks and will need to fix the outer loop.
  // Until then, keep track of the number of blocks to assert that we don't.
  const size_t NumBlocks = MF.size();

  for (MachineBasicBlock *MBB : post_order(&MF)) {
    if (MBB->empty())
      continue;

    // Select instructions in reverse block order. We permit erasing so have
    // to resort to manually iterating and recognizing the begin (rend) case.
    bool ReachedBegin = false;
    for (auto MII = std::prev(MBB->end()), Begin = MBB->begin();
         !ReachedBegin;) {
#ifndef NDEBUG
      // Keep track of the insertion range for debug printing.
      const auto AfterIt = std::next(MII);
#endif
      // Select this instruction.
      MachineInstr &MI = *MII;

      // And have our iterator point to the next instruction, if there is one.
      if (MII == Begin)
        ReachedBegin = true;
      else
        --MII;

      LLVM_DEBUG(dbgs() << "Selecting: \n  " << MI);

      // We could have folded this instruction away already, making it dead.
      // If so, erase it.
      if (isTriviallyDead(MI, MRI)) {
        LLVM_DEBUG(dbgs() << "Is dead; erasing.\n");
        MI.eraseFromParentAndMarkDBGValuesForRemoval();
        continue;
      }

      if (!ISel->select(MI, CoverageInfo)) {
        // FIXME: It would be nice to dump all inserted instructions.  It's
        // not obvious how, esp. considering select() can insert after MI.
        reportGISelFailure(MF, TPC, MORE, "gisel-select", "cannot select", MI);
        return false;
      }

      // Dump the range of instructions that MI expanded into.
      LLVM_DEBUG({
        auto InsertedBegin = ReachedBegin ? MBB->begin() : std::next(MII);
        dbgs() << "Into:\n";
        for (auto &InsertedMI : make_range(InsertedBegin, AfterIt))
          dbgs() << "  " << InsertedMI;
        dbgs() << '\n';
      });
    }
  }