/// calculateCallsInformation - Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallsInformation(MachineFunction &Fn) {
  const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
  MachineFrameInfo *MFI = Fn.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI->adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode
  int FrameSetupOpcode   = TII.getCallFrameSetupOpcode();
  int FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == -1 && FrameDestroyOpcode == -1)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
      if (I->getOpcode() == FrameSetupOpcode ||
          I->getOpcode() == FrameDestroyOpcode) {
        assert(I->getNumOperands() >= 1 && "Call Frame Setup/Destroy Pseudo"
               " instructions should have a single immediate argument!");
        unsigned Size = I->getOperand(0).getImm();
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  MFI->setAdjustsStack(AdjustsStack);
  MFI->setMaxCallFrameSize(MaxCallFrameSize);

  for (std::vector<MachineBasicBlock::iterator>::iterator
         i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
    MachineBasicBlock::iterator I = *i;

    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(Fn))
      TFI->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
  }
}
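The values recorded here feed the later frame-layout stages. As a hedged illustration (not part of this pass), a minimal sketch of how a consumer might fold the largest call frame into the static stack size when the target keeps a reserved call frame; the helper name is made up for this example:

// Hypothetical helper, shown only to illustrate how MaxCallFrameSize and
// AdjustsStack might be consumed later; not taken from the pass above.
static void foldReservedCallFrame(MachineFunction &Fn) {
  MachineFrameInfo *MFI = Fn.getFrameInfo();
  const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
  // With a reserved call frame, the largest outgoing-argument area becomes
  // part of the fixed stack size instead of a per-call SP adjustment.
  if (TFI->hasReservedCallFrame(Fn) && MFI->adjustsStack())
    MFI->setStackSize(MFI->getStackSize() + MFI->getMaxCallFrameSize());
}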
Example #2
unsigned Disassembler::printInstructions(formatted_raw_ostream &Out,
  unsigned Address, unsigned Size, bool PrintTypes) {
  MachineFunction *MF = disassemble(Address);

  MachineFunction::iterator BI = MF->begin(), BE = MF->end();
  // Skip to first basic block with instruction in desired address
  // Out << BI->instr_rbegin()->getDebugLoc().getLine() << "\n";
  while (BI != BE
    && getDebugOffset(BI->instr_rbegin()->getDebugLoc()) < Address) {
    ++BI;
  }
  if (BI == BE) {
    printError("Could not disassemble, reached end of function's basic blocks"
      " when looking for first instruction.");
    return 0;
  }


  MachineBasicBlock::iterator II = BI->instr_begin(), IE = BI->instr_end();
  // skip to first instruction
  while (getDebugOffset(II->getDebugLoc()) < Address) {
    if (II == IE) {
      printError("Unreachable: reached end of basic block whe looking for first"
        " instruction.");
      ++BI;
      II = BI->instr_begin();
      IE = BI->instr_end();
    }
    ++II;
  }
  if (Address != getDebugOffset(II->getDebugLoc())) {
    Out << "Warning: starting at " << getDebugOffset(II->getDebugLoc())
        << " instead of " << Address << ".\n";
  }

  // Function Name and Offset
  Out << "<" << MF->getName();
  if (getDebugOffset(MF->begin()->instr_begin()->getDebugLoc()) != Address) {
    Out << "+"
        << (Address
          - getDebugOffset(MF->begin()->instr_begin()->getDebugLoc()));
  }
  Out << ">:\n";

  // Print each instruction
  unsigned InstrCount = 0;
  while (BI != BE && (Size == 0 || InstrCount < Size)) {
    printInstruction(Out, II, PrintTypes);
    ++InstrCount;
    ++II;
    if (II == IE) {
      ++BI;
      II = BI->instr_begin();
      IE = BI->instr_end();
    }
  }

  return InstrCount;
}
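printInstructions relies on a getDebugOffset helper that maps a DebugLoc back to the original instruction address. A minimal sketch, assuming the helper is a member of Disassembler and that disassemble() encoded the address in the DebugLoc line field (both are assumptions, not shown above):

// Sketch only: assumes the address was stored as the DebugLoc line number
// when the MachineFunction was produced by disassemble().
unsigned Disassembler::getDebugOffset(const DebugLoc &DL) const {
  return DL.getLine();
}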
/// scavengeFrameVirtualRegs - Replace all frame index virtual registers
/// with physical registers. Use the register scavenger to find an
/// appropriate register to use.
void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
  // Run through the instructions and find any virtual registers.
  for (MachineFunction::iterator BB = Fn.begin(),
       E = Fn.end(); BB != E; ++BB) {
    RS->enterBasicBlock(BB);

    unsigned VirtReg = 0;
    unsigned ScratchReg = 0;
    int SPAdj = 0;

    // The instruction stream may change in the loop, so check BB->end()
    // directly.
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
      MachineInstr *MI = I;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        if (MI->getOperand(i).isReg()) {
          MachineOperand &MO = MI->getOperand(i);
          unsigned Reg = MO.getReg();
          if (Reg == 0)
            continue;
          if (!TargetRegisterInfo::isVirtualRegister(Reg))
            continue;

          ++NumVirtualFrameRegs;

          // Have we already allocated a scratch register for this virtual?
          if (Reg != VirtReg) {
            // When we first encounter a new virtual register, it
            // must be a definition.
            assert(MI->getOperand(i).isDef() &&
                   "frame index virtual missing def!");
            // Scavenge a new scratch register
            VirtReg = Reg;
            const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
            ScratchReg = RS->scavengeRegister(RC, I, SPAdj);
            ++NumScavengedRegs;
          }
          // Replace this reference to the virtual register with the
          // scratch register.
          assert (ScratchReg && "Missing scratch register!");
          MI->getOperand(i).setReg(ScratchReg);

        }
      }
      RS->forward(I);
      ++I;
    }
  }
}
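The loop above is the consumer side: it rewrites frame-index virtual registers created earlier by a target hook. A minimal sketch of the producer side, with a hypothetical opcode and operand layout:

// Illustrative only: "MyTarget::ADDri" and the frame-index-plus-immediate
// form are assumptions. The new virtual register is defined first, which is
// what the assert in the scavenging loop above relies on.
static unsigned materializeFrameAddress(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        const TargetInstrInfo &TII,
                                        const TargetRegisterClass *RC,
                                        int FrameIdx) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  unsigned VReg = MRI.createVirtualRegister(RC);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(MyTarget::ADDri), VReg)
      .addFrameIndex(FrameIdx)
      .addImm(0);
  return VReg;
}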
Example #4
void GCMachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE;
       ++BBI)
    for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end();
         MI != ME; ++MI)
      if (MI->isCall()) {
        // Do not treat tail or sibling call sites as safe points.  This is
        // legal since any arguments passed to the callee which live in the
        // remnants of the caller's frame will be owned and updated by the
        // callee if required.
        if (MI->isTerminator())
          continue;
        VisitCallPoint(MI);
      }
}
Example #5
void ARM64BranchRelaxation::adjustBlockOffsets(MachineBasicBlock *Start) {
  unsigned PrevNum = Start->getNumber();
  MachineFunction::iterator MBBI = Start, E = MF->end();
  for (++MBBI; MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    unsigned Num = MBB->getNumber();
    if (!Num) // block zero is never changed from offset zero.
      continue;
    // Get the offset and known bits at the end of the layout predecessor.
    // Include the alignment of the current block.
    unsigned LogAlign = MBBI->getAlignment();
    BlockInfo[Num].Offset = BlockInfo[PrevNum].postOffset(LogAlign);
    PrevNum = Num;
  }
}
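adjustBlockOffsets assumes postOffset(LogAlign) returns the end offset of the predecessor block rounded up to the alignment of the current block. A hedged sketch of that computation (the real BasicBlockInfo also tracks known alignment bits, which are omitted here):

// Assumed semantics of BlockInfo[PrevNum].postOffset(LogAlign): end of the
// previous block, rounded up to a 2^LogAlign byte boundary.
static unsigned postOffsetSketch(unsigned Offset, unsigned Size,
                                 unsigned LogAlign) {
  unsigned End = Offset + Size;
  unsigned Align = 1u << LogAlign;
  return (End + Align - 1) & ~(Align - 1);
}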
/// Splice - Move the sequence of instructions [Begin,End) to just before
/// InsertPt. Update branch instructions as needed to account for broken
/// fallthrough edges and to take advantage of newly exposed fallthrough
/// opportunities.
///
void CodePlacementOpt::Splice(MachineFunction &MF,
                              MachineFunction::iterator InsertPt,
                              MachineFunction::iterator Begin,
                              MachineFunction::iterator End) {
  assert(Begin != MF.begin() && End != MF.begin() && InsertPt != MF.begin() &&
         "Splice can't change the entry block!");
  MachineFunction::iterator OldBeginPrior = prior(Begin);
  MachineFunction::iterator OldEndPrior = prior(End);

  MF.splice(InsertPt, Begin, End);

  prior(Begin)->updateTerminator();
  OldBeginPrior->updateTerminator();
  OldEndPrior->updateTerminator();
}
/// estimateRSStackSizeLimit - Look at each instruction that references stack
/// frames and return the stack size limit beyond which some of these
/// instructions will require a scratch register during their expansion later.
// FIXME: Move to TII?
static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
                                         const TargetFrameLowering *TFI) {
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned Limit = (1 << 12) - 1;
  for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!I->getOperand(i).isFI()) continue;

        // When using ADDri to get the address of a stack object, 255 is the
        // largest offset guaranteed to fit in the immediate offset.
        if (I->getOpcode() == ARM::ADDri) {
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        }

        // Otherwise check the addressing mode.
        switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
        case ARMII::AddrMode3:
        case ARMII::AddrModeT2_i8:
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode5:
        case ARMII::AddrModeT2_i8s4:
          Limit = std::min(Limit, ((1U << 8) - 1) * 4);
          break;
        case ARMII::AddrModeT2_i12:
          // i12 supports only positive offset so these will be converted to
          // i8 opcodes. See llvm::rewriteT2FrameIndex.
          if (TFI->hasFP(MF) && AFI->hasStackFrame())
            Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode4:
        case ARMII::AddrMode6:
          // Addressing modes 4 & 6 (load/store) instructions can't encode an
          // immediate offset for stack references.
          return 0;
        default:
          break;
        }
        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}
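One plausible consumer of the returned limit, sketched under assumptions (the register class choice, the externally supplied stack-size estimate, and the RegScavenger interface of this LLVM vintage); it is not the actual ARM frame-lowering code:

// Illustrative sketch: reserve an emergency spill slot when the estimated
// stack size may exceed the offset range of frame-referencing instructions.
static void maybeReserveScavengingSlot(MachineFunction &MF, RegScavenger *RS,
                                       const TargetFrameLowering *TFI,
                                       unsigned EstimatedStackSize) {
  if (!RS)
    return;
  unsigned Limit = estimateRSStackSizeLimit(MF, TFI);
  if (EstimatedStackSize >= Limit) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    const TargetRegisterClass *RC = &ARM::GPRRegClass;
    int FI = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false);
    RS->setScavengingFrameIndex(FI);
  }
}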
Example #8
/// ConvertInstTo3Addr - Convert the specified two-address instruction into a
/// three address one. Return true if this transformation was successful.
bool
TwoAddressInstructionPass::ConvertInstTo3Addr(MachineBasicBlock::iterator &mi,
                                              MachineBasicBlock::iterator &nmi,
                                              MachineFunction::iterator &mbbi,
                                              unsigned RegB, unsigned Dist) {
  MachineInstr *NewMI = TII->convertToThreeAddress(mbbi, mi, LV);
  if (NewMI) {
    DEBUG(errs() << "2addr: CONVERTING 2-ADDR: " << *mi);
    DEBUG(errs() << "2addr:         TO 3-ADDR: " << *NewMI);
    bool Sunk = false;

    if (NewMI->findRegisterUseOperand(RegB, false, TRI))
      // FIXME: Temporary workaround. If the new instruction doesn't
      // use RegB, convertToThreeAddress must have created more
      // than one instruction.
      Sunk = Sink3AddrInstruction(mbbi, NewMI, RegB, mi);

    mbbi->erase(mi); // Nuke the old inst.

    if (!Sunk) {
      DistanceMap.insert(std::make_pair(NewMI, Dist));
      mi = NewMI;
      nmi = next(mi);
    }
    return true;
  }

  return false;
}
void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
  initGlobalBaseReg(MF);

  MachineRegisterInfo *MRI = &MF.getRegInfo();

  for (MachineFunction::iterator MFI = MF.begin(), MFE = MF.end(); MFI != MFE;
       ++MFI)
    for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I) {
      if (I->getOpcode() == Mips::RDDSP)
        addDSPCtrlRegOperands(false, *I, MF);
      else if (I->getOpcode() == Mips::WRDSP)
        addDSPCtrlRegOperands(true, *I, MF);
      else
        replaceUsesWithZeroReg(MRI, *I);
    }
}
void PatmosSPMark::scanAndRewriteCalls(MachineFunction *MF, Worklist &W) {
  for (MachineFunction::iterator MBB = MF->begin(), MBBE = MF->end();
                                 MBB != MBBE; ++MBB) {
    for( MachineBasicBlock::iterator MI = MBB->begin(),
                                     ME = MBB->getFirstTerminator();
                                     MI != ME; ++MI) {
      if (MI->isCall()) {
        MachineFunction *MF = getCallTargetMF(MI);
        if (!MF) {
          dbgs() << "[Single-Path] WARNING: Cannot rewrite call in "
                 << MBB->getParent()->getFunction()->getName()
                 << " (indirect call?)\n";
          continue;
        };

        const Function *Target = getCallTarget(MI);
        if (Target->getName() == "__udivsi3") {
          //DEBUG(dbgs() << "[Single-Path] skipping call to "
          //       << Target->getName() << "\n");
          //continue;
        }


        PatmosMachineFunctionInfo *PMFI =
          MF->getInfo<PatmosMachineFunctionInfo>();
        if (!PMFI->isSinglePath()) {
          // rewrite call to _sp variant
          rewriteCall(MI);
          // set _sp MF to single path in PMFI (MF has changed!)
          MachineFunction *MF = getCallTargetMF(MI);
          PatmosMachineFunctionInfo *PMFI =
            MF->getInfo<PatmosMachineFunctionInfo>();
          // we possibly have already marked the _sp variant as single-path
          // in an earlier call
          if (!PMFI->isSinglePath()) {
            PMFI->setSinglePath();
            // add the new single-path function to the worklist
            W.push_back(MF);

            NumSPTotal++; // bump STATISTIC
            NumSPMaybe++; // bump STATISTIC
          }
        }
      }
    }
  }
}
Example #11
bool Inserter::runOnMachineFunction(MachineFunction &F) {
  Cpu0FunctionInfo *Cpu0FI = F.getInfo<Cpu0FunctionInfo>();

  if ((TM.getRelocationModel() != Reloc::PIC_) ||
      (!Cpu0FI->globalBaseRegFixed()))
    return false;

  bool Changed = false;
  int FI = Cpu0FI->getGPFI();

  for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
       MFI != MFE; ++MFI) {
    MachineBasicBlock& MBB = *MFI;
    MachineBasicBlock::iterator I = MFI->begin();
    
    // If MBB is a landing pad (i.e. it is entered via an exception handler),
    // insert the instruction that restores $gp after the EH_LABEL.
    if (MBB.isLandingPad()) {
      // Find EH_LABEL first.
      for (; I->getOpcode() != TargetOpcode::EH_LABEL; ++I) ;

      // Insert ld.
      ++I;
      DebugLoc dl = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
      BuildMI(MBB, I, dl, TII->get(Cpu0::LD), Cpu0::GP).addFrameIndex(FI)
                                                       .addImm(0);
      Changed = true;
    }

    while (I != MFI->end()) {
      if (I->getOpcode() != Cpu0::JALR) {
        ++I;
        continue;
      }

      DebugLoc dl = I->getDebugLoc();
      // emit lw $gp, ($gp save slot on stack) after jalr
      BuildMI(MBB, ++I, dl, TII->get(Cpu0::LD), Cpu0::GP).addFrameIndex(FI)
                                                         .addImm(0);
      Changed = true;
    }
  }

  return Changed;
}
Example #12
/// calculateCallsInformation - Calculate the MaxCallFrameSize and HasCalls
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallsInformation(MachineFunction &Fn) {
  const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();

  unsigned MaxCallFrameSize = 0;
  bool HasCalls = false;

  // Get the function call frame set-up and tear-down instruction opcode
  int FrameSetupOpcode   = RegInfo->getCallFrameSetupOpcode();
  int FrameDestroyOpcode = RegInfo->getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == -1 && FrameDestroyOpcode == -1)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
      if (I->getOpcode() == FrameSetupOpcode ||
          I->getOpcode() == FrameDestroyOpcode) {
        assert(I->getNumOperands() >= 1 && "Call Frame Setup/Destroy Pseudo"
               " instructions should have a single immediate argument!");
        unsigned Size = I->getOperand(0).getImm();
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        HasCalls = true;
        FrameSDOps.push_back(I);
      } else if (I->getOpcode() == TargetInstrInfo::INLINEASM) {
        // An InlineAsm might be a call; assume it is to get the stack frame
        // aligned correctly for calls.
        HasCalls = true;
      }

  MachineFrameInfo *FFI = Fn.getFrameInfo();
  FFI->setHasCalls(HasCalls);
  FFI->setMaxCallFrameSize(MaxCallFrameSize);

  for (std::vector<MachineBasicBlock::iterator>::iterator
         i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
    MachineBasicBlock::iterator I = *i;

    // If call frames are not being included as part of the stack frame, and
    // there is no dynamic allocation (therefore referencing frame slots off
    // sp), leave the pseudo ops alone. We'll eliminate them later.
    if (RegInfo->hasReservedCallFrame(Fn) || RegInfo->hasFP(Fn))
      RegInfo->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
  }
}
Example #13
bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {

  if (IsCombinesDisabled) return false;

  bool HasChanged = false;

  // Get target info.
  TRI = MF.getSubtarget().getRegisterInfo();
  TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();

  // Combine aggressively (for code size)
  ShouldCombineAggressively =
    MF.getTarget().getOptLevel() <= CodeGenOpt::Default;

  // Traverse basic blocks.
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
       ++BI) {
    PotentiallyNewifiableTFR.clear();
    findPotentialNewifiableTFRs(*BI);

    // Traverse instructions in basic block.
    for(MachineBasicBlock::iterator MI = BI->begin(), End = BI->end();
        MI != End;) {
      MachineInstr *I1 = MI++;
      // Don't combine a TFR whose user could be newified (instructions that
      // define double registers can not be newified - Programmer's Ref Manual
      // 5.4.2 New-value stores).
      if (ShouldCombineAggressively && PotentiallyNewifiableTFR.count(I1))
        continue;

      // Ignore instructions that are not combinable.
      if (!isCombinableInstType(I1, TII, ShouldCombineAggressively))
        continue;

      // Find a second instruction that can be merged into a combine
      // instruction.
      bool DoInsertAtI1 = false;
      MachineInstr *I2 = findPairable(I1, DoInsertAtI1);
      if (I2) {
        HasChanged = true;
        combine(I1, I2, MI, DoInsertAtI1);
      }
    }
  }

  return HasChanged;
}
Example #14
bool VmkitGC::findCustomSafePoints(GCFunctionInfo& FI, MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(),
                                 BBE = MF.end(); BBI != BBE; ++BBI) {
    for (MachineBasicBlock::iterator MI = BBI->begin(),
                                     ME = BBI->end(); MI != ME; ++MI) {
      if (MI->getDesc().isCall()) {
        MachineBasicBlock::iterator RAI = MI; ++RAI;                                
        MCSymbol* Label = InsertLabel(*MI->getParent(), RAI, MI->getDebugLoc());
        FI.addSafePoint(GC::PostCall, Label, MI->getDebugLoc());
      } else if (MI->getDebugLoc().getCol() == 1) {
        MCSymbol* Label = InsertLabel(*MI->getParent(), MI, MI->getDebugLoc());
        FI.addSafePoint(GC::Loop, Label, MI->getDebugLoc());
      }
    }
  }
  return false;
}
Example #15
void SystemZRegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SystemZMachineFunctionInfo *SystemZMFI =
    MF.getInfo<SystemZMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() :
                 DebugLoc::getUnknownLoc());

  // Get the number of bytes to allocate from the FrameInfo.
  // Note that area for callee-saved stuff is already allocated, thus we need to
  // 'undo' the stack movement.
  uint64_t StackSize = MFI->getStackSize();
  StackSize -= SystemZMFI->getCalleeSavedFrameSize();

  uint64_t NumBytes = StackSize - TFI.getOffsetOfLocalArea();

  // Skip the callee-saved push instructions.
  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == SystemZ::MOV64mr ||
          MBBI->getOpcode() == SystemZ::MOV64mrm))
    ++MBBI;

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // adjust stack pointer: R15 -= numbytes
  if (StackSize || MFI->hasCalls()) {
    assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
           "Invalid stack frame calculation!");
    emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, TII);
  }

  if (hasFP(MF)) {
    // Update R11 with the new base value...
    BuildMI(MBB, MBBI, DL, TII.get(SystemZ::MOV64rr), SystemZ::R11D)
      .addReg(SystemZ::R15D);

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(SystemZ::R11D);

  }
}
Example #16
void XTCFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI            = MF.getFrameInfo();
  XTCFunctionInfo *XTCFI     = MF.getInfo<XTCFunctionInfo>();
  const XTCInstrInfo &TII =
    *static_cast<const XTCInstrInfo*>(MF.getTarget().getInstrInfo());

  DebugLoc dl = MBBI->getDebugLoc();

  CallingConv::ID CallConv = MF.getFunction()->getCallingConv();

  // Get the FI's where RA and FP are saved.
  int FPOffset = XTCFI->getFPStackOffset();
  int RAOffset = XTCFI->getRAStackOffset();

  if (hasFP(MF)) {
      /* Save current FP into stack */

    //  BuildMI(MBB, MBBI, dl, TII.get(XTC::STWPREI)).addReg(XTC::r14, RegState::Kill).addReg(XTC::r15).addImm(-4);

      /* Copy from current SP */

      BuildMI(MBB, MBBI, dl, TII.get(XTC::COPY), XTC::r15).addReg(XTC::r14);

      // Mark the FramePtr as live-in in every block except the entry.
      for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
           I != E; ++I)
          I->addLiveIn(XTC::r14);
  }
  /*
  // lwi R15, R1, stack_loc
  if (MFI->adjustsStack() || requiresRA) {
    BuildMI(MBB, MBBI, dl, TII.get(XTC::LWI), XTC::R15)
    .addReg(XTC::R1).addImm(RAOffset);
    */


  // Get the number of bytes from FrameInfo
  int StackSize = (int) MFI->getStackSize();
  if (StackSize) {
    BuildMI(MBB, MBBI, dl, TII.get(XTC::ADDI), XTC::r15)
      .addReg(XTC::r15).addImm(StackSize);
  }
}
Example #17
void PatmosSPMark::scanAndRewriteCalls(MachineFunction *MF, Worklist &W) {
  DEBUG(dbgs() << "In function '" << MF->getName() << "':\n");
  for (MachineFunction::iterator MBB = MF->begin(), MBBE = MF->end();
                                 MBB != MBBE; ++MBB) {
    for( MachineBasicBlock::iterator MI = MBB->begin(),
                                     ME = MBB->getFirstTerminator();
                                     MI != ME; ++MI) {
      if (MI->isCall()) {
        MachineFunction *MF = getCallTargetMF(MI);
        if (!MF) {
          dbgs() << "[Single-Path] WARNING: Cannot rewrite call in "
                 << MBB->getParent()->getFunction()->getName()
                 << " (indirect call?)\n";
          continue;
        };

        const Function *Target = getCallTarget(MI);

        PatmosMachineFunctionInfo *PMFI =
          MF->getInfo<PatmosMachineFunctionInfo>();
        if (!PMFI->isSinglePath()) {
          // sp-reachable functions were already marked as single-path.
          // Hence, we have _potential_ sp-maybe functions left; the call
          // needs to be rewritten to point to the sp-maybe clone.
          rewriteCall(MI);
          // set _sp MF to single path in PMFI (MF has changed!)
          MachineFunction *MF = getCallTargetMF(MI);
          PatmosMachineFunctionInfo *PMFI =
            MF->getInfo<PatmosMachineFunctionInfo>();
          // we possibly have already marked the _sp variant as single-path
          // in an earlier call, if not, then set this final decision.
          if (!PMFI->isSinglePath()) {
            PMFI->setSinglePath();
            // add the new single-path function to the worklist
            W.push_back(MF);

            NumSPTotal++; // bump STATISTIC
            NumSPMaybe++; // bump STATISTIC
          }
        }
      }
    }
  }
}
Example #18
// Align all targets of indirect branches on bundle size.  Used only if target
// is NaCl.
void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) {
  // Align all blocks that are jumped to through jump table.
  if (MachineJumpTableInfo *JtInfo = MF.getJumpTableInfo()) {
    const std::vector<MachineJumpTableEntry> &JT = JtInfo->getJumpTables();
    for (unsigned I = 0; I < JT.size(); ++I) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[I].MBBs;

      for (unsigned J = 0; J < MBBs.size(); ++J)
        MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
    }
  }

  // If basic block address is taken, block can be target of indirect branch.
  for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
                                 MBB != E; ++MBB) {
    if (MBB->hasAddressTaken())
      MBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
  }
}
Example #19
bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];

  // Compute total ingoing and outgoing block frequencies for all bundles.
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
    float Freq = getBlockFrequency(I);
    unsigned Num = I->getNumber();
    nodes[bundles->getBundle(Num, 1)].Frequency[0] += Freq;
    nodes[bundles->getBundle(Num, 0)].Frequency[1] += Freq;
  }

  // We never change the function.
  return false;
}
bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TFL = MF.getSubtarget().getFrameLowering();
  MRI = &MF.getRegInfo();

  if (!shouldPerformTransformation(MF))
    return false;

  int FrameSetupOpcode = TII->getCallFrameSetupOpcode();

  bool Changed = false;

  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
      if (I->getOpcode() == FrameSetupOpcode)
        Changed |= adjustCallSequence(MF, *BB, I);

  return Changed;
}
SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
                                                       MachineFunction *MF,
                                                       unsigned FrameIndex,
                                                       unsigned SubIdx) {
  const MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
      MF->getSubtarget<AMDGPUSubtarget>().getRegisterInfo());
  MachineRegisterInfo &MRI = MF->getRegInfo();
  int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
  Offset += SubIdx * 4;

  unsigned LaneVGPRIdx = Offset / (64 * 4);
  unsigned Lane = (Offset / 4) % 64;

  struct SpilledReg Spill;

  if (!LaneVGPRs.count(LaneVGPRIdx)) {
    unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);

    if (LaneVGPR == AMDGPU::NoRegister) {
      LLVMContext &Ctx = MF->getFunction()->getContext();
      Ctx.emitError("Ran out of VGPRs for spilling SGPR");

      // When compiling from inside Mesa, the compilation continues.
      // Select an arbitrary register to avoid triggering assertions
      // during subsequent passes.
      LaneVGPR = AMDGPU::VGPR0;
    }

    LaneVGPRs[LaneVGPRIdx] = LaneVGPR;

    // Add this register as live-in to all blocks to avoid machine verifier
    // complaining about use of an undefined physical register.
    for (MachineFunction::iterator BI = MF->begin(), BE = MF->end();
         BI != BE; ++BI) {
      BI->addLiveIn(LaneVGPR);
    }
  }

  Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  Spill.Lane = Lane;
  return Spill;
}
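For reference, the lane mapping above packs 64 lanes of 4 bytes each into one lane VGPR, i.e. 256 bytes of SGPR spill space per VGPR. A few worked values (illustrative only):

//   Offset =   0  ->  LaneVGPRIdx = 0, Lane = 0
//   Offset =   4  ->  LaneVGPRIdx = 0, Lane = 1
//   Offset = 256  ->  LaneVGPRIdx = 1, Lane = 0
//   Offset = 260  ->  LaneVGPRIdx = 1, Lane = 1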
Example #22
bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequencies.resize(mf.getNumBlockIDs());
  MachineBlockFrequencyInfo &MBFI = getAnalysis<MachineBlockFrequencyInfo>();
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
    unsigned Num = I->getNumber();
    BlockFrequencies[Num] = MBFI.getBlockFreq(I);
  }

  // We never change the function.
  return false;
}
void MachineBlockPlacement::placeChainsTopologically(MachineFunction &F) {
  MachineBasicBlock *EntryB = &F.front();
  assert(BlockToChain[EntryB] && "Missing chain for entry block");
  assert(*BlockToChain[EntryB]->begin() == EntryB &&
         "Entry block is not the head of the entry block chain");

  // Walk the blocks in RPO, and insert each block for a chain in order the
  // first time we see that chain.
  MachineFunction::iterator InsertPos = F.begin();
  SmallPtrSet<BlockChain *, 16> VisitedChains;
  ReversePostOrderTraversal<MachineBasicBlock *> RPOT(EntryB);
  typedef ReversePostOrderTraversal<MachineBasicBlock *>::rpo_iterator
    rpo_iterator;
  for (rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
    BlockChain *Chain = BlockToChain[*I];
    assert(Chain);
    if(!VisitedChains.insert(Chain))
      continue;
    for (BlockChain::iterator BI = Chain->begin(), BE = Chain->end(); BI != BE;
         ++BI) {
      DEBUG(dbgs() << (BI == Chain->begin() ? "Placing chain "
                                            : "          ... ")
                   << getBlockName(*BI) << "\n");
      if (InsertPos != MachineFunction::iterator(*BI))
        F.splice(InsertPos, *BI);
      else
        ++InsertPos;
    }
  }

  // Now that every block is in its final position, update all of the
  // terminators.
  SmallVector<MachineOperand, 4> Cond; // For AnalyzeBranch.
  for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
    // FIXME: It would be awesome if updateTerminator would just return rather
    // than assert when the branch cannot be analyzed, in order to remove this
    // boilerplate.
    Cond.clear();
    MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
    if (!TII->AnalyzeBranch(*FI, TBB, FBB, Cond))
      FI->updateTerminator();
  }
}
Example #24
bool MipsHazardSchedule::runOnMachineFunction(MachineFunction &MF) {

  const MipsSubtarget *STI =
      &static_cast<const MipsSubtarget &>(MF.getSubtarget());

  // Forbidden slot hazards are only defined for MIPSR6 but not microMIPSR6.
  if (!STI->hasMips32r6() || STI->inMicroMipsMode())
    return false;

  bool Changed = false;
  const MipsInstrInfo *TII = STI->getInstrInfo();

  for (MachineFunction::iterator FI = MF.begin(); FI != MF.end(); ++FI) {
    for (Iter I = FI->begin(); I != FI->end(); ++I) {

      // Forbidden slot hazard handling. Use lookahead over state.
      if (!TII->HasForbiddenSlot(*I))
        continue;

      Iter Inst;
      bool LastInstInFunction =
          std::next(I) == FI->end() && std::next(FI) == MF.end();
      if (!LastInstInFunction) {
        if (std::next(I) != FI->end()) {
          // Start looking from the next instruction in the basic block.
          Inst = getNextMachineInstr(std::next(I));
        } else {
          // Next instruction in the physical successor basic block.
          Inst = getNextMachineInstr(I);
        }
      }

      if (LastInstInFunction || !TII->SafeInForbiddenSlot(*Inst)) {
        Changed = true;
        MIBundleBuilder(&*I)
            .append(BuildMI(MF, I->getDebugLoc(), TII->get(Mips::NOP)));
        NumInsertedNops++;
      }
    }
  }
  return Changed;
}
Example #25
/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();

  // Add prologue to the function...
  TFI.emitPrologue(Fn);

  // Add epilogue to restore the callee-save registers in each exiting block
  for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
    // If last instruction is a return instruction, add an epilogue
    if (!I->empty() && I->back().isReturn())
      TFI.emitEpilogue(Fn, *I);
  }

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it.  This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (Fn.getTarget().Options.EnableSegmentedStacks)
    TFI.adjustForSegmentedStacks(Fn);
}
Example #26
bool ErlangGC::findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE;
       ++BBI)
    for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end();
         MI != ME; ++MI)

      if (MI->getDesc().isCall()) {

        // Do not treat tail call sites as safe points.
        if (MI->getDesc().isTerminator())
          continue;

        /* Code copied from VisitCallPoint(...) */
        MachineBasicBlock::iterator RAI = MI; ++RAI;
        MCSymbol* Label = InsertLabel(*MI->getParent(), RAI, MI->getDebugLoc());
        FI.addSafePoint(GC::PostCall, Label, MI->getDebugLoc());
      }

  return false;
}
Example #27
SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg (
                                                       MachineFunction *MF,
                                                       unsigned FrameIndex,
                                                       unsigned SubIdx) {
  if (!EnableSpillSGPRToVGPR)
    return SpilledReg();

  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
  Offset += SubIdx * 4;

  unsigned LaneVGPRIdx = Offset / (64 * 4);
  unsigned Lane = (Offset / 4) % 64;

  struct SpilledReg Spill;
  Spill.Lane = Lane;

  if (!LaneVGPRs.count(LaneVGPRIdx)) {
    unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);

    if (LaneVGPR == AMDGPU::NoRegister)
      // We have no VGPRs left for spilling SGPRs.
      return Spill;

    LaneVGPRs[LaneVGPRIdx] = LaneVGPR;

    // Add this register as live-in to all blocks to avoid machine verifier
    // complaining about use of an undefined physical register.
    for (MachineFunction::iterator BI = MF->begin(), BE = MF->end();
         BI != BE; ++BI) {
      BI->addLiveIn(LaneVGPR);
    }
  }

  Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  return Spill;
}
Example #28
bool MOVToLEAPass::runOnMachineFunction(MachineFunction &Fn) {
  const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
  bool Changed = false;
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
      ++PreMOVtoLEAInstructionCount;
      if (I->getNumOperands() != 2 ||
          !I->getOperand(0).isReg() || !I->getOperand(1).isReg()) {
        ++I;
        continue;
      }

      unsigned leaOpc;
      if (I->getOpcode() == X86::MOV32rr) {
        leaOpc = X86::LEA32r;
      } else if (I->getOpcode() == X86::MOV64rr) {
        leaOpc = X86::LEA64r;
      } else {
        ++I;
        continue;
      }

      unsigned int Roll = RandomNumberGenerator::Generator().Random(100);
      ++MOVCandidates;
      if (Roll >= multicompiler::getFunctionOption(
            multicompiler::MOVToLEAPercentage, *Fn.getFunction())) {
        ++I;
        continue;
      }

      ++ReplacedMOV;
      MachineBasicBlock::iterator J = I;
      ++I;
      addRegOffset(BuildMI(*BB, J, J->getDebugLoc(),
                           TII->get(leaOpc), J->getOperand(0).getReg()),
                   J->getOperand(1).getReg(), false, 0);
      J->eraseFromParent();
      Changed = true;
    }
  return Changed;
}
Example #29
bool OptimizeExts::runOnMachineFunction(MachineFunction &MF) {
  TM = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;
    LocalMIs.clear();
    for (MachineBasicBlock::iterator MII = I->begin(), ME = I->end(); MII != ME;
         ++MII) {
      MachineInstr *MI = &*MII;
      Changed |= OptimizeInstr(MI, MBB, LocalMIs);
    }
  }

  return Changed;
}
bool NVPTXPrologEpilogPass::runOnMachineFunction(MachineFunction &MF) {
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameLowering &TFI = *TM.getFrameLowering();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
  bool Modified = false;

  calculateFrameObjectOffsets(MF);

  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
      MachineInstr *MI = I;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        if (!MI->getOperand(i).isFI())
          continue;
        TRI.eliminateFrameIndex(MI, 0, i, nullptr);
        Modified = true;
      }
    }
  }

  // Add function prolog/epilog
  TFI.emitPrologue(MF);

  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    // If last instruction is a return instruction, add an epilogue
    if (!I->empty() && I->back().isReturn())
      TFI.emitEpilogue(MF, *I);
  }

  return Modified;
}
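calculateFrameObjectOffsets is not shown above. A minimal sketch of what such a routine might do, assigning each live frame object the next suitably aligned offset; the real NVPTX pass may differ in growth direction and alignment handling:

// Illustrative sketch only; not the NVPTX implementation.
static void layoutFrameObjectsSketch(MachineFunction &MF) {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int64_t Offset = 0;
  for (int i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
    if (MFI->isDeadObjectIndex(i))
      continue;
    int64_t Align = MFI->getObjectAlignment(i);
    Offset = (Offset + Align - 1) / Align * Align;  // round up to alignment
    MFI->setObjectOffset(i, Offset);
    Offset += MFI->getObjectSize(i);
  }
  MFI->setStackSize(Offset);
}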