MachineBasicBlock::iterator
Filler::findDelayInstr(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator slot)
{
  SmallSet<unsigned, 32> RegDefs;
  SmallSet<unsigned, 32> RegUses;
  bool sawLoad = false;
  bool sawStore = false;

  MachineBasicBlock::iterator I = slot;

  if (slot->getOpcode() == SP::RET)
    return MBB.end();

  if (slot->getOpcode() == SP::RETL) {
    --I;
    if (I->getOpcode() != SP::RESTORErr)
      return MBB.end();
    // change retl to ret
    slot->setDesc(TII->get(SP::RET));
    return I;
  }

  // Call's delay filler can def some of call's uses.
  if (slot->getDesc().isCall())
    insertCallUses(slot, RegUses);
  else
    insertDefsUses(slot, RegDefs, RegUses);

  bool done = false;

  while (!done) {
    done = (I == MBB.begin());

    if (!done)
      --I;

    // skip debug value
    if (I->isDebugValue())
      continue;

    if (I->hasUnmodeledSideEffects()
        || I->isInlineAsm()
        || I->isLabel()
        || I->getDesc().hasDelaySlot()
        || isDelayFiller(MBB, I))
      break;

    if (delayHasHazard(I, sawLoad, sawStore, RegDefs, RegUses)) {
      insertDefsUses(I, RegDefs, RegUses);
      continue;
    }

    return I;
  }
  return MBB.end();
}
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegDefs,
                            SmallSet<unsigned, 32>& RegUses) {
  unsigned I, E = MI->getDesc().getNumOperands();

  for (I = 0; I != E; ++I)
    insertDefUse(MI->getOperand(I), RegDefs, RegUses);

  // If MI is a call, add RA to RegDefs to prevent users of RA from going into
  // delay slot.
  if (MI->isCall()) {
    RegDefs.insert(CoffeeCL::LR);
    return;
  }

  // Return if MI is a return.
  if (MI->isReturn())
    return;

  // Examine the implicit operands. Exclude register AT which is in the list of
  // clobbered registers of branch instructions.
  E = MI->getNumOperands();
  for (; I != E; ++I)
    insertDefUse(MI->getOperand(I), RegDefs, RegUses);
}
Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
    return None;

  AtomicOrdering Ordering =
    static_cast<AtomicOrdering>(MI->getOperand(0).getImm());

  SyncScope::ID SSID = static_cast<SyncScope::ID>(MI->getOperand(1).getImm());
  auto ScopeOrNone = toSIAtomicScope(SSID, SIAtomicAddrSpace::ATOMIC);
  if (!ScopeOrNone) {
    reportUnsupported(MI, "Unsupported atomic synchronization scope");
    return None;
  }

  SIAtomicScope Scope = SIAtomicScope::NONE;
  SIAtomicAddrSpace OrderingAddrSpace = SIAtomicAddrSpace::NONE;
  bool IsCrossAddressSpaceOrdering = false;
  std::tie(Scope, OrderingAddrSpace, IsCrossAddressSpaceOrdering) =
    ScopeOrNone.getValue();

  if ((OrderingAddrSpace == SIAtomicAddrSpace::NONE) ||
      ((OrderingAddrSpace & SIAtomicAddrSpace::ATOMIC) != OrderingAddrSpace)) {
    reportUnsupported(MI, "Unsupported atomic address space");
    return None;
  }

  return SIMemOpInfo(Ordering, Scope, OrderingAddrSpace,
                     SIAtomicAddrSpace::ATOMIC, IsCrossAddressSpaceOrdering);
}
bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
  bool Changed = false;

  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end();) {
    const TargetInstrDesc& Tid = I->getDesc();

    switch(Tid.getOpcode()) {
    default:
      ++I;
      continue;
    case Mips::BuildPairF64:
      ExpandBuildPairF64(MBB, I);
      break;
    case Mips::ExtractElementF64:
      ExpandExtractElementF64(MBB, I);
      break;
    }

    // delete original instr
    MBB.erase(I++);
    Changed = true;
  }

  return Changed;
}
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegDefs,
                            SmallSet<unsigned, 32>& RegUses) {
  // If MI is a call or return, just examine the explicit non-variadic operands.
  MCInstrDesc MCID = MI->getDesc();
  unsigned e = MI->isCall() || MI->isReturn() ? MCID.getNumOperands() :
                                                MI->getNumOperands();

  // Add RA to RegDefs to prevent users of RA from going into delay slot.
  if (MI->isCall())
    RegDefs.insert(Mips::RA);

  for (unsigned i = 0; i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    unsigned Reg;

    if (!MO.isReg() || !(Reg = MO.getReg()))
      continue;

    if (MO.isDef())
      RegDefs.insert(Reg);
    else if (MO.isUse())
      RegUses.insert(Reg);
  }
}
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// We assume there is only one delay slot per delayed instruction.
///
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;

  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
    if (I->getDesc().hasDelaySlot()) {
      MachineBasicBlock::iterator D = MBB.end();
      MachineBasicBlock::iterator J = I;

      if (!DisableDelaySlotFiller)
        D = findDelayInstr(MBB, I);

      ++FilledSlots;
      Changed = true;

      if (D == MBB.end())
        BuildMI(MBB, ++J, I->getDebugLoc(), TII->get(SP::NOP));
      else
        MBB.splice(++J, &MBB, D);

      unsigned structSize = 0;
      if (needsUnimp(I, structSize)) {
        MachineBasicBlock::iterator J = I;
        ++J; // skip the delay filler.
        BuildMI(MBB, ++J, I->getDebugLoc(),
                TII->get(SP::UNIMP)).addImm(structSize);
      }
    }
  return Changed;
}
bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
  bool Changed = false;

  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end();) {
    const MCInstrDesc& MCid = I->getDesc();

    switch(MCid.getOpcode()) {
    default:
      ++I;
      continue;
    case Mips::SETGP2:
      // Convert "setgp2 $globalreg, $t9" to "addu $globalreg, $v0, $t9"
      BuildMI(MBB, I, I->getDebugLoc(), TII->get(Mips::ADDu),
              I->getOperand(0).getReg())
        .addReg(Mips::V0).addReg(I->getOperand(1).getReg());
      break;
    case Mips::BuildPairF64:
      ExpandBuildPairF64(MBB, I);
      break;
    case Mips::ExtractElementF64:
      ExpandExtractElementF64(MBB, I);
      break;
    }

    // delete original instr
    MBB.erase(I++);
    Changed = true;
  }

  return Changed;
}
void SystemZRegisterInfo::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  SystemZMachineFunctionInfo *SystemZMFI =
    MF.getInfo<SystemZMachineFunctionInfo>();
  unsigned RetOpcode = MBBI->getOpcode();

  switch (RetOpcode) {
  case SystemZ::RET: break;  // These are ok
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo
  // Note that area for callee-saved stuff is already allocated, thus we need to
  // 'undo' the stack movement.
  uint64_t StackSize =
    MFI->getStackSize() - SystemZMFI->getCalleeSavedFrameSize();
  uint64_t NumBytes = StackSize - TFI.getOffsetOfLocalArea();

  // Skip the final terminator instruction.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    --MBBI;
    if (!PI->getDesc().isTerminator())
      break;
  }

  // During callee-saved restores emission the stack frame was not yet finalized
  // (and thus - the stack size was unknown). Tune the offset having full stack
  // size in hands.
  if (StackSize || MFI->hasCalls()) {
    assert((MBBI->getOpcode() == SystemZ::MOV64rmm ||
            MBBI->getOpcode() == SystemZ::MOV64rm) &&
           "Expected to see callee-save register restore code");
    assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
           "Invalid stack frame calculation!");

    unsigned i = 0;
    MachineInstr &MI = *MBBI;
    while (!MI.getOperand(i).isImm()) {
      ++i;
      assert(i < MI.getNumOperands() && "Unexpected restore code!");
    }

    uint64_t Offset = NumBytes + MI.getOperand(i).getImm();
    // If Offset does not fit into 20-bit signed displacement field we need to
    // emit some additional code...
    if (Offset > 524287) {
      // Fold the displacement into load instruction as much as possible.
      NumBytes = Offset - 524287;
      Offset = 524287;
      emitSPUpdate(MBB, MBBI, NumBytes, TII);
    }

    MI.getOperand(i).ChangeToImmediate(Offset);
  }
}
void MSP430RegisterInfo::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  MSP430MachineFunctionInfo *MSP430FI = MF.getInfo<MSP430MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  case MSP430::RET: break;  // These are ok
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo
  uint64_t StackSize = MFI->getStackSize();
  unsigned CSSize = MSP430FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate required stack adjustment
    uint64_t FrameSize = StackSize - 2;
    NumBytes = FrameSize - CSSize;

    // pop FPW.
    BuildMI(MBB, MBBI, DL, TII.get(MSP430::POP16r), MSP430::FPW);
  } else
    NumBytes = StackSize - CSSize;

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();
    if (Opc != MSP430::POP16r && !PI->getDesc().isTerminator())
      break;
    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD16ri or SUB16ri of SPW immediately before this
  // instruction, merge the two instructions.
  //if (NumBytes || MFI->hasVarSizedObjects())
  //  mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  if (MFI->hasVarSizedObjects()) {
    assert(0 && "Not implemented yet!");
  } else {
    // adjust stack pointer back: SPW += numbytes
    if (NumBytes) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL, TII.get(MSP430::ADD16ri), MSP430::SPW)
        .addReg(MSP430::SPW).addImm(NumBytes);
      // The SRW implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  }
}
void GucRegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  GucMachineFunctionInfo *GucFI = MF.getInfo<GucMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  case Guc::RET:
  case Guc::RETI: break;  // These are ok
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo
  uint64_t StackSize = MFI->getStackSize();
  unsigned CSSize = GucFI->getCalleeSavedFrameSize();
  uint64_t NumBytes = StackSize - CSSize;

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();
    if (Opc != Guc::POPr && !PI->getDesc().isTerminator())
      break;
    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADDri or SUBri of SP immediately before this
  // instruction, merge the two instructions.
  //if (NumBytes || MFI->hasVarSizedObjects())
  //  mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  if (MFI->hasVarSizedObjects()) {
    assert(1 && "Unexpected FP");
    BuildMI(MBB, MBBI, DL, TII.get(Guc::MOVrs), Guc::SP).addReg(Guc::FP);

    if (CSSize) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL, TII.get(Guc::SUBsi), Guc::SP)
        .addReg(Guc::SP).addImm(CSSize);
      // The FLG implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    // adjust stack pointer back: SP += numbytes
    if (NumBytes) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL, TII.get(Guc::ADDsi), Guc::SP)
        .addReg(Guc::SP).addImm(NumBytes);
      // The FLG implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  }
}
void MachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(),
                                 BBE = MF.end(); BBI != BBE; ++BBI)
    for (MachineBasicBlock::iterator MI = BBI->begin(),
                                     ME = BBI->end(); MI != ME; ++MI)
      if (MI->getDesc().isCall())
        VisitCallPoint(MI);
}
bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
                            bool &sawLoad,
                            bool &sawStore,
                            SmallSet<unsigned, 32> &RegDefs,
                            SmallSet<unsigned, 32> &RegUses)
{
  if (candidate->isImplicitDef() || candidate->isKill())
    return true;

  if (candidate->getDesc().mayLoad()) {
    sawLoad = true;
    if (sawStore)
      return true;
  }

  if (candidate->getDesc().mayStore()) {
    if (sawStore)
      return true;
    sawStore = true;
    if (sawLoad)
      return true;
  }

  for (unsigned i = 0, e = candidate->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = candidate->getOperand(i);
    if (!MO.isReg())
      continue; // skip

    unsigned Reg = MO.getReg();

    if (MO.isDef()) {
      // check whether Reg is defined or used before delay slot.
      if (IsRegInSet(RegDefs, Reg) || IsRegInSet(RegUses, Reg))
        return true;
    }
    if (MO.isUse()) {
      // check whether Reg is defined before delay slot.
      if (IsRegInSet(RegDefs, Reg))
        return true;
    }
  }
  return false;
}
bool ErlangGC::findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE;
       ++BBI)
    for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end();
         MI != ME; ++MI)
      if (MI->getDesc().isCall()) {
        // Do not treat tail call sites as safe points.
        if (MI->getDesc().isTerminator())
          continue;

        /* Code copied from VisitCallPoint(...) */
        MachineBasicBlock::iterator RAI = MI; ++RAI;
        MCSymbol* Label = InsertLabel(*MI->getParent(), RAI, MI->getDebugLoc());
        FI.addSafePoint(GC::PostCall, Label, MI->getDebugLoc());
      }

  return false;
}
static MachineBasicBlock::iterator firstNonBranchInst(MachineBasicBlock *BB,
                                                  const TargetInstrInfo *TII) {
  MachineBasicBlock::iterator I = BB->end();
  while (I != BB->begin()) {
    --I;
    if (!I->getDesc().isBranch())
      break;
  }
  return I;
}
bool Thumb2ITBlockPass::InsertITBlock(MachineInstr *First, MachineInstr *Last) {
  if (First == Last)
    return false;

  bool Modified = false;
  MachineBasicBlock *MBB = First->getParent();
  MachineBasicBlock::iterator MBBI = First;
  MachineBasicBlock::iterator E = Last;

  if (First->getDesc().isBranch() || First->getDesc().isReturn())
    return false;

  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getPredicate(First, PredReg);
  if (CC == ARMCC::AL)
    return Modified;

  // Move uses of the CPSR together if possible.
  ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);

  do {
    ++MBBI;
    if (MBBI->getDesc().isBranch() || MBBI->getDesc().isReturn())
      return Modified;
    MachineInstr *NMI = &*MBBI;
    unsigned NPredReg = 0;
    ARMCC::CondCodes NCC = getPredicate(NMI, NPredReg);
    if (NCC != CC && NCC != OCC) {
      if (NCC != ARMCC::AL)
        return Modified;
      assert(MBBI != E);
      bool Done = false;
      if (!MoveCPSRUseUp(*MBB, MBBI, E, PredReg, CC, OCC, Done))
        return Modified;
      Modified = true;
      if (Done)
        MBBI = E;
    }
  } while (MBBI != E);

  return true;
}
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// Currently, we fill delay slots with NOPs. We assume there is only one
/// delay slot per delayed instruction.
///
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;
  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
    if (I->getDesc().hasDelaySlot()) {
      MachineBasicBlock::iterator J = I;
      ++J;
      BuildMI(MBB, J, DebugLoc(), TII->get(SP::NOP));
      ++FilledSlots;
      Changed = true;
    }
  return Changed;
}
bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();

  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Mips::RetRA16:
    ExpandRetRA16(MBB, MI, Mips::JrcRa16);
    break;
  }

  MBB.erase(MI);
  return true;
}
Optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
    const MachineBasicBlock::iterator &MI) const {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(MI->mayLoad() && MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo();

  return constructFromMIWithMMO(MI);
}
bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();

  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Mips::RetRA:
    expandRetRA(MBB, MI, Mips::RET);
    break;
  case Mips::PseudoCVT_S_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_S_W, Mips::MTC1, false);
    break;
  case Mips::PseudoCVT_D32_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_D32_W, Mips::MTC1, false);
    break;
  case Mips::PseudoCVT_S_L:
    expandCvtFPInt(MBB, MI, Mips::CVT_S_L, Mips::DMTC1, true);
    break;
  case Mips::PseudoCVT_D64_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_D64_W, Mips::MTC1, true);
    break;
  case Mips::PseudoCVT_D64_L:
    expandCvtFPInt(MBB, MI, Mips::CVT_D64_L, Mips::DMTC1, true);
    break;
  case Mips::BuildPairF64:
    expandBuildPairF64(MBB, MI, false);
    break;
  case Mips::BuildPairF64_64:
    expandBuildPairF64(MBB, MI, true);
    break;
  case Mips::ExtractElementF64:
    expandExtractElementF64(MBB, MI, false);
    break;
  case Mips::ExtractElementF64_64:
    expandExtractElementF64(MBB, MI, true);
    break;
  case Mips::PseudoLDC1:
    expandDPLoadStore(MBB, MI, Mips::LDC1, Mips::LWC1);
    break;
  case Mips::PseudoSDC1:
    expandDPLoadStore(MBB, MI, Mips::SDC1, Mips::SWC1);
    break;
  case Mips::MIPSeh_return32:
  case Mips::MIPSeh_return64:
    expandEhReturn(MBB, MI);
    break;
  }

  MBB.erase(MI);
  return true;
}
// Cpu0InstrInfo::expandPostRAPseudo
/// Expand Pseudo instructions into real backend instructions
bool Cpu0InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();

  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Cpu0::RetLR:
    ExpandRetLR(MBB, MI, Cpu0::RET);
    break;
  }

  MBB.erase(MI);
  return true;
}
/// CopyAndPredicateBlock - Copy and predicate instructions from source BB to
/// the destination block. Skip end of block branches if IgnoreBr is true.
void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
                                        SmallVectorImpl<MachineOperand> &Cond,
                                        bool IgnoreBr) {
  MachineFunction &MF = *ToBBI.BB->getParent();

  for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
         E = FromBBI.BB->end(); I != E; ++I) {
    const TargetInstrDesc &TID = I->getDesc();
    bool isPredicated = TII->isPredicated(I);
    // Do not copy the end of the block branches.
    if (IgnoreBr && !isPredicated && TID.isBranch())
      break;

    MachineInstr *MI = MF.CloneMachineInstr(I);
    ToBBI.BB->insert(ToBBI.BB->end(), MI);
    ToBBI.NonPredSize++;

    if (!isPredicated)
      if (!TII->PredicateInstruction(MI, Cond)) {
#ifndef NDEBUG
        dbgs() << "Unable to predicate " << *I << "!\n";
#endif
        llvm_unreachable(0);
      }
  }

  std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
                                         FromBBI.BB->succ_end());
  MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
  MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;

  for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
    MachineBasicBlock *Succ = Succs[i];
    // Fallthrough edge can't be transferred.
    if (Succ == FallThrough)
      continue;
    ToBBI.BB->addSuccessor(Succ);
  }

  std::copy(FromBBI.Predicate.begin(), FromBBI.Predicate.end(),
            std::back_inserter(ToBBI.Predicate));
  std::copy(Cond.begin(), Cond.end(), std::back_inserter(ToBBI.Predicate));

  ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
  ToBBI.IsAnalyzed = false;

  NumDupBBs++;
}
/// estimateRSStackSizeLimit - Look at each instruction that references stack
/// frames and return the stack size limit beyond which some of these
/// instructions will require a scratch register during their expansion later.
// FIXME: Move to TII?
static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
                                         const TargetFrameLowering *TFI) {
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned Limit = (1 << 12) - 1;
  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) {
    for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!I->getOperand(i).isFI())
          continue;

        // When using ADDri to get the address of a stack object, 255 is the
        // largest offset guaranteed to fit in the immediate offset.
        if (I->getOpcode() == ARM::ADDri) {
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        }

        // Otherwise check the addressing mode.
        switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
        case ARMII::AddrMode3:
        case ARMII::AddrModeT2_i8:
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode5:
        case ARMII::AddrModeT2_i8s4:
          Limit = std::min(Limit, ((1U << 8) - 1) * 4);
          break;
        case ARMII::AddrModeT2_i12:
          // i12 supports only positive offset so these will be converted to
          // i8 opcodes. See llvm::rewriteT2FrameIndex.
          if (TFI->hasFP(MF) && AFI->hasStackFrame())
            Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode4:
        case ARMII::AddrMode6:
          // Addressing modes 4 & 6 (load/store) instructions can't encode an
          // immediate offset for stack references.
          return 0;
        default:
          break;
        }
        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}
bool MipsInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();

  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Mips::BuildPairF64:
    ExpandBuildPairF64(MBB, MI);
    break;
  case Mips::ExtractElementF64:
    ExpandExtractElementF64(MBB, MI);
    break;
  }

  MBB.erase(MI);
  return true;
}
bool VmkitGC::findCustomSafePoints(GCFunctionInfo& FI, MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE;
       ++BBI) {
    for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end();
         MI != ME; ++MI) {
      if (MI->getDesc().isCall()) {
        MachineBasicBlock::iterator RAI = MI; ++RAI;
        MCSymbol* Label = InsertLabel(*MI->getParent(), RAI, MI->getDebugLoc());
        FI.addSafePoint(GC::PostCall, Label, MI->getDebugLoc());
      } else if (MI->getDebugLoc().getCol() == 1) {
        MCSymbol* Label = InsertLabel(*MI->getParent(), MI, MI->getDebugLoc());
        FI.addSafePoint(GC::Loop, Label, MI->getDebugLoc());
      }
    }
  }
  return false;
}
/// PropagateForward - Traverse forward and look for the kill of OldReg. If
/// it can successfully update all of the uses with NewReg, do so and
/// return true.
bool StackSlotColoring::PropagateForward(MachineBasicBlock::iterator MII,
                                         MachineBasicBlock *MBB,
                                         unsigned OldReg, unsigned NewReg) {
  if (MII == MBB->end())
    return false;

  SmallVector<MachineOperand*, 4> Uses;
  while (++MII != MBB->end()) {
    bool FoundKill = false;
    const TargetInstrDesc &TID = MII->getDesc();
    for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MII->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (Reg == OldReg) {
        if (MO.isDef() || MO.isImplicit())
          return false;

        // Abort if the use is actually a sub-register use. We don't have
        // enough information to figure out if it is really legal.
        if (MO.getSubReg())
          return false;

        const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);
        if (RC && !RC->contains(NewReg))
          return false;

        if (MO.isKill())
          FoundKill = true;
        Uses.push_back(&MO);
      } else if (TRI->regsOverlap(Reg, NewReg) ||
                 TRI->regsOverlap(Reg, OldReg))
        return false;
    }
    if (FoundKill) {
      for (unsigned i = 0, e = Uses.size(); i != e; ++i)
        Uses[i]->setReg(NewReg);
      return true;
    }
  }
  return false;
}
// DSPSEInstrInfo::expandPostRAPseudo
/// Expand Pseudo instructions into real backend instructions
bool DSPSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();

  switch (MI->getDesc().getOpcode()) {
  default:
    return false;
  case DSP::RetLR:
    ExpandRetLR(MBB, MI, DSP::Ret);
    break;
  case DSP::MovVR:
    ExpandMovVR(MBB, MI, DSP::MovG2V40);
    break;
  case DSP::MovGR:
    ExpandMovGR(MBB, MI, DSP::MovIGH, DSP::MovIGL);
    break;
  }

  MBB.erase(MI);
  return true;
}
bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
{
  if (!I->getDesc().isCall())
    return false;

  unsigned structSizeOpNum = 0;
  switch (I->getOpcode()) {
  default: llvm_unreachable("Unknown call opcode.");
  case SP::CALL: structSizeOpNum = 1; break;
  case SP::JMPLrr:
  case SP::JMPLri: structSizeOpNum = 2; break;
  }

  const MachineOperand &MO = I->getOperand(structSizeOpNum);
  if (!MO.isImm())
    return false;
  StructSize = MO.getImm();
  return true;
}
// These are the common checks that need to be performed
// to determine if
// 1. compare instruction can be moved before jump.
// 2. feeder to the compare instruction can be moved before jump.
static bool commonChecksToProhibitNewValueJump(bool afterRA,
                                               MachineBasicBlock::iterator MII) {

  // If store in path, bail out.
  if (MII->getDesc().mayStore())
    return false;

  // if call in path, bail out.
  if (MII->getOpcode() == Hexagon::CALLv3)
    return false;

  // if NVJ is running prior to RA, do the following checks.
  if (!afterRA) {
    // The following Target Opcode instructions are spurious
    // to new value jump. If they are in the path, bail out.
    // KILL sets kill flag on the opcode. It also sets up a
    // single register, out of pair.
    //    %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
    //    %R0<def> = KILL %R0, %D0<imp-use,kill>
    //    %P0<def> = CMPEQri %R0<kill>, 0
    // PHI can be anything after RA.
    // COPY can rematerialize things in between feeder, compare and nvj.
    if (MII->getOpcode() == TargetOpcode::KILL ||
        MII->getOpcode() == TargetOpcode::PHI  ||
        MII->getOpcode() == TargetOpcode::COPY)
      return false;

    // The following pseudo Hexagon instructions set "use" and "def"
    // of registers by individual passes in the backend. At this time,
    // we don't know the scope of usage and definitions of these
    // instructions.
    if (MII->getOpcode() == Hexagon::TFR_condset_rr ||
        MII->getOpcode() == Hexagon::TFR_condset_ii ||
        MII->getOpcode() == Hexagon::TFR_condset_ri ||
        MII->getOpcode() == Hexagon::TFR_condset_ir ||
        MII->getOpcode() == Hexagon::LDriw_pred     ||
        MII->getOpcode() == Hexagon::STriw_pred)
      return false;
  }

  return true;
}
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// Currently, we fill delay slots with NOPs. We assume there is only one
/// delay slot per delayed instruction.
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;
  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
    if (I->getDesc().hasDelaySlot()) {
      MachineBasicBlock::iterator D = MBB.end();
      MachineBasicBlock::iterator J = I;

      if (!DisableDelaySlotFiller)
        D = findDelayInstr(MBB, I);

      ++FilledSlots;
      Changed = true;

      if (D == MBB.end())
        BuildMI(MBB, ++J, I->getDebugLoc(), TII->get(MBlaze::NOP));
      else
        MBB.splice(++J, &MBB, D);
    }
  return Changed;
}
/// ReplaceUsesOfBlockWith - Given a machine basic block that branched to
/// 'Old', change the code and CFG so that it branches to 'New' instead.
void MachineBasicBlock::ReplaceUsesOfBlockWith(MachineBasicBlock *Old,
                                               MachineBasicBlock *New) {
  assert(Old != New && "Cannot replace self with self!");

  MachineBasicBlock::iterator I = end();
  while (I != begin()) {
    --I;
    if (!I->getDesc().isTerminator())
      break;

    // Scan the operands of this machine instruction, replacing any uses of Old
    // with New.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
      if (I->getOperand(i).isMBB() &&
          I->getOperand(i).getMBB() == Old)
        I->getOperand(i).setMBB(New);
  }

  // Update the successor information.
  replaceSuccessor(Old, New);
}