void SystemZRegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB
  const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SystemZMachineFunctionInfo *SystemZMFI =
    MF.getInfo<SystemZMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Get the number of bytes to allocate from the FrameInfo.
  // Note that area for callee-saved stuff is already allocated, thus we need
  // to 'undo' the stack movement.
  uint64_t StackSize = MFI->getStackSize();
  StackSize -= SystemZMFI->getCalleeSavedFrameSize();

  uint64_t NumBytes = StackSize - TFI.getOffsetOfLocalArea();

  // Skip the callee-saved push instructions.
  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == SystemZ::MOV64mr ||
          MBBI->getOpcode() == SystemZ::MOV64mrm))
    ++MBBI;

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // adjust stack pointer: R15 -= numbytes
  if (StackSize || MFI->hasCalls()) {
    assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
           "Invalid stack frame calculation!");
    emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, TII);
  }

  if (hasFP(MF)) {
    // Update R11 with the new base value...
    BuildMI(MBB, MBBI, DL, TII.get(SystemZ::MOV64rr), SystemZ::R11D)
      .addReg(SystemZ::R15D);

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(SystemZ::R11D);
  }
}
// runOnMachineFunction - Pass entry point from PassManager.
bool ARM64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
  // Early exit if pass disabled.
  if (!AdvSIMDScalar)
    return false;

  bool Changed = false;
  DEBUG(dbgs() << "***** ARM64AdvSIMDScalar *****\n");

  const TargetMachine &TM = mf.getTarget();
  MRI = &mf.getRegInfo();
  TII = static_cast<const ARM64InstrInfo *>(TM.getInstrInfo());

  // Just check things on a one-block-at-a-time basis.
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
    if (processMachineBasicBlock(I))
      Changed = true;

  return Changed;
}
virtual bool runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
       BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    MachineBasicBlock::iterator I = MBB.begin();
    if (I->getOpcode() == AMDGPU::CF_ALU)
      continue; // BB was already parsed
    for (MachineBasicBlock::iterator E = MBB.end(); I != E;) {
      if (isALU(I))
        I = MakeALUClause(MBB, I);
      else
        ++I;
    }
  }
  return false;
}
bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequencies.resize(mf.getNumBlockIDs());
  MachineBlockFrequencyInfo &MBFI = getAnalysis<MachineBlockFrequencyInfo>();
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
    unsigned Num = I->getNumber();
    BlockFrequencies[Num] = MBFI.getBlockFreq(I);
  }

  // We never change the function.
  return false;
}
bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];

  // Compute total ingoing and outgoing block frequencies for all bundles.
  for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
    float Freq = getBlockFrequency(I);
    unsigned Num = I->getNumber();
    nodes[bundles->getBundle(Num, 1)].Frequency[0] += Freq;
    nodes[bundles->getBundle(Num, 0)].Frequency[1] += Freq;
  }

  // We never change the function.
  return false;
}
// Align all targets of indirect branches on bundle size. Used only if target
// is NaCl.
void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) {
  // Align all blocks that are jumped to through jump table.
  if (MachineJumpTableInfo *JtInfo = MF.getJumpTableInfo()) {
    const std::vector<MachineJumpTableEntry> &JT = JtInfo->getJumpTables();
    for (unsigned I = 0; I < JT.size(); ++I) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[I].MBBs;

      for (unsigned J = 0; J < MBBs.size(); ++J)
        MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
    }
  }

  // If basic block address is taken, block can be target of indirect branch.
  for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
       MBB != E; ++MBB) {
    if (MBB->hasAddressTaken())
      MBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
  }
}
void HexagonGenPredicate::collectPredicateGPR(MachineFunction &MF) {
  for (MachineFunction::iterator A = MF.begin(), Z = MF.end(); A != Z; ++A) {
    MachineBasicBlock &B = *A;
    for (MachineBasicBlock::iterator I = B.begin(), E = B.end(); I != E; ++I) {
      MachineInstr *MI = &*I;
      unsigned Opc = MI->getOpcode();
      switch (Opc) {
      case Hexagon::C2_tfrpr:
      case TargetOpcode::COPY:
        if (isPredReg(MI->getOperand(1).getReg())) {
          Register RD = MI->getOperand(0);
          if (TargetRegisterInfo::isVirtualRegister(RD.R))
            PredGPRs.insert(RD);
        }
        break;
      }
    }
  }
}
bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) {
  DEBUG(dbgs() << LLVM_FUNCTION_NAME << "\n");
  const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass;
  bool Changed = false;
  VectOfInst Erase;

  // First, replace copies
  //   IntR = PredR1
  //   PredR2 = IntR
  // with
  //   PredR2 = PredR1
  // Such sequences can be generated when a copy-into-pred is generated from
  // a gpr register holding a result of a convertible instruction. After
  // the convertible instruction is converted, its predicate result will be
  // copied back into the original gpr.

  for (MachineFunction::iterator A = MF.begin(), Z = MF.end(); A != Z; ++A) {
    MachineBasicBlock &B = *A;
    for (MachineBasicBlock::iterator I = B.begin(), E = B.end(); I != E; ++I) {
      if (I->getOpcode() != TargetOpcode::COPY)
        continue;
      Register DR = I->getOperand(0);
      Register SR = I->getOperand(1);
      if (!TargetRegisterInfo::isVirtualRegister(DR.R))
        continue;
      if (!TargetRegisterInfo::isVirtualRegister(SR.R))
        continue;
      if (MRI->getRegClass(DR.R) != PredRC)
        continue;
      if (MRI->getRegClass(SR.R) != PredRC)
        continue;
      assert(!DR.S && !SR.S && "Unexpected subregister");
      MRI->replaceRegWith(DR.R, SR.R);
      Erase.insert(I);
      Changed = true;
    }
  }

  for (VectOfInst::iterator I = Erase.begin(), E = Erase.end(); I != E; ++I)
    (*I)->eraseFromParent();

  return Changed;
}
bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TFL = MF.getSubtarget().getFrameLowering();
  MRI = &MF.getRegInfo();

  if (!shouldPerformTransformation(MF))
    return false;

  int FrameSetupOpcode = TII->getCallFrameSetupOpcode();

  bool Changed = false;

  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
      if (I->getOpcode() == FrameSetupOpcode)
        Changed |= adjustCallSequence(MF, *BB, I);

  return Changed;
}
bool runOnMachineFunction(MachineFunction &MF) override {
  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  TII = ST.getInstrInfo();

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
       BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    MachineBasicBlock::iterator I = MBB.begin();
    if (I->getOpcode() == AMDGPU::CF_ALU)
      continue; // BB was already parsed
    for (MachineBasicBlock::iterator E = MBB.end(); I != E;) {
      if (isALU(I))
        I = MakeALUClause(MBB, I);
      else
        ++I;
    }
  }
  return false;
}
/// Check if the CFG of \p MF is irreducible.
static bool isIrreducibleCFG(const MachineFunction &MF,
                             const MachineLoopInfo &MLI) {
  const MachineBasicBlock *Entry = &*MF.begin();
  ReversePostOrderTraversal<const MachineBasicBlock *> RPOT(Entry);
  BitVector VisitedBB(MF.getNumBlockIDs());
  for (const MachineBasicBlock *MBB : RPOT) {
    VisitedBB.set(MBB->getNumber());
    for (const MachineBasicBlock *SuccBB : MBB->successors()) {
      if (!VisitedBB.test(SuccBB->getNumber()))
        continue;
      // We already visited SuccBB, thus MBB->SuccBB must be a backedge.
      // Check that the head matches what we have in the loop information.
      // Otherwise, we have an irreducible graph.
      if (!isProperBackedge(MLI, MBB, SuccBB))
        return true;
    }
  }
  return false;
}
bool MipsHazardSchedule::runOnMachineFunction(MachineFunction &MF) {
  const MipsSubtarget *STI =
      &static_cast<const MipsSubtarget &>(MF.getSubtarget());

  // Forbidden slot hazards are only defined for MIPSR6 but not microMIPSR6.
  if (!STI->hasMips32r6() || STI->inMicroMipsMode())
    return false;

  bool Changed = false;
  const MipsInstrInfo *TII = STI->getInstrInfo();

  for (MachineFunction::iterator FI = MF.begin(); FI != MF.end(); ++FI) {
    for (Iter I = FI->begin(); I != FI->end(); ++I) {
      // Forbidden slot hazard handling. Use lookahead over state.
      if (!TII->HasForbiddenSlot(*I))
        continue;

      Iter Inst;
      bool LastInstInFunction =
          std::next(I) == FI->end() && std::next(FI) == MF.end();

      if (!LastInstInFunction) {
        if (std::next(I) != FI->end()) {
          // Start looking from the next instruction in the basic block.
          Inst = getNextMachineInstr(std::next(I));
        } else {
          // Next instruction in the physical successor basic block.
          Inst = getNextMachineInstr(I);
        }
      }

      if (LastInstInFunction || !TII->SafeInForbiddenSlot(*Inst)) {
        Changed = true;
        MIBundleBuilder(&*I)
            .append(BuildMI(MF, I->getDebugLoc(), TII->get(Mips::NOP)));
        NumInsertedNops++;
      }
    }
  }
  return Changed;
}
bool Thumb2ITBlockPass::runOnMachineFunction(MachineFunction &Fn) {
  const TargetMachine &TM = Fn.getTarget();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
  if (!AFI->isThumbFunction())
    return false;

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E; ) {
    MachineBasicBlock &MBB = *MFI;
    ++MFI;
    if (PreRegAlloc)
      Modified |= InsertITBlocks(MBB);
    else
      Modified |= InsertITInstructions(MBB);
  }

  return Modified;
}
bool ErlangGC::findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE;
       ++BBI)
    for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end();
         MI != ME; ++MI)
      if (MI->getDesc().isCall()) {
        // Do not treat tail call sites as safe points.
        if (MI->getDesc().isTerminator())
          continue;

        /* Code copied from VisitCallPoint(...) */
        MachineBasicBlock::iterator RAI = MI;
        ++RAI;
        MCSymbol *Label = InsertLabel(*MI->getParent(), RAI, MI->getDebugLoc());
        FI.addSafePoint(GC::PostCall, Label, MI->getDebugLoc());
      }

  return false;
}
/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();

  // Add prologue to the function...
  TFI.emitPrologue(Fn);

  // Add epilogue to restore the callee-save registers in each exiting block
  for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
    // If last instruction is a return instruction, add an epilogue
    if (!I->empty() && I->back().isReturn())
      TFI.emitEpilogue(Fn, *I);
  }

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it.  This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (Fn.getTarget().Options.EnableSegmentedStacks)
    TFI.adjustForSegmentedStacks(Fn);
}
bool MOVToLEAPass::runOnMachineFunction(MachineFunction &Fn) {
  const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
  bool Changed = false;

  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
      ++PreMOVtoLEAInstructionCount;
      if (I->getNumOperands() != 2 || !I->getOperand(0).isReg() ||
          !I->getOperand(1).isReg()) {
        ++I;
        continue;
      }

      unsigned leaOpc;
      if (I->getOpcode() == X86::MOV32rr) {
        leaOpc = X86::LEA32r;
      } else if (I->getOpcode() == X86::MOV64rr) {
        leaOpc = X86::LEA64r;
      } else {
        ++I;
        continue;
      }

      unsigned int Roll = RandomNumberGenerator::Generator().Random(100);
      ++MOVCandidates;
      if (Roll >= multicompiler::getFunctionOption(
              multicompiler::MOVToLEAPercentage, *Fn.getFunction())) {
        ++I;
        continue;
      }

      ++ReplacedMOV;
      MachineBasicBlock::iterator J = I;
      ++I;
      addRegOffset(BuildMI(*BB, J, J->getDebugLoc(), TII->get(leaOpc),
                           J->getOperand(0).getReg()),
                   J->getOperand(1).getReg(), false, 0);
      J->eraseFromParent();
      Changed = true;
    }

  return Changed;
}
bool R600LowerShaderInstructionsPass::runOnMachineFunction(MachineFunction &MF) {
  MRI = &MF.getRegInfo();

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
       BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end();) {
      MachineInstr &MI = *I;
      bool deleteInstr = false;
      switch (MI.getOpcode()) {
      default: break;

      case AMDIL::RESERVE_REG:
      case AMDIL::EXPORT_REG:
        deleteInstr = true;
        break;

      case AMDIL::LOAD_INPUT:
        lowerLOAD_INPUT(MI);
        deleteInstr = true;
        break;

      case AMDIL::STORE_OUTPUT:
        deleteInstr = lowerSTORE_OUTPUT(MI, MBB, I);
        break;
      }

      ++I;

      if (deleteInstr) {
        MI.eraseFromParent();
      }
    }
  }
  return false;
}
bool OptimizeExts::runOnMachineFunction(MachineFunction &MF) {
  TM = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;
    LocalMIs.clear();
    for (MachineBasicBlock::iterator MII = I->begin(), ME = I->end();
         MII != ME; ++MII) {
      MachineInstr *MI = &*MII;
      Changed |= OptimizeInstr(MI, MBB, LocalMIs);
    }
  }

  return Changed;
}
bool InsertNOPLoad::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &MF.getSubtarget<SparcSubtarget>();
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = DebugLoc();

  bool Modified = false;
  for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
    MachineBasicBlock &MBB = *MFI;
    for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
      MachineInstr &MI = *MBBI;
      unsigned Opcode = MI.getOpcode();
      if (Opcode >= SP::LDDArr && Opcode <= SP::LDrr) {
        MachineBasicBlock::iterator NMBBI = std::next(MBBI);
        BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
        Modified = true;
      }
    }
  }
  return Modified;
}
bool AMDGPUConvertToISAPass::runOnMachineFunction(MachineFunction &MF) {
  const AMDGPUInstrInfo * TII =
      static_cast<const AMDGPUInstrInfo*>(TM.getInstrInfo());

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
       BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I);
         I != MBB.end(); I = Next, Next = llvm::next(I)) {
      MachineInstr &MI = *I;
      MachineInstr * newInstr = TII->convertToISA(MI, MF, MBB.findDebugLoc(I));
      if (!newInstr) {
        continue;
      }
      MBB.insert(I, newInstr);
      MI.eraseFromParent();
    }
  }
  return false;
}
bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) {
  // Check for single-block functions and skip them.
  if (llvm::next(F.begin()) == F.end())
    return false;

  MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  MLI = &getAnalysis<MachineLoopInfo>();
  TII = F.getTarget().getInstrInfo();
  TLI = F.getTarget().getTargetLowering();
  assert(BlockToChain.empty());

  buildCFGChains(F);
  placeChainsTopologically(F);
  AlignLoops(F);

  BlockToChain.clear();

  // We always return true as we have no way to track whether the final order
  // differs from the original order.
  return true;
}
bool Thumb2ITBlockPass::runOnMachineFunction(MachineFunction &Fn) {
  const TargetMachine &TM = Fn.getTarget();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
  TRI = TM.getRegisterInfo();
  restrictIT = TM.getSubtarget<ARMSubtarget>().restrictIT();
  if (!AFI->isThumbFunction())
    return false;

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E; ) {
    MachineBasicBlock &MBB = *MFI;
    ++MFI;
    Modified |= InsertITInstructions(MBB);
  }

  if (Modified)
    AFI->setHasITBlocks(true);

  return Modified;
}
/// runOnMachineFunction - This uses the printMachineInstruction()
/// method to print assembly for each instruction.
///
bool IA64AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
  this->MF = &MF;

  SetupMachineFunction(MF);
  O << "\n\n";

  // Print out constants referenced by the function
  EmitConstantPool(MF.getConstantPool());

  const Function *F = MF.getFunction();
  SwitchToSection(TAI->SectionForGlobal(F));

  // Print out labels for the function.
  EmitAlignment(5);
  O << "\t.global\t" << CurrentFnName << '\n';

  printVisibility(CurrentFnName, F->getVisibility());

  O << "\t.type\t" << CurrentFnName << ", @function\n";
  O << CurrentFnName << ":\n";

  // Print out code for the function.
  for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
       I != E; ++I) {
    // Print a label for the basic block if there are any predecessors.
    if (!I->pred_empty()) {
      printBasicBlockLabel(I, true, true);
      O << '\n';
    }
    for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end();
         II != E; ++II) {
      // Print the assembly for the instruction.
      printMachineInstruction(II);
    }
  }

  // We didn't modify anything.
  return false;
}
/// ScanForSpillSlotRefs - Scan all the machine instructions for spill slot
/// references and update spill slot weights.
void StackSlotColoring::ScanForSpillSlotRefs(MachineFunction &MF) {
  SSRefs.resize(MFI->getObjectIndexEnd());

  // FIXME: Need the equivalent of MachineRegisterInfo for frameindex operands.
  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = &*MBBI;
    for (MachineBasicBlock::iterator MII = MBB->begin(), EE = MBB->end();
         MII != EE; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (!MO.isFI())
          continue;
        int FI = MO.getIndex();
        if (FI < 0)
          continue;
        if (!LS->hasInterval(FI))
          continue;
        LiveInterval &li = LS->getInterval(FI);
        if (!MI.isDebugValue())
          li.weight += LiveIntervals::getSpillWeight(false, true, MBFI, MI);
      }
      for (MachineInstr::mmo_iterator MMOI = MI.memoperands_begin(),
           EE = MI.memoperands_end(); MMOI != EE; ++MMOI) {
        MachineMemOperand *MMO = *MMOI;
        if (const FixedStackPseudoSourceValue *FSV =
            dyn_cast_or_null<FixedStackPseudoSourceValue>(
                MMO->getPseudoValue())) {
          int FI = FSV->getFrameIndex();
          if (FI >= 0)
            SSRefs[FI].push_back(MMO);
        }
      }
    }
  }
}
/// finalizeBundles - Finalize instruction bundles in the specified
/// MachineFunction. Return true if any bundles are finalized.
bool llvm::finalizeBundles(MachineFunction &MF) {
  bool Changed = false;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock &MBB = *I;
    MachineBasicBlock::instr_iterator MII = MBB.instr_begin();
    MachineBasicBlock::instr_iterator MIE = MBB.instr_end();
    if (MII == MIE)
      continue;
    assert(!MII->isInsideBundle() &&
           "First instr cannot be inside bundle before finalization!");

    for (++MII; MII != MIE; ) {
      if (!MII->isInsideBundle())
        ++MII;
      else {
        MII = finalizeBundle(MBB, std::prev(MII));
        Changed = true;
      }
    }
  }

  return Changed;
}
bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
  MDT = &getAnalysis<MachineDominatorTree>();

  // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
  // any other instructions that might clobber the ctr register.
  for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
       I != IE; ++I) {
    MachineBasicBlock *MBB = I;
    if (!MDT->isReachableFromEntry(MBB))
      continue;

    for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
         MIIE = MBB->end(); MII != MIIE; ++MII) {
      unsigned Opc = MII->getOpcode();
      if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
          Opc == PPC::BDZ8  || Opc == PPC::BDZ)
        if (!verifyCTRBranch(MBB, MII))
          llvm_unreachable("Invalid PPC CTR loop!");
    }
  }

  return false;
}
bool AlphaBSel::runOnMachineFunction(MachineFunction &Fn) {

  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    MachineBasicBlock *MBB = MFI;

    for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
         MBBI != EE; ++MBBI) {
      if (MBBI->getOpcode() == Alpha::COND_BRANCH_I ||
          MBBI->getOpcode() == Alpha::COND_BRANCH_F) {

        // condbranch operands:
        // 0. bc opcode
        // 1. reg
        // 2. target MBB
        const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
        MBBI->setDesc(TII->get(MBBI->getOperand(0).getImm()));
      }
    }
  }

  return true;
}
// Iterates through each basic block in a machine function and replaces
// ADJDYNALLOC pseudo instructions with a Lanai:ADDI with the
// maximum call frame size as the immediate.
void LanaiFrameLowering::replaceAdjDynAllocPseudo(MachineFunction &MF) const {
  const LanaiInstrInfo &LII =
      *static_cast<const LanaiInstrInfo *>(STI.getInstrInfo());
  unsigned MaxCallFrameSize = MF.getFrameInfo()->getMaxCallFrameSize();

  for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); MBB != E;
       ++MBB) {
    MachineBasicBlock::iterator MBBI = MBB->begin();
    while (MBBI != MBB->end()) {
      MachineInstr &MI = *MBBI++;
      if (MI.getOpcode() == Lanai::ADJDYNALLOC) {
        DebugLoc DL = MI.getDebugLoc();
        unsigned Dst = MI.getOperand(0).getReg();
        unsigned Src = MI.getOperand(1).getReg();

        BuildMI(*MBB, MI, DL, LII.get(Lanai::ADD_I_LO), Dst)
            .addReg(Src)
            .addImm(MaxCallFrameSize);
        MI.eraseFromParent();
      }
    }
  }
}
bool SIFixSGPRLiveRanges::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
      MF.getTarget().getRegisterInfo());
  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;
      MachineOperand *ExecUse = MI.findRegisterUseOperand(AMDGPU::EXEC);
      if (ExecUse)
        continue;

      for (const MachineOperand &Def : MI.operands()) {
        if (!Def.isReg() || !Def.isDef() ||
            !TargetRegisterInfo::isVirtualRegister(Def.getReg()))
          continue;

        const TargetRegisterClass *RC = MRI.getRegClass(Def.getReg());

        if (!TRI->isSGPRClass(RC))
          continue;

        LiveInterval &LI = LIS->getInterval(Def.getReg());
        for (unsigned i = 0, e = LI.size() - 1; i != e; ++i) {
          LiveRange::Segment &Seg = LI.segments[i];
          LiveRange::Segment &Next = LI.segments[i + 1];
          Seg.end = Next.start;
        }
      }
    }
  }

  return false;
}
bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(*Fn.getFunction()))
    return false;

  const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();
  // Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
  // enabled when NEON is available.
  if (!(STI.isCortexA15() && STI.hasNEON()))
    return false;

  TII = STI.getInstrInfo();
  TRI = STI.getRegisterInfo();
  MRI = &Fn.getRegInfo();
  bool Modified = false;

  DEBUG(dbgs() << "Running on function " << Fn.getName() << "\n");

  DeadInstr.clear();
  Replacements.clear();

  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    for (MachineBasicBlock::iterator MI = MFI->begin(), ME = MFI->end();
         MI != ME;) {
      Modified |= runOnInstruction(MI++);
    }
  }

  for (std::set<MachineInstr *>::iterator I = DeadInstr.begin(),
                                          E = DeadInstr.end();
       I != E; ++I) {
    (*I)->eraseFromParent();
  }

  return Modified;
}