bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
                                          raw_ostream &O) {
  unsigned Reg = MO.getReg();
  switch (Mode) {
  default:
    return true; // Unknown mode.
  case 'w':
    Reg = getWRegFromXReg(Reg);
    break;
  case 'x':
    Reg = getXRegFromWReg(Reg);
    break;
  }

  O << AArch64InstPrinter::getRegisterName(Reg);
  return false;
}
static bool isConstant(const MachineOperand &MO, int64_t &C) {
  const MachineFunction *MF = MO.getParent()->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineInstr *Def = MRI.getVRegDef(MO.getReg());
  if (!Def)
    return false;

  if (Def->getOpcode() == AMDGPU::G_CONSTANT) {
    C = Def->getOperand(1).getCImm()->getSExtValue();
    return true;
  }

  if (Def->getOpcode() == AMDGPU::COPY)
    return isConstant(Def->getOperand(1), C);

  return false;
}
bool AArch64AsmPrinter::lowerOperand(const MachineOperand &MO,
                                     MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    if (MO.isImplicit())
      return false;
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    MCOp = MCOperand::CreateReg(MO.getReg());
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::CreateImm(MO.getImm());
    break;
  case MachineOperand::MO_FPImmediate: {
    assert(MO.getFPImm()->isZero() && "Only fp imm 0.0 is supported");
    MCOp = MCOperand::CreateFPImm(0.0);
    break;
  }
  case MachineOperand::MO_BlockAddress:
    MCOp = lowerSymbolOperand(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = lowerSymbolOperand(MO, getSymbol(MO.getGlobal()));
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::CreateExpr(
        MCSymbolRefExpr::Create(MO.getMBB()->getSymbol(), OutContext));
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = lowerSymbolOperand(MO, GetJTISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    MCOp = lowerSymbolOperand(MO, GetCPISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return false;
  }
  return true;
}
void AlphaAsmPrinter::printOp(const MachineOperand &MO, bool IsCallOp) {
  const TargetRegisterInfo &RI = *TM.getRegisterInfo();

  switch (MO.getType()) {
  case MachineOperand::MO_Register:
    O << RI.get(MO.getReg()).AsmName;
    return;

  case MachineOperand::MO_Immediate:
    cerr << "printOp() does not handle immediate values\n";
    abort();
    return;

  case MachineOperand::MO_MachineBasicBlock:
    printBasicBlockLabel(MO.getMBB());
    return;

  case MachineOperand::MO_ConstantPoolIndex:
    O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
      << MO.getIndex();
    return;

  case MachineOperand::MO_ExternalSymbol:
    O << MO.getSymbolName();
    return;

  case MachineOperand::MO_GlobalAddress: {
    GlobalValue *GV = MO.getGlobal();
    O << Mang->getValueName(GV);
    if (GV->isDeclaration() && GV->hasExternalWeakLinkage())
      ExtWeakSymbols.insert(GV);
    return;
  }

  case MachineOperand::MO_JumpTableIndex:
    O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() << '_'
      << MO.getIndex();
    return;

  default:
    O << "<unknown operand type: " << MO.getType() << ">";
    return;
  }
}
bool AMDGPUMCInstLower::lowerOperand(const MachineOperand &MO,
                                     MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    return true;
  case MachineOperand::MO_Register:
    MCOp = MCOperand::createReg(AMDGPU::getMCReg(MO.getReg(), ST));
    return true;
  case MachineOperand::MO_MachineBasicBlock: {
    if (MO.getTargetFlags() != 0) {
      MCOp = MCOperand::createExpr(
          getLongBranchBlockExpr(*MO.getParent()->getParent(), MO));
    } else {
      MCOp = MCOperand::createExpr(
          MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
    }
    return true;
  }
  case MachineOperand::MO_GlobalAddress: {
    const GlobalValue *GV = MO.getGlobal();
    SmallString<128> SymbolName;
    AP.getNameWithPrefix(SymbolName, GV);
    MCSymbol *Sym = Ctx.getOrCreateSymbol(SymbolName);
    const MCExpr *SymExpr =
        MCSymbolRefExpr::create(Sym, getVariantKind(MO.getTargetFlags()), Ctx);
    const MCExpr *Expr = MCBinaryExpr::createAdd(
        SymExpr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_ExternalSymbol: {
    MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName()));
    Sym->setExternal(true);
    const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  }
}
// Return true if a new block was inserted.
bool SILowerControlFlow::indirectDst(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  unsigned Reg;

  std::tie(Reg, Off) = computeIndirectRegAndOffset(Dst, Off);

  MachineInstr *MovRel =
      BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
          .addReg(Reg, RegState::Define)
          .addReg(Val->getReg(), getUndefRegState(Val->isUndef()))
          .addReg(Dst, RegState::Implicit);

  return loadM0(MI, MovRel, Off);
}
bool AArch64MCInstLower::lowerOperand(const MachineOperand &MO,
                                      MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return false;
    MCOp = MCOperand::createReg(MO.getReg());
    break;
  case MachineOperand::MO_RegisterMask:
    // Regmasks are like implicit defs.
    return false;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::createExpr(
        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
    break;
  case MachineOperand::MO_MCSymbol:
    MCOp = LowerSymbolOperand(MO, MO.getMCSymbol());
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = LowerSymbolOperand(MO, Printer.GetJTISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    MCOp = LowerSymbolOperand(MO, Printer.GetCPISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_BlockAddress:
    MCOp = LowerSymbolOperand(
        MO, Printer.GetBlockAddressSymbol(MO.getBlockAddress()));
    break;
  }
  return true;
}
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
                                            const MachineOperand &MO) const {
  if (MO.isReg())
    return getMipsRegisterNumbering(MO.getReg());
  else if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());
  else if (MO.isGlobal())
    emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true);
  else if (MO.isSymbol())
    emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO));
  else if (MO.isCPI())
    emitConstPoolAddress(MO.getIndex(), getRelocation(MI, MO));
  else if (MO.isJTI())
    emitJumpTableAddress(MO.getIndex(), getRelocation(MI, MO));
  else if (MO.isMBB())
    emitMachineBasicBlock(MO.getMBB(), getRelocation(MI, MO));
  else
    llvm_unreachable("Unable to encode MachineOperand!");
  return 0;
}
MCOperand Cpu0MCInstLower::LowerOperand(const MachineOperand &MO,
                                        unsigned offset) const {
  MachineOperandType MOTy = MO.getType();

  switch (MOTy) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      break;
    return MCOperand::CreateReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::CreateImm(MO.getImm() + offset);
  case MachineOperand::MO_GlobalAddress:
    return LowerSymbolOperand(MO, MOTy, offset);
  case MachineOperand::MO_RegisterMask:
    break;
  }

  return MCOperand();
}
LaneBitmask DetectDeadLanes::transferDefinedLanes(
    const MachineOperand &Def, unsigned OpNum, LaneBitmask DefinedLanes) const {
  const MachineInstr &MI = *Def.getParent();
  // Translate DefinedLanes if necessary.
  switch (MI.getOpcode()) {
  case TargetOpcode::REG_SEQUENCE: {
    unsigned SubIdx = MI.getOperand(OpNum + 1).getImm();
    DefinedLanes = TRI->composeSubRegIndexLaneMask(SubIdx, DefinedLanes);
    DefinedLanes &= TRI->getSubRegIndexLaneMask(SubIdx);
    break;
  }
  case TargetOpcode::INSERT_SUBREG: {
    unsigned SubIdx = MI.getOperand(3).getImm();
    if (OpNum == 2) {
      DefinedLanes = TRI->composeSubRegIndexLaneMask(SubIdx, DefinedLanes);
      DefinedLanes &= TRI->getSubRegIndexLaneMask(SubIdx);
    } else {
      assert(OpNum == 1 && "INSERT_SUBREG must have two operands");
      // Ignore lanes defined by operand 2.
      DefinedLanes &= ~TRI->getSubRegIndexLaneMask(SubIdx);
    }
    break;
  }
  case TargetOpcode::EXTRACT_SUBREG: {
    unsigned SubIdx = MI.getOperand(2).getImm();
    assert(OpNum == 1 && "EXTRACT_SUBREG must have one register operand only");
    DefinedLanes = TRI->reverseComposeSubRegIndexLaneMask(SubIdx, DefinedLanes);
    break;
  }
  case TargetOpcode::COPY:
  case TargetOpcode::PHI:
    break;
  default:
    llvm_unreachable("function must be called with COPY-like instruction");
  }

  assert(Def.getSubReg() == 0 &&
         "Should not have subregister defs in machine SSA phase");
  DefinedLanes &= MRI->getMaxLaneMaskForVReg(Def.getReg());
  return DefinedLanes;
}
MCOperand PTXAsmPrinter::lowerOperand(const MachineOperand &MO) {
  MCOperand MCOp;
  const PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
  const MCExpr *Expr;
  const char *RegSymbolName;
  switch (MO.getType()) {
  default:
    llvm_unreachable("Unknown operand type");
  case MachineOperand::MO_Register:
    // We create register operands as symbols, since the PTXInstPrinter class
    // has no way to map virtual registers back to a name without some ugly
    // hacks.
    // FIXME: Figure out a better way to handle virtual register naming.
    RegSymbolName = MFI->getRegisterName(MO.getReg());
    Expr = MCSymbolRefExpr::Create(RegSymbolName, MCSymbolRefExpr::VK_None,
                                   OutContext);
    MCOp = MCOperand::CreateExpr(Expr);
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::CreateImm(MO.getImm());
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::CreateExpr(
        MCSymbolRefExpr::Create(MO.getMBB()->getSymbol(), OutContext));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = GetSymbolRef(MO, Mang->getSymbol(MO.getGlobal()));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = GetSymbolRef(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
    break;
  case MachineOperand::MO_FPImmediate: {
    APFloat Val = MO.getFPImm()->getValueAPF();
    bool ignored;
    Val.convert(APFloat::IEEEdouble, APFloat::rmTowardZero, &ignored);
    MCOp = MCOperand::CreateFPImm(Val.convertToDouble());
    break;
  }
  }
  return MCOp;
}
bool HexagonOptAddrMode::hasRepForm(MachineInstr *MI, unsigned TfrDefR) {
  const MCInstrDesc &MID = MI->getDesc();

  if ((!MID.mayStore() && !MID.mayLoad()) || HII->isPredicated(*MI))
    return false;

  if (MID.mayStore()) {
    MachineOperand StOp = MI->getOperand(MI->getNumOperands() - 1);
    if (StOp.isReg() && StOp.getReg() == TfrDefR)
      return false;
  }

  if (HII->getAddrMode(MI) == HexagonII::BaseRegOffset)
    // Transform to absolute plus register offset.
    return (HII->getBaseWithLongOffset(MI) >= 0);
  else if (HII->getAddrMode(MI) == HexagonII::BaseImmOffset)
    // Transform to absolute addressing mode.
    return (HII->getAbsoluteForm(MI) >= 0);

  return false;
}
/// Push this operand's register onto the correct vector.
void collect(const MachineOperand &MO, const TargetRegisterInfo *TRI) {
  if (MO.readsReg()) {
    if (findReg(MO.getReg(), isVReg, Uses, TRI) == Uses.end())
      Uses.push_back(MO.getReg());
  }
  if (MO.isDef()) {
    if (MO.isDead()) {
      if (findReg(MO.getReg(), isVReg, DeadDefs, TRI) == DeadDefs.end())
        DeadDefs.push_back(MO.getReg());
    } else {
      if (findReg(MO.getReg(), isVReg, Defs, TRI) == Defs.end())
        Defs.push_back(MO.getReg());
    }
  }
}
static bool isCrossCopy(const MachineRegisterInfo &MRI, const MachineInstr &MI,
                        const TargetRegisterClass *DstRC,
                        const MachineOperand &MO) {
  assert(lowersToCopies(MI));
  unsigned SrcReg = MO.getReg();
  const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
  if (DstRC == SrcRC)
    return false;

  unsigned SrcSubIdx = MO.getSubReg();

  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  unsigned DstSubIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::INSERT_SUBREG:
    if (MI.getOperandNo(&MO) == 2)
      DstSubIdx = MI.getOperand(3).getImm();
    break;
  case TargetOpcode::REG_SEQUENCE: {
    unsigned OpNum = MI.getOperandNo(&MO);
    DstSubIdx = MI.getOperand(OpNum + 1).getImm();
    break;
  }
  case TargetOpcode::EXTRACT_SUBREG: {
    unsigned SubReg = MI.getOperand(2).getImm();
    SrcSubIdx = TRI.composeSubRegIndices(SubReg, SrcSubIdx);
    break;
  }
  }

  unsigned PreA, PreB; // Unused.
  if (SrcSubIdx && DstSubIdx)
    return !TRI.getCommonSuperRegClass(SrcRC, SrcSubIdx, DstRC, DstSubIdx,
                                       PreA, PreB);
  if (SrcSubIdx)
    return !TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSubIdx);
  if (DstSubIdx)
    return !TRI.getMatchingSuperRegClass(DstRC, SrcRC, DstSubIdx);
  return !TRI.getCommonSubClass(SrcRC, DstRC);
}
MCOperand SystemZMCInstLower::lowerOperand(const MachineOperand &MO) const {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");

  case MachineOperand::MO_Register:
    return MCOperand::CreateReg(MO.getReg());

  case MachineOperand::MO_Immediate:
    return MCOperand::CreateImm(MO.getImm());

  case MachineOperand::MO_MachineBasicBlock:
    return lowerSymbolOperand(MO, MO.getMBB()->getSymbol(),
                              /* MO has no offset field */ 0);

  case MachineOperand::MO_GlobalAddress:
    return lowerSymbolOperand(MO, Mang->getSymbol(MO.getGlobal()),
                              MO.getOffset());

  case MachineOperand::MO_ExternalSymbol: {
    StringRef Name = MO.getSymbolName();
    return lowerSymbolOperand(MO, AsmPrinter.GetExternalSymbolSymbol(Name),
                              MO.getOffset());
  }

  case MachineOperand::MO_JumpTableIndex:
    return lowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()),
                              /* MO has no offset field */ 0);

  case MachineOperand::MO_ConstantPoolIndex:
    return lowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()),
                              MO.getOffset());

  case MachineOperand::MO_BlockAddress: {
    const BlockAddress *BA = MO.getBlockAddress();
    return lowerSymbolOperand(MO, AsmPrinter.GetBlockAddressSymbol(BA),
                              MO.getOffset());
  }
  }
}
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  unsigned Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target-independent instructions, like COPY, may not impose
  // any register class constraints on some of their operands: if it's a use,
  // we can skip constraining, as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them, as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainRegToClass(MRI, TII, RBI, InsertPt, Reg, *RegClass);
}
void NVPTXReplaceImageHandles::replaceImageHandle(MachineOperand &Op,
                                                  MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  NVPTXMachineFunctionInfo *MFI = MF.getInfo<NVPTXMachineFunctionInfo>();
  // Which instruction defines the handle?
  MachineInstr *MI = MRI.getVRegDef(Op.getReg());
  assert(MI && "No def for image handle vreg?");
  MachineInstr &TexHandleDef = *MI;

  switch (TexHandleDef.getOpcode()) {
  case NVPTX::LD_i64_avar: {
    // The handle is a parameter value being loaded, replace with the
    // parameter symbol.
    assert(TexHandleDef.getOperand(6).isSymbol() && "Load is not a symbol!");
    StringRef Sym = TexHandleDef.getOperand(6).getSymbolName();
    std::string ParamBaseName = MF.getName();
    ParamBaseName += "_param_";
    assert(Sym.startswith(ParamBaseName) && "Invalid symbol reference");
    unsigned Param = atoi(Sym.data() + ParamBaseName.size());
    std::string NewSym;
    raw_string_ostream NewSymStr(NewSym);
    NewSymStr << MF.getFunction()->getName() << "_param_" << Param;
    Op.ChangeToImmediate(
        MFI->getImageHandleSymbolIndex(NewSymStr.str().c_str()));
    InstrsToRemove.insert(&TexHandleDef);
    break;
  }
  case NVPTX::texsurf_handles: {
    // The handle is a global variable, replace with the global variable name.
    assert(TexHandleDef.getOperand(1).isGlobal() && "Load is not a global!");
    const GlobalValue *GV = TexHandleDef.getOperand(1).getGlobal();
    assert(GV->hasName() && "Global sampler must be named!");
    Op.ChangeToImmediate(MFI->getImageHandleSymbolIndex(GV->getName().data()));
    InstrsToRemove.insert(&TexHandleDef);
    break;
  }
  default:
    llvm_unreachable("Unknown instruction operating on handle");
  }
}
// All currently live registers must remain so in the remainder block.
void SILowerControlFlow::splitBlockLiveIns(const MachineBasicBlock &MBB,
                                           const MachineInstr &MI,
                                           MachineBasicBlock &LoopBB,
                                           MachineBasicBlock &RemainderBB,
                                           unsigned SaveReg,
                                           const MachineOperand &IdxReg) {
  LivePhysRegs RemainderLiveRegs(TRI);

  RemainderLiveRegs.addLiveOuts(MBB);
  for (MachineBasicBlock::const_reverse_iterator I = MBB.rbegin(), E(&MI);
       I != E; ++I) {
    RemainderLiveRegs.stepBackward(*I);
  }

  // Add reg defined in loop body.
  RemainderLiveRegs.addReg(SaveReg);

  if (const MachineOperand *Val =
          TII->getNamedOperand(MI, AMDGPU::OpName::val)) {
    if (!Val->isUndef()) {
      RemainderLiveRegs.addReg(Val->getReg());
      LoopBB.addLiveIn(Val->getReg());
    }
  }

  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  for (unsigned Reg : RemainderLiveRegs) {
    if (MRI.isAllocatable(Reg))
      RemainderBB.addLiveIn(Reg);
  }

  const MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  if (!Src->isUndef())
    LoopBB.addLiveIn(Src->getReg());

  if (!IdxReg.isUndef())
    LoopBB.addLiveIn(IdxReg.getReg());

  LoopBB.sortUniqueLiveIns();
}
void AlphaAsmPrinter::printOp(const MachineOperand &MO, bool IsCallOp) {
  const TargetRegisterInfo &RI = *TM.getRegisterInfo();

  switch (MO.getType()) {
  case MachineOperand::MO_Register:
    O << RI.get(MO.getReg()).AsmName;
    return;

  case MachineOperand::MO_Immediate:
    llvm_unreachable("printOp() does not handle immediate values");
    return;

  case MachineOperand::MO_MachineBasicBlock:
    printBasicBlockLabel(MO.getMBB());
    return;

  case MachineOperand::MO_ConstantPoolIndex:
    O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
      << MO.getIndex();
    return;

  case MachineOperand::MO_ExternalSymbol:
    O << MO.getSymbolName();
    return;

  case MachineOperand::MO_GlobalAddress:
    O << Mang->getMangledName(MO.getGlobal());
    return;

  case MachineOperand::MO_JumpTableIndex:
    O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() << '_'
      << MO.getIndex();
    return;

  default:
    O << "<unknown operand type: " << MO.getType() << ">";
    return;
  }
}
bool X86IntelAsmPrinter::printAsmMRegister(const MachineOperand &MO,
                                           const char Mode) {
  unsigned Reg = MO.getReg();
  switch (Mode) {
  default:
    return true; // Unknown mode.
  case 'b': // Print QImode register
    Reg = getX86SubSuperRegister(Reg, MVT::i8);
    break;
  case 'h': // Print QImode high register
    Reg = getX86SubSuperRegister(Reg, MVT::i8, true);
    break;
  case 'w': // Print HImode register
    Reg = getX86SubSuperRegister(Reg, MVT::i16);
    break;
  case 'k': // Print SImode register
    Reg = getX86SubSuperRegister(Reg, MVT::i32);
    break;
  }

  O << '%' << TRI->getName(Reg);
  return false;
}
/// Returns true if the given machine operand \p MO only reads undefined lanes.
/// The function only works for use operands with a subregister set.
bool VirtRegRewriter::readsUndefSubreg(const MachineOperand &MO) const {
  // Shortcut if the operand is already marked undef.
  if (MO.isUndef())
    return true;

  unsigned Reg = MO.getReg();
  const LiveInterval &LI = LIS->getInterval(Reg);
  const MachineInstr &MI = *MO.getParent();
  SlotIndex BaseIndex = LIS->getInstructionIndex(MI);
  // This code is only meant to handle reading undefined subregisters which
  // we couldn't properly detect before.
  assert(LI.liveAt(BaseIndex) &&
         "Reads of completely dead register should be marked undef already");
  unsigned SubRegIdx = MO.getSubReg();
  LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
  // See if any of the relevant subregister liveranges is defined at this
  // point.
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    if ((SR.LaneMask & UseMask) != 0 && SR.liveAt(BaseIndex))
      return false;
  }
  return true;
}
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    // TODO: Print the other register flags.
    if (Op.isImplicit())
      OS << (Op.isDef() ? "implicit-def " : "implicit ");
    if (Op.isDead())
      OS << "dead ";
    if (Op.isKill())
      OS << "killed ";
    if (Op.isUndef())
      OS << "undef ";
    printReg(Op.getReg(), OS, TRI);
    // TODO: Print sub register.
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_MachineBasicBlock:
    printMBBReference(*Op.getMBB());
    break;
  case MachineOperand::MO_GlobalAddress:
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, MST);
    // TODO: Print offset and target flags.
    break;
  case MachineOperand::MO_RegisterMask: {
    auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
    if (RegMaskInfo != RegisterMaskIds.end())
      OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
    else
      llvm_unreachable("Can't print this machine register mask yet.");
    break;
  }
  default:
    // TODO: Print the other machine operands.
    llvm_unreachable("Can't print this machine operand at the moment");
  }
}
bool llvm::LowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
                                               MCOperand &MCOp,
                                               const AsmPrinter &AP) {
  switch (MO.getType()) {
  default:
    report_fatal_error("LowerRISCVMachineInstrToMCInst: unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return false;
    MCOp = MCOperand::createReg(MO.getReg());
    break;
  case MachineOperand::MO_RegisterMask:
    // Regmasks are like implicit defs.
    return false;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = lowerSymbolOperand(MO, MO.getMBB()->getSymbol(), AP);
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = lowerSymbolOperand(MO, AP.getSymbol(MO.getGlobal()), AP);
    break;
  case MachineOperand::MO_BlockAddress:
    MCOp = lowerSymbolOperand(
        MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()), AP);
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = lowerSymbolOperand(
        MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()), AP);
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    MCOp = lowerSymbolOperand(MO, AP.GetCPISymbol(MO.getIndex()), AP);
    break;
  }
  return true;
}
/// Add MO to the linked list of operands for its register.
void MachineRegisterInfo::addRegOperandToUseList(MachineOperand *MO) {
  assert(!MO->isOnRegUseList() && "Already on list");
  MachineOperand *&HeadRef = getRegUseDefListHead(MO->getReg());
  MachineOperand *const Head = HeadRef;

  // Head points to the first list element.
  // Next is NULL on the last list element.
  // Prev pointers are circular, so Head->Prev == Last.

  // Head is NULL for an empty list.
  if (!Head) {
    MO->Contents.Reg.Prev = MO;
    MO->Contents.Reg.Next = nullptr;
    HeadRef = MO;
    return;
  }

  assert(MO->getReg() == Head->getReg() && "Different regs on the same list!");

  // Insert MO between Last and Head in the circular Prev chain.
  MachineOperand *Last = Head->Contents.Reg.Prev;
  assert(Last && "Inconsistent use list");
  assert(MO->getReg() == Last->getReg() && "Different regs on the same list!");
  Head->Contents.Reg.Prev = MO;
  MO->Contents.Reg.Prev = Last;

  // Def operands always precede uses. This allows def_iterator to stop early.
  // Insert def operands at the front, and use operands at the back.
  if (MO->isDef()) {
    // Insert def at the front.
    MO->Contents.Reg.Next = Head;
    HeadRef = MO;
  } else {
    // Insert use at the end.
    MO->Contents.Reg.Next = nullptr;
    Last->Contents.Reg.Next = MO;
  }
}
void AlphaAsmPrinter::printOp(const MachineOperand &MO, raw_ostream &O) {
  switch (MO.getType()) {
  case MachineOperand::MO_Register:
    O << getRegisterName(MO.getReg());
    return;

  case MachineOperand::MO_Immediate:
    assert(0 && "printOp() does not handle immediate values");
    return;

  case MachineOperand::MO_MachineBasicBlock:
    O << *MO.getMBB()->getSymbol();
    return;

  case MachineOperand::MO_ConstantPoolIndex:
    O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
      << MO.getIndex();
    return;

  case MachineOperand::MO_ExternalSymbol:
    O << MO.getSymbolName();
    return;

  case MachineOperand::MO_GlobalAddress:
    O << *Mang->getSymbol(MO.getGlobal());
    return;

  case MachineOperand::MO_JumpTableIndex:
    O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() << '_'
      << MO.getIndex();
    return;

  default:
    O << "<unknown operand type: " << MO.getType() << ">";
    return;
  }
}
void MachineRegisterInfo::verifyUseList(unsigned Reg) const {
#ifndef NDEBUG
  bool Valid = true;
  for (MachineOperand &M : reg_operands(Reg)) {
    MachineOperand *MO = &M;
    MachineInstr *MI = MO->getParent();
    if (!MI) {
      errs() << PrintReg(Reg, getTargetRegisterInfo())
             << " use list MachineOperand " << MO
             << " has no parent instruction.\n";
      Valid = false;
      continue;
    }
    MachineOperand *MO0 = &MI->getOperand(0);
    unsigned NumOps = MI->getNumOperands();
    if (!(MO >= MO0 && MO < MO0 + NumOps)) {
      errs() << PrintReg(Reg, getTargetRegisterInfo())
             << " use list MachineOperand " << MO
             << " doesn't belong to parent MI: " << *MI;
      Valid = false;
    }
    if (!MO->isReg()) {
      errs() << PrintReg(Reg, getTargetRegisterInfo())
             << " MachineOperand " << MO << ": " << *MO
             << " is not a register\n";
      Valid = false;
    }
    if (MO->getReg() != Reg) {
      errs() << PrintReg(Reg, getTargetRegisterInfo())
             << " use-list MachineOperand " << MO << ": " << *MO
             << " is the wrong register\n";
      Valid = false;
    }
  }
  assert(Valid && "Invalid use list");
#endif
}
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    // TODO: Print register flags.
    printReg(Op.getReg(), OS, TRI);
    // TODO: Print sub register.
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_MachineBasicBlock:
    OS << "%bb." << Op.getMBB()->getNumber();
    if (const auto *BB = Op.getMBB()->getBasicBlock()) {
      if (BB->hasName())
        OS << '.' << BB->getName();
    }
    break;
  case MachineOperand::MO_GlobalAddress:
    // FIXME: Make this faster - print as operand will create a slot tracker to
    // print unnamed values for the whole module every time it's called, which
    // is inefficient.
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, &M);
    // TODO: Print offset and target flags.
    break;
  case MachineOperand::MO_RegisterMask: {
    auto RegMaskInfo = RegisterMaskIds.find(Op.getRegMask());
    if (RegMaskInfo != RegisterMaskIds.end())
      OS << StringRef(TRI->getRegMaskNames()[RegMaskInfo->second]).lower();
    else
      llvm_unreachable("Can't print this machine register mask yet.");
    break;
  }
  default:
    // TODO: Print the other machine operands.
    llvm_unreachable("Can't print this machine operand at the moment");
  }
}
void HexagonCopyToCombine::emitCombineIR(MachineBasicBlock::iterator &InsertPt,
                                         unsigned DoubleDestReg,
                                         MachineOperand &HiOperand,
                                         MachineOperand &LoOperand) {
  unsigned LoReg = LoOperand.getReg();
  unsigned LoRegKillFlag = getKillRegState(LoOperand.isKill());

  DebugLoc DL = InsertPt->getDebugLoc();
  MachineBasicBlock *BB = InsertPt->getParent();

  // Handle global.
  if (HiOperand.isGlobal()) {
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineir), DoubleDestReg)
        .addGlobalAddress(HiOperand.getGlobal(), HiOperand.getOffset(),
                          HiOperand.getTargetFlags())
        .addReg(LoReg, LoRegKillFlag);
    return;
  }

  // Insert new combine instruction.
  //   DoubleRegDest = combine #HiImm, LoReg
  BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineir), DoubleDestReg)
      .addImm(HiOperand.getImm())
      .addReg(LoReg, LoRegKillFlag);
}
bool OptimizePICCall::isCallViaRegister(MachineInstr &MI, unsigned &Reg,
                                        ValueType &Val) const {
  if (!MI.isCall())
    return false;

  MachineOperand *MO = getCallTargetRegOpnd(MI);

  // Return if MI is not a function call via a register.
  if (!MO)
    return false;

  // Get the instruction that loads the function address from the GOT.
  Reg = MO->getReg();
  Val = (Value *)nullptr;
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = MRI.getVRegDef(Reg);

  assert(DefMI);

  // See if DefMI is an instruction that loads from a GOT entry that holds the
  // address of a lazy binding stub.
  if (!DefMI->mayLoad() || DefMI->getNumOperands() < 3)
    return true;

  unsigned Flags = DefMI->getOperand(2).getTargetFlags();

  if (Flags != MipsII::MO_GOT_CALL && Flags != MipsII::MO_CALL_LO16)
    return true;

  // Return the underlying object for the GOT entry in Val.
  assert(DefMI->hasOneMemOperand());
  Val = (*DefMI->memoperands_begin())->getValue();
  if (!Val)
    Val = (*DefMI->memoperands_begin())->getPseudoValue();

  return true;
}
void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
  assert(MO.isUndef() && "expected undef use");
  unsigned VirtReg = MO.getReg();
  assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Expected virtreg");

  LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
  MCPhysReg PhysReg;
  if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
    PhysReg = LRI->PhysReg;
  } else {
    const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
    ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
    assert(!AllocationOrder.empty() && "Allocation order must not be empty");
    PhysReg = AllocationOrder[0];
  }

  unsigned SubRegIdx = MO.getSubReg();
  if (SubRegIdx != 0) {
    PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
    MO.setSubReg(0);
  }

  MO.setReg(PhysReg);
  MO.setIsRenamable(true);
}