/// Replace the contents of operand \p Dst with the value described by \p Src.
///
/// Handles only register and immediate destinations:
/// - Dst is a register: copy Src's register, or morph Dst in place into an
///   immediate when Src is an immediate.
/// - Dst is an immediate: copy Src's immediate, or morph Dst in place into a
///   register, carrying over all of Src's register flags.
/// Any other combination of operand types is a programming error.
void HexagonPeephole::ChangeOpInto(MachineOperand &Dst, MachineOperand &Src) {
  assert (&Dst != &Src && "Cannot duplicate into itself");

  switch (Dst.getType()) {
  case MachineOperand::MO_Register:
    if (Src.isReg()) {
      Dst.setReg(Src.getReg());
    } else if (Src.isImm()) {
      // Re-type Dst from register to immediate without replacing the operand.
      Dst.ChangeToImmediate(Src.getImm());
    } else {
      llvm_unreachable("Unexpected src operand type");
    }
    break;

  case MachineOperand::MO_Immediate:
    if (Src.isImm()) {
      Dst.setImm(Src.getImm());
    } else if (Src.isReg()) {
      // Re-type Dst from immediate to register, preserving every register
      // flag (def/implicit/kill/dead/undef/debug) of the source operand.
      Dst.ChangeToRegister(Src.getReg(), Src.isDef(), Src.isImplicit(),
                           Src.isKill(), Src.isDead(), Src.isUndef(),
                           Src.isDebug());
    } else {
      llvm_unreachable("Unexpected src operand type");
    }
    break;

  default:
    llvm_unreachable("Unexpected dst operand type");
    break;
  }
}
// ins, ext, dext*, dins have the following constraints: // X <= pos < Y // X < size <= Y // X < pos+size <= Y // // dinsm and dinsu have the following constraints: // X <= pos < Y // X <= size <= Y // X < pos+size <= Y // // The callee of verifyInsExtInstruction however gives the bounds of // dins[um] like the other (d)ins (d)ext(um) instructions, so that this // function doesn't have to vary it's behaviour based on the instruction // being checked. static bool verifyInsExtInstruction(const MachineInstr &MI, StringRef &ErrInfo, const int64_t PosLow, const int64_t PosHigh, const int64_t SizeLow, const int64_t SizeHigh, const int64_t BothLow, const int64_t BothHigh) { MachineOperand MOPos = MI.getOperand(2); if (!MOPos.isImm()) { ErrInfo = "Position is not an immediate!"; return false; } int64_t Pos = MOPos.getImm(); if (!((PosLow <= Pos) && (Pos < PosHigh))) { ErrInfo = "Position operand is out of range!"; return false; } MachineOperand MOSize = MI.getOperand(3); if (!MOSize.isImm()) { ErrInfo = "Size operand is not an immediate!"; return false; } int64_t Size = MOSize.getImm(); if (!((SizeLow < Size) && (Size <= SizeHigh))) { ErrInfo = "Size operand is out of range!"; return false; } if (!((BothLow < (Pos + Size)) && ((Pos + Size) <= BothHigh))) { ErrInfo = "Position + Size is out of range!"; return false; } return true; }
/// \returns true if the constant in \p Src should be replaced with a bitreverse /// of an inline immediate. static bool isReverseInlineImm(const SIInstrInfo *TII, const MachineOperand &Src, int32_t &ReverseImm) { if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src)) return false; ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm())); return ReverseImm >= -16 && ReverseImm <= 64; }
/// Classify \p Src as a 16-bit signed (KImm) or unsigned (KUImm) literal.
/// Sets \p IsUnsigned accordingly and returns true only when the value fits
/// in 16 bits and is not already an inline constant.
static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  const int64_t Value = Src.getImm();
  const bool FitsSigned = isInt<16>(Value);
  const bool FitsUnsigned = isUInt<16>(Value);

  if (!FitsSigned && !FitsUnsigned)
    return false;

  // When the value fits both ranges, the signed classification wins,
  // matching the order of the original checks.
  IsUnsigned = !FitsSigned;
  return !TII->isInlineConstant(Src);
}
/// Rewrite the frame-index address operand of \p MI to use \p BaseReg, and
/// fold \p Offset into the instruction's existing immediate offset.
///
/// \p MI must be a MUBUF instruction whose vaddr operand is a frame index,
/// and the combined offset must already be legal (12-bit unsigned).
void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  // Debug-only sanity check: the instruction may contain at most one
  // frame-index operand.
  bool SeenFI = false;
  for (const MachineOperand &MO: MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");

  assert(TII->isMUBUF(MI));

  // Fold the extra offset into the instruction's immediate. The caller is
  // responsible for having ensured the result is encodable.
  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  // Replace the frame index with the materialized base register.
  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}
// Emit a "combine register, immediate" (A4_combineri) forming the 64-bit
// register pair DoubleDestReg from HiOperand (a register) and LoOperand
// (an immediate or a global address).
void HexagonCopyToCombine::emitCombineRI(MachineBasicBlock::iterator &InsertPt,
                                         unsigned DoubleDestReg,
                                         MachineOperand &HiOperand,
                                         MachineOperand &LoOperand) {
  // Preserve the kill state of the high register on the new instruction.
  unsigned HiRegKillFlag = getKillRegState(HiOperand.isKill());
  unsigned HiReg = HiOperand.getReg();

  DebugLoc DL = InsertPt->getDebugLoc();
  MachineBasicBlock *BB = InsertPt->getParent();

  // Handle global.
  if (LoOperand.isGlobal()) {
    // The low half is a global address: carry its offset and target flags
    // over to the new instruction.
    BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
      .addReg(HiReg, HiRegKillFlag)
      .addGlobalAddress(LoOperand.getGlobal(), LoOperand.getOffset(),
                        LoOperand.getTargetFlags());
    return;
  }

  // Insert new combine instruction.
  //   DoubleRegDest = combine HiReg, #LoImm
  BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::A4_combineri), DoubleDestReg)
    .addReg(HiReg, HiRegKillFlag)
    .addImm(LoOperand.getImm());
}
/// Lower a MachineOperand to an MCOperand.
///
/// Implicit registers and register masks lower to an empty (invalid)
/// MCOperand, which the caller is expected to skip. All symbolic operand
/// kinds are delegated to LowerSymbolOperand.
static MCOperand LowerOperand(const MachineInstr *MI, const MachineOperand &MO,
                              AsmPrinter &AP) {
  switch(MO.getType()) {
  default: llvm_unreachable("unknown operand type"); break;
  case MachineOperand::MO_Register:
    // Implicit register operands are dropped from the lowered instruction.
    if (MO.isImplicit()) break;
    return MCOperand::createReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::createImm(MO.getImm());
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_BlockAddress:
  case MachineOperand::MO_ExternalSymbol:
  case MachineOperand::MO_ConstantPoolIndex:
    return LowerSymbolOperand(MI, MO, AP);
  case MachineOperand::MO_RegisterMask:
    // Call-clobber masks have no MC representation.
    break;
  }

  return MCOperand();
}
/// getMachineOpValue - Return binary encoding of operand. If the machine /// operand requires relocation, record the relocation and return zero. unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI, const MachineOperand &MO) { if (MO.isReg()) return ARMRegisterInfo::getRegisterNumbering(MO.getReg()); else if (MO.isImm()) return static_cast<unsigned>(MO.getImm()); else if (MO.isGlobal()) emitGlobalAddress(MO.getGlobal(), ARM::reloc_arm_branch, true, false); else if (MO.isSymbol()) emitExternalSymbolAddress(MO.getSymbolName(), ARM::reloc_arm_branch); else if (MO.isCPI()) { const TargetInstrDesc &TID = MI.getDesc(); // For VFP load, the immediate offset is multiplied by 4. unsigned Reloc = ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPLdStFrm) ? ARM::reloc_arm_vfp_cp_entry : ARM::reloc_arm_cp_entry; emitConstPoolAddress(MO.getIndex(), Reloc); } else if (MO.isJTI()) emitJumpTableAddress(MO.getIndex(), ARM::reloc_arm_relative); else if (MO.isMBB()) emitMachineBasicBlock(MO.getMBB(), ARM::reloc_arm_branch); else { #ifndef NDEBUG errs() << MO; #endif llvm_unreachable(0); } return 0; }
/// Lower \p MO to an MCOperand, adding \p offset to immediate operands and
/// to the offsets of symbolic operands. Implicit registers and register
/// masks yield an empty MCOperand, which the caller discards.
MCOperand MipsMCInstLower::LowerOperand(const MachineOperand &MO,
                                        unsigned offset) const {
  MachineOperandType MOTy = MO.getType();

  switch (MOTy) {
  default: llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit()) break;
    return MCOperand::CreateReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::CreateImm(MO.getImm() + offset);
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
  case MachineOperand::MO_JumpTableIndex:
  case MachineOperand::MO_ConstantPoolIndex:
  case MachineOperand::MO_BlockAddress:
    // All symbolic operand kinds share a single lowering path.
    return LowerSymbolOperand(MO, MOTy, offset);
  case MachineOperand::MO_RegisterMask:
    break;
  }

  return MCOperand();
}
/// Emit a CONST64 materializing the 64-bit value whose high and low 32-bit
/// halves are given by the immediates \p HiOperand and \p LoOperand, into
/// the register pair \p DoubleDestReg.
void HexagonCopyToCombine::emitConst64(MachineBasicBlock::iterator &InsertPt,
                                       unsigned DoubleDestReg,
                                       MachineOperand &HiOperand,
                                       MachineOperand &LoOperand) {
  DEBUG(dbgs() << "Found a CONST64\n");

  DebugLoc DL = InsertPt->getDebugLoc();
  MachineBasicBlock *BB = InsertPt->getParent();
  assert(LoOperand.isImm() && HiOperand.isImm() &&
         "Both operands must be immediate");

  // Assemble the constant in an unsigned type: the original shifted a
  // signed int64_t left by 32, which is undefined behavior when the high
  // half is negative.
  uint64_t V = (static_cast<uint64_t>(HiOperand.getImm()) << 32) |
               (static_cast<uint64_t>(LoOperand.getImm()) & 0x0ffffffffULL);

  BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::CONST64), DoubleDestReg)
      .addImm(static_cast<int64_t>(V));
}
/// Return the encodable value of \p MO. Operands that need fixing up emit a
/// relocation of type \p relocType and encode as zero.
unsigned MipsCodeEmitter::getOperandValue(const MachineOperand &MO,
                                          unsigned relocType) {
  switch (MO.getType()) {
  case MachineOperand::MO_Immediate:
    return MO.getImm();
  case MachineOperand::MO_GlobalAddress:
    emitGlobalAddress(MO.getGlobal(), relocType, false);
    return 0;
  case MachineOperand::MO_ExternalSymbol:
    emitExternalSymbolAddress(MO.getSymbolName(), relocType);
    return 0;
  case MachineOperand::MO_MachineBasicBlock:
    // Basic-block targets are recorded relative to the current PC.
    emitMachineBasicBlock(MO.getMBB(), relocType, MCE.getCurrentPCValue());
    return 0;
  case MachineOperand::MO_Register:
    return MipsRegisterInfo::getRegisterNumbering(MO.getReg());
  case MachineOperand::MO_JumpTableIndex:
    emitJumpTableAddress(MO.getIndex(), relocType);
    return 0;
  case MachineOperand::MO_ConstantPoolIndex:
    emitConstPoolAddress(MO.getIndex(), relocType);
    return 0;
  default:
    // Any other operand kind encodes as zero.
    return 0;
  }
}
/// Lower \p MO to an MCOperand, or return None for operands that vanish
/// during lowering (implicit registers and call-clobber register masks).
/// Unknown operand kinds dump the instruction and abort.
Optional<MCOperand> X86MCInstLower::LowerMachineOperand(
    const MachineInstr *MI, const MachineOperand &MO) const {
  switch (MO.getType()) {
  default:
    MI->dump();
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return None;
    return MCOperand::createReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::createImm(MO.getImm());
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
    return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
  case MachineOperand::MO_MCSymbol:
    return LowerSymbolOperand(MO, MO.getMCSymbol());
  case MachineOperand::MO_JumpTableIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
  case MachineOperand::MO_ConstantPoolIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
  case MachineOperand::MO_BlockAddress:
    return LowerSymbolOperand(
        MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return None;
  }
}
/// getMachineOpValue - Return binary encoding of operand. If the machine /// operand requires relocation, record the relocation and return zero. unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI, const MachineOperand &MO) const { if (MO.isReg()) return MipsRegisterInfo::getRegisterNumbering(MO.getReg()); else if (MO.isImm()) return static_cast<unsigned>(MO.getImm()); else if (MO.isGlobal()) { if (MI.getOpcode() == Mips::ULW || MI.getOpcode() == Mips::USW || MI.getOpcode() == Mips::ULH || MI.getOpcode() == Mips::ULHu) emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 4); else if (MI.getOpcode() == Mips::USH) emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 8); else emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true); } else if (MO.isSymbol()) emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO)); else if (MO.isCPI()) emitConstPoolAddress(MO.getIndex(), getRelocation(MI, MO)); else if (MO.isJTI()) emitJumpTableAddress(MO.getIndex(), getRelocation(MI, MO)); else if (MO.isMBB()) emitMachineBasicBlock(MO.getMBB(), getRelocation(MI, MO)); else llvm_unreachable("Unable to encode MachineOperand!"); return 0; }
/// Implements the 'w' and 'x' inline asm operand modifiers, which print a GPR /// with the obvious type and an immediate 0 as either wzr or xzr. static bool printModifiedGPRAsmOperand(const MachineOperand &MO, const TargetRegisterInfo *TRI, const TargetRegisterClass &RegClass, raw_ostream &O) { char Prefix = &RegClass == &AArch64::GPR32RegClass ? 'w' : 'x'; if (MO.isImm() && MO.getImm() == 0) { O << Prefix << "zr"; return false; } else if (MO.isReg()) { if (MO.getReg() == AArch64::XSP || MO.getReg() == AArch64::WSP) { O << (Prefix == 'x' ? "sp" : "wsp"); return false; } for (MCRegAliasIterator AR(MO.getReg(), TRI, true); AR.isValid(); ++AR) { if (RegClass.contains(*AR)) { O << AArch64InstPrinter::getRegisterName(*AR); return false; } } } return true; }
// FIXME: Does this need to check IEEE bit on function? bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) { const MachineOperand *RegOp; int OMod; std::tie(RegOp, OMod) = isOMod(MI); if (OMod == SIOutMods::NONE || !RegOp->isReg() || RegOp->getSubReg() != AMDGPU::NoSubRegister || !hasOneNonDBGUseInst(*MRI, RegOp->getReg())) return false; MachineInstr *Def = MRI->getVRegDef(RegOp->getReg()); MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod); if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE) return false; // Clamp is applied after omod. If the source already has clamp set, don't // fold it. if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp)) return false; DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n'); DefOMod->setImm(OMod); MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg()); MI.eraseFromParent(); return true; }
/// Lower \p MO to an MCOperand. Implicit registers and register masks
/// produce an empty MCOperand; symbolic operands are delegated to
/// LowerSymbolOperand.
MCOperand SampleMCInstLower::
LowerOperand(const MachineOperand& MO) const {
  DEBUG(dbgs() << ">>> LowerOperand:" << MO
               << " type:" << MO.getType() << "\n");
  MachineOperandType MOTy = MO.getType();

  switch (MOTy) {
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit()) break;
    return MCOperand::CreateReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::CreateImm(MO.getImm());
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
  case MachineOperand::MO_JumpTableIndex:
  case MachineOperand::MO_ConstantPoolIndex:
  case MachineOperand::MO_BlockAddress:
    // All symbolic operand kinds share a single lowering path.
    return LowerSymbolOperand(MO, MOTy);
  case MachineOperand::MO_RegisterMask:
    break;
  default:
    llvm_unreachable("unknown operand type");
  }

  return MCOperand();
}
/// isIdenticalTo - Return true if this operand is identical to the specified /// operand. bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const { if (getType() != Other.getType()) return false; switch (getType()) { default: assert(0 && "Unrecognized operand type"); case MachineOperand::MO_Register: return getReg() == Other.getReg() && isDef() == Other.isDef() && getSubReg() == Other.getSubReg(); case MachineOperand::MO_Immediate: return getImm() == Other.getImm(); case MachineOperand::MO_FPImmediate: return getFPImm() == Other.getFPImm(); case MachineOperand::MO_MachineBasicBlock: return getMBB() == Other.getMBB(); case MachineOperand::MO_FrameIndex: return getIndex() == Other.getIndex(); case MachineOperand::MO_ConstantPoolIndex: return getIndex() == Other.getIndex() && getOffset() == Other.getOffset(); case MachineOperand::MO_JumpTableIndex: return getIndex() == Other.getIndex(); case MachineOperand::MO_GlobalAddress: return getGlobal() == Other.getGlobal() && getOffset() == Other.getOffset(); case MachineOperand::MO_ExternalSymbol: return !strcmp(getSymbolName(), Other.getSymbolName()) && getOffset() == Other.getOffset(); } }
/// Print \p Op in MIR syntax. Handles register, immediate, basic-block and
/// global-address operands; anything else is currently unimplemented.
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    // TODO: Print register flags.
    printReg(Op.getReg(), OS, TRI);
    // TODO: Print sub register.
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  case MachineOperand::MO_MachineBasicBlock:
    // Blocks print as "%bb.<number>", plus ".<name>" when the underlying IR
    // block has a name.
    OS << "%bb." << Op.getMBB()->getNumber();
    if (const auto *BB = Op.getMBB()->getBasicBlock()) {
      if (BB->hasName())
        OS << '.' << BB->getName();
    }
    break;
  case MachineOperand::MO_GlobalAddress:
    // FIXME: Make this faster - print as operand will create a slot tracker to
    // print unnamed values for the whole module every time it's called, which
    // is inefficient.
    Op.getGlobal()->printAsOperand(OS, /*PrintType=*/false, &M);
    // TODO: Print offset and target flags.
    break;
  default:
    // TODO: Print the other machine operands.
    llvm_unreachable("Can't print this machine operand at the moment");
  }
}
/// Produce an operand describing the 32-bit half \p SubIdx of the 64-bit
/// operand \p MO.
///
/// For register operands, a COPY of the composed subregister into a fresh
/// SGPR_32 virtual register is inserted before MO's instruction, and a
/// register operand for it (with MO's flags carried over) is returned.
/// For immediates, the matching 32-bit half of the value is returned as an
/// immediate operand. Only sub0/sub1 are supported for immediates.
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    // Combine any subregister index already on MO with the requested one.
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    // Carry every operand flag of MO over to the new register operand.
    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  // Split the 64-bit immediate into its requested 32-bit half.
  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}
/// Report whether \p MO can be encoded as an inline constant: a small
/// integer in [-16, 64], or one of the fixed floating-point values the
/// hardware provides directly.
bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm()) {
    const int64_t Val = MO.getImm();
    return -16 <= Val && Val <= 64;
  }

  if (MO.isFPImm()) {
    const auto *FP = MO.getFPImm();
    return FP->isExactlyValue(0.0)  || FP->isExactlyValue(0.5)  ||
           FP->isExactlyValue(-0.5) || FP->isExactlyValue(1.0)  ||
           FP->isExactlyValue(-1.0) || FP->isExactlyValue(2.0)  ||
           FP->isExactlyValue(-2.0) || FP->isExactlyValue(4.0)  ||
           FP->isExactlyValue(-4.0);
  }

  return false;
}
/// Rewrite the frame-index address operand of \p MI to use \p BaseReg plus
/// \p Offset.
///
/// \p MI must be a MUBUF instruction whose vaddr operand is a frame index.
/// If the combined immediate offset is encodable (12-bit unsigned) it is
/// folded directly; otherwise the offset is materialized and added to
/// \p BaseReg in a new register which replaces the frame index.
void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  // Debug-only sanity check: the instruction may contain at most one
  // frame-index operand.
  bool SeenFI = false;
  for (const MachineOperand &MO: MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");

  assert(TII->isMUBUF(MI));

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  if (isUInt<12>(NewOffset)) {
    // If we have a legal offset, fold it directly into the instruction.
    FIOp->ChangeToRegister(BaseReg, false);
    OffsetOp->setImm(NewOffset);
    return;
  }

  // The offset is not legal, so we must insert an add of the offset.
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned NewReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  DebugLoc DL = MI.getDebugLoc();

  assert(Offset != 0 && "Non-zero offset expected");

  // V_ADD_I32_e64 produces a carry-out we don't need; mark it dead.
  unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  // In the case the instruction already had an immediate offset, here only
  // the requested new offset is added because we are leaving the original
  // immediate in place.
  BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ADD_I32_e64), NewReg)
    .addReg(UnusedCarry, RegState::Define | RegState::Dead)
    .addReg(OffsetReg, RegState::Kill)
    .addReg(BaseReg);

  FIOp->ChangeToRegister(NewReg, false);
}
/// Convert \p MO into the equivalent MCOperand. Registers and immediates
/// map directly; every other operand kind is lowered to an expression.
MCOperand LM32MCInstLower::lowerOperand(const MachineOperand &MO) const {
  if (MO.getType() == MachineOperand::MO_Register)
    return MCOperand::CreateReg(MO.getReg());

  if (MO.getType() == MachineOperand::MO_Immediate)
    return MCOperand::CreateImm(MO.getImm());

  return MCOperand::CreateExpr(getExpr(MO));
}
/// Lower \p MO into \p MCOp. Returns false for operands that should be
/// dropped from the lowered instruction (non-CPSR implicit registers and
/// register masks), true when \p MCOp was filled in.
bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO, MCOperand &MCOp) {
  switch (MO.getType()) {
  default: llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all non-CPSR implicit register operands.
    if (MO.isImplicit() && MO.getReg() != ARM::CPSR)
      return false;
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    MCOp = MCOperand::createReg(MO.getReg());
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::createExpr(MCSymbolRefExpr::create(
        MO.getMBB()->getSymbol(), OutContext));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = GetSymbolRef(MO,
                        GetARMGVSymbol(MO.getGlobal(), MO.getTargetFlags()));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = GetSymbolRef(MO,
                        GetExternalSymbolSymbol(MO.getSymbolName()));
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = GetSymbolRef(MO, GetJTISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    if (Subtarget->genExecuteOnly())
      llvm_unreachable("execute-only should not generate constant pools");
    MCOp = GetSymbolRef(MO, GetCPISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_BlockAddress:
    MCOp = GetSymbolRef(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
    break;
  case MachineOperand::MO_FPImmediate: {
    // MC only carries double FP immediates; convert toward zero.
    APFloat Val = MO.getFPImm()->getValueAPF();
    bool ignored;
    Val.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &ignored);
    MCOp = MCOperand::createFPImm(Val.convertToDouble());
    break;
  }
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return false;
  }
  return true;
}
/// Convert \p MO into the equivalent MCOperand. Registers and immediates
/// map directly; every other operand kind becomes an expression whose
/// variant is selected by the operand's target flags.
MCOperand SystemZMCInstLower::lowerOperand(const MachineOperand &MO) const {
  if (MO.getType() == MachineOperand::MO_Register)
    return MCOperand::CreateReg(MO.getReg());

  if (MO.getType() == MachineOperand::MO_Immediate)
    return MCOperand::CreateImm(MO.getImm());

  MCSymbolRefExpr::VariantKind Kind = getVariantKind(MO.getTargetFlags());
  return MCOperand::CreateExpr(getExpr(MO, Kind));
}
/// addConstantValue - Add constant value entry in variable DIE. bool CompileUnit::addConstantValue(DIE *Die, const MachineOperand &MO, DIType Ty) { assert (MO.isImm() && "Invalid machine operand!"); DIEBlock *Block = new (DIEValueAllocator) DIEBlock(); unsigned form = dwarf::DW_FORM_udata; switch (Ty.getSizeInBits()) { case 8: form = dwarf::DW_FORM_data1; break; case 16: form = dwarf::DW_FORM_data2; break; case 32: form = dwarf::DW_FORM_data4; break; case 64: form = dwarf::DW_FORM_data8; break; default: break; } DIBasicType BTy(Ty); if (BTy.Verify() && (BTy.getEncoding() == dwarf::DW_ATE_signed || BTy.getEncoding() == dwarf::DW_ATE_signed_char)) addSInt(Block, 0, form, MO.getImm()); else addUInt(Block, 0, form, MO.getImm()); addBlock(Die, dwarf::DW_AT_const_value, 0, Block); return true; }
/// Lower \p MO to an MCOperand, adding \p offset to immediate operands.
/// Only register and immediate operands are supported; anything else is a
/// programming error.
MCOperand SimpleMCInstLower::LowerOperand(const MachineOperand &MO,
                                          unsigned offset) const {
  if (MO.isReg())
    return MCOperand::CreateReg(MO.getReg());

  if (MO.isImm())
    return MCOperand::CreateImm(MO.getImm() + offset);

  llvm_unreachable("unknown operand type");
  return MCOperand();
}
unsigned HexagonGenMux::getMuxOpcode(const MachineOperand &Src1, const MachineOperand &Src2) const { bool IsReg1 = Src1.isReg(), IsReg2 = Src2.isReg(); if (IsReg1) return IsReg2 ? Hexagon::C2_mux : Hexagon::C2_muxir; if (IsReg2) return Hexagon::C2_muxri; // Neither is a register. The first source is extendable, but the second // is not (s8). if (Src2.isImm() && isInt<8>(Src2.getImm())) return Hexagon::C2_muxii; return 0; }
/// Print \p Op in MIR syntax. Only register and immediate operands are
/// implemented so far; all other kinds abort.
void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI) {
  switch (Op.getType()) {
  case MachineOperand::MO_Register:
    // TODO: Print register flags.
    printReg(Op.getReg(), OS, TRI);
    // TODO: Print sub register.
    break;
  case MachineOperand::MO_Immediate:
    OS << Op.getImm();
    break;
  default:
    // TODO: Print the other machine operands.
    llvm_unreachable("Can't print this machine operand at the moment");
  }
}
/// Lower \p MO into \p MCOp. Returns false for operands that are dropped
/// from the lowered instruction (implicit registers and register masks),
/// true when \p MCOp was filled in.
bool EpiphanyAsmPrinter::lowerOperand(const MachineOperand &MO,
                                      MCOperand &MCOp) const {
  switch (MO.getType()) {
  default: llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Implicit register operands are dropped.
    if (MO.isImplicit())
      return false;
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    MCOp = MCOperand::CreateReg(MO.getReg());
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::CreateImm(MO.getImm());
    break;
  case MachineOperand::MO_FPImmediate: {// a bit hacky, see arm
    // MC only carries double FP immediates; convert toward zero.
    APFloat Val = MO.getFPImm()->getValueAPF();
    bool ignored;
    Val.convert(APFloat::IEEEdouble, APFloat::rmTowardZero, &ignored);
    MCOp = MCOperand::CreateFPImm(Val.convertToDouble());
    break;
  }
  case MachineOperand::MO_BlockAddress:
    MCOp = lowerSymbolOperand(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = lowerSymbolOperand(MO, Mang->getSymbol(MO.getGlobal()));
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
        MO.getMBB()->getSymbol(), OutContext));
    break;
  case MachineOperand::MO_JumpTableIndex:
    MCOp = lowerSymbolOperand(MO, GetJTISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    MCOp = lowerSymbolOperand(MO, GetCPISymbol(MO.getIndex()));
    break;
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers
    return false;
  }
  return true;
}
/// Add 4 to the displacement of operand MO. static void fixDisp(MachineOperand &MO) { switch (MO.getType()) { default: llvm_unreachable("Unhandled operand type."); case MachineOperand::MO_Immediate: MO.setImm(MO.getImm() + 4); break; case MachineOperand::MO_GlobalAddress: case MachineOperand::MO_ConstantPoolIndex: case MachineOperand::MO_BlockAddress: case MachineOperand::MO_TargetIndex: case MachineOperand::MO_ExternalSymbol: MO.setOffset(MO.getOffset() + 4); break; } }