/// Decode an immediate field that is encoded as the negation of its value:
/// the operand added to the instruction is -(int64_t)Val.
/// Address and Decoder are part of the fixed decoder-table signature and
/// are unused here.
static DecodeStatus DecodeNegImmOperand(MCInst &Inst, unsigned Val,
                                        uint64_t Address,
                                        const void *Decoder) {
  // Widen to 64 bits before negating so the full unsigned range survives.
  const int64_t Negated = -static_cast<int64_t>(Val);
  Inst.addOperand(MCOperand::CreateImm(Negated));
  return MCDisassembler::Success;
}
static void LowerTlsAddr(MCStreamer &OutStreamer, X86MCInstLower &MCInstLowering, const MachineInstr &MI) { bool is64Bits = MI.getOpcode() == X86::TLS_addr64 || MI.getOpcode() == X86::TLS_base_addr64; bool needsPadding = MI.getOpcode() == X86::TLS_addr64; MCContext &context = OutStreamer.getContext(); if (needsPadding) { MCInst prefix; prefix.setOpcode(X86::DATA16_PREFIX); OutStreamer.EmitInstruction(prefix); } MCSymbolRefExpr::VariantKind SRVK; switch (MI.getOpcode()) { case X86::TLS_addr32: case X86::TLS_addr64: SRVK = MCSymbolRefExpr::VK_TLSGD; break; case X86::TLS_base_addr32: SRVK = MCSymbolRefExpr::VK_TLSLDM; break; case X86::TLS_base_addr64: SRVK = MCSymbolRefExpr::VK_TLSLD; break; default: llvm_unreachable("unexpected opcode"); } MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)); const MCSymbolRefExpr *symRef = MCSymbolRefExpr::Create(sym, SRVK, context); MCInst LEA; if (is64Bits) { LEA.setOpcode(X86::LEA64r); LEA.addOperand(MCOperand::CreateReg(X86::RDI)); // dest LEA.addOperand(MCOperand::CreateReg(X86::RIP)); // base LEA.addOperand(MCOperand::CreateImm(1)); // scale LEA.addOperand(MCOperand::CreateReg(0)); // index LEA.addOperand(MCOperand::CreateExpr(symRef)); // disp LEA.addOperand(MCOperand::CreateReg(0)); // seg } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) { LEA.setOpcode(X86::LEA32r); LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest LEA.addOperand(MCOperand::CreateReg(X86::EBX)); // base LEA.addOperand(MCOperand::CreateImm(1)); // scale LEA.addOperand(MCOperand::CreateReg(0)); // index LEA.addOperand(MCOperand::CreateExpr(symRef)); // disp LEA.addOperand(MCOperand::CreateReg(0)); // seg } else { LEA.setOpcode(X86::LEA32r); LEA.addOperand(MCOperand::CreateReg(X86::EAX)); // dest LEA.addOperand(MCOperand::CreateReg(0)); // base LEA.addOperand(MCOperand::CreateImm(1)); // scale LEA.addOperand(MCOperand::CreateReg(X86::EBX)); // index LEA.addOperand(MCOperand::CreateExpr(symRef)); // disp 
LEA.addOperand(MCOperand::CreateReg(0)); // seg } OutStreamer.EmitInstruction(LEA); if (needsPadding) { MCInst prefix; prefix.setOpcode(X86::DATA16_PREFIX); OutStreamer.EmitInstruction(prefix); prefix.setOpcode(X86::DATA16_PREFIX); OutStreamer.EmitInstruction(prefix); prefix.setOpcode(X86::REX64_PREFIX); OutStreamer.EmitInstruction(prefix); } MCInst call; if (is64Bits) call.setOpcode(X86::CALL64pcrel32); else call.setOpcode(X86::CALLpcrel32); StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr"; MCSymbol *tlsGetAddr = context.GetOrCreateSymbol(name); const MCSymbolRefExpr *tlsRef = MCSymbolRefExpr::Create(tlsGetAddr, MCSymbolRefExpr::VK_PLT, context); call.addOperand(MCOperand::CreateExpr(tlsRef)); OutStreamer.EmitInstruction(call); }
/// Map a raw register-field value onto a register from the given table.
///
/// \param Inst   instruction the decoded register operand is appended to.
/// \param RegNo  raw field value extracted from the encoding.
/// \param Regs   register table for the class; its size N bounds RegNo.
/// \returns Success with the register appended, or Fail for an
///          out-of-range field.
///
/// Disassemblers run on untrusted bytes, so an out-of-range register field
/// must be reported as a decode failure rather than asserted: the previous
/// assert vanished in release builds, leaving an out-of-bounds read.
static DecodeStatus decodeRegisterClass(MCInst &Inst, uint64_t RegNo,
                                        const unsigned (&Regs)[N]) {
  if (RegNo >= N)
    return MCDisassembler::Fail;
  Inst.addOperand(MCOperand::createReg(Regs[RegNo]));
  return MCDisassembler::Success;
}
/// LowerUnaryToTwoAddr - R = setb -> R = sbb R, R static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) { OutMI.setOpcode(NewOpc); OutMI.addOperand(OutMI.getOperand(0)); OutMI.addOperand(OutMI.getOperand(0)); }
/// \brief Simplify things like MOV32rm to MOV32o32a. static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst, unsigned Opcode) { // Don't make these simplifications in 64-bit mode; other assemblers don't // perform them because they make the code larger. if (Printer.getSubtarget().is64Bit()) return; bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg(); unsigned AddrBase = IsStore; unsigned RegOp = IsStore ? 0 : 5; unsigned AddrOp = AddrBase + 3; assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + 0).isReg() && // base Inst.getOperand(AddrBase + 1).isImm() && // scale Inst.getOperand(AddrBase + 2).isReg() && // index register (Inst.getOperand(AddrOp).isExpr() || // address Inst.getOperand(AddrOp).isImm())&& Inst.getOperand(AddrBase + 4).isReg() && // segment "Unexpected instruction!"); // Check whether the destination register can be fixed. unsigned Reg = Inst.getOperand(RegOp).getReg(); if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) return; // Check whether this is an absolute address. // FIXME: We know TLVP symbol refs aren't, but there should be a better way // to do this here. bool Absolute = true; if (Inst.getOperand(AddrOp).isExpr()) { const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr(); if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE)) if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP) Absolute = false; } if (Absolute && (Inst.getOperand(AddrBase + 0).getReg() != 0 || Inst.getOperand(AddrBase + 2).getReg() != 0 || Inst.getOperand(AddrBase + 4).getReg() != 0 || Inst.getOperand(AddrBase + 1).getImm() != 1)) return; // If so, rewrite the instruction. MCOperand Saved = Inst.getOperand(AddrOp); Inst = MCInst(); Inst.setOpcode(Opcode); Inst.addOperand(Saved); }
/// Report whether this MCR instruction is one of the CP15 barrier encodings
/// that were deprecated in ARMv7 in favor of the dedicated isb/dsb/dmb
/// instructions.  On a match, Info receives the diagnostic text.
static bool getMCRDeprecationInfo(MCInst &MI, const MCSubtargetInfo &STI,
                                  std::string &Info) {
  // The deprecation only applies from v7 onwards.
  if (!STI.getFeatureBits()[llvm::ARM::HasV7Ops])
    return false;

  // Every deprecated form is "mcr p15, #0, rX, c7, cN, #M".
  if (!(MI.getOperand(0).isImm() && MI.getOperand(0).getImm() == 15) ||
      !(MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
      !(MI.getOperand(3).isImm() && MI.getOperand(3).getImm() == 7))
    return false;

  const bool Op4Is5 = MI.getOperand(4).isImm() && MI.getOperand(4).getImm() == 5;
  const bool Op4Is10 =
      MI.getOperand(4).isImm() && MI.getOperand(4).getImm() == 10;
  const bool Op5Is4 = MI.getOperand(5).isImm() && MI.getOperand(5).getImm() == 4;
  const bool Op5Is5 = MI.getOperand(5).isImm() && MI.getOperand(5).getImm() == 5;

  // Deprecated CP15ISB encoding: mcr p15, #0, rX, c7, c5, #4
  if (Op5Is4 && Op4Is5) {
    Info = "deprecated since v7, use 'isb'";
    return true;
  }
  // Deprecated CP15DSB encoding: mcr p15, #0, rX, c7, c10, #4
  if (Op5Is4 && Op4Is10) {
    Info = "deprecated since v7, use 'dsb'";
    return true;
  }
  // Deprecated CP15DMB encoding: mcr p15, #0, rX, c7, c10, #5
  if (Op4Is10 && Op5Is5) {
    Info = "deprecated since v7, use 'dmb'";
    return true;
  }
  return false;
}
//===----------------------------------------------------------------------===// void OR1KAsmPrinter::customEmitInstruction(const MachineInstr *MI) { OR1KMCInstLower MCInstLowering(OutContext, *Mang, *this); unsigned Opcode = MI->getOpcode(); MCSubtargetInfo STI = getSubtargetInfo(); switch (Opcode) { default: break; case OR1K::MOVHI: case OR1K::ORI: { MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None; if (Opcode == OR1K::MOVHI && MI->getOperand(1).getTargetFlags() == OR1KII::MO_GOTPCHI) Kind = MCSymbolRefExpr::VK_OR1K_GOTPCHI; else if (Opcode == OR1K::ORI && MI->getOperand(2).getTargetFlags() == OR1KII::MO_GOTPCLO) Kind = MCSymbolRefExpr::VK_OR1K_GOTPCLO; else break; // We want to print something like: // MYGLOBAL + (. - PICBASE) // However, we can't generate a ".", so just emit a new label here and refer // to it. MCSymbol *DotSym = OutContext.CreateTempSymbol(); const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext); const MCExpr *PICBase = MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), OutContext); OutStreamer.EmitLabel(DotSym); // Now that we have emitted the label, lower the complex operand expression. MachineOperand MO = (MI->getOpcode() == OR1K::MOVHI) ? MI->getOperand(1) : MI->getOperand(2); MCSymbol *OpSym = MCInstLowering.GetExternalSymbolSymbol(MO); DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext); DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym, Kind, OutContext), DotExpr, OutContext); MCInst TmpInst; TmpInst.setOpcode(MI->getOpcode()); TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg())); if (MI->getOpcode() == OR1K::ORI) TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg())); TmpInst.addOperand(MCOperand::CreateExpr(DotExpr)); OutStreamer.EmitInstruction(TmpInst, STI); return; } case OR1K::GETPC: { MCInst TmpInst; // This is a pseudo op for a two instruction sequence with a label, which // looks like: // l.jal .L1$pb // l.nop // .L1$pb: // Emit the call. 
MCSymbol *PICBase = MF->getPICBaseSymbol(); TmpInst.setOpcode(OR1K::JAL); // FIXME: We would like an efficient form for this, so we don't have to do a // lot of extra uniquing. TmpInst.addOperand(MCOperand::CreateExpr( MCSymbolRefExpr::Create(PICBase,OutContext))); OutStreamer.EmitInstruction(TmpInst, STI); // Emit delay-slot nop // FIXME: omit on no-delay-slot targets TmpInst.setOpcode(OR1K::NOP); TmpInst.getOperand(0) = MCOperand::CreateImm(0); OutStreamer.EmitInstruction(TmpInst, STI); // Emit the label. OutStreamer.EmitLabel(PICBase); return; } } MCInst TmpInst; MCInstLowering.Lower(MI, TmpInst); OutStreamer.EmitInstruction(TmpInst, STI); }
/// EmitInstruction -- Print out a single PowerPC MI in Darwin syntax to /// the current output stream. /// void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { MCInst TmpInst; // Lower multi-instruction pseudo operations. switch (MI->getOpcode()) { default: break; case TargetOpcode::DBG_VALUE: llvm_unreachable("Should be handled target independently"); case PPC::MovePCtoLR: case PPC::MovePCtoLR8: { // Transform %LR = MovePCtoLR // Into this, where the label is the PIC base: // bl L1$pb // L1$pb: MCSymbol *PICBase = MF->getPICBaseSymbol(); // Emit the 'bl'. OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL) // FIXME: We would like an efficient form for this, so we don't have to do // a lot of extra uniquing. .addExpr(MCSymbolRefExpr::Create(PICBase, OutContext))); // Emit the label. OutStreamer.EmitLabel(PICBase); return; } case PPC::LDtocJTI: case PPC::LDtocCPT: case PPC::LDtoc: { // Transform %X3 = LDtoc <ga:@min1>, %X2 LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); // Change the opcode to LD, and the global address operand to be a // reference to the TOC entry we will synthesize later. TmpInst.setOpcode(PPC::LD); const MachineOperand &MO = MI->getOperand(1); // Map symbol -> label of TOC entry assert(MO.isGlobal() || MO.isCPI() || MO.isJTI()); MCSymbol *MOSymbol = 0; if (MO.isGlobal()) MOSymbol = getSymbol(MO.getGlobal()); else if (MO.isCPI()) MOSymbol = GetCPISymbol(MO.getIndex()); else if (MO.isJTI()) MOSymbol = GetJTISymbol(MO.getIndex()); MCSymbol *TOCEntry = lookUpOrCreateTOCEntry(MOSymbol); const MCExpr *Exp = MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC, OutContext); TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); return; } case PPC::ADDIStocHA: { // Transform %Xd = ADDIStocHA %X2, <ga:@sym> LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); // Change the opcode to ADDIS8. 
If the global address is external, // has common linkage, is a function address, or is a jump table // address, then generate a TOC entry and reference that. Otherwise // reference the symbol directly. TmpInst.setOpcode(PPC::ADDIS8); const MachineOperand &MO = MI->getOperand(2); assert((MO.isGlobal() || MO.isCPI() || MO.isJTI()) && "Invalid operand for ADDIStocHA!"); MCSymbol *MOSymbol = 0; bool IsExternal = false; bool IsFunction = false; bool IsCommon = false; bool IsAvailExt = false; if (MO.isGlobal()) { const GlobalValue *GValue = MO.getGlobal(); const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue); const GlobalValue *RealGValue = GAlias ? GAlias->resolveAliasedGlobal(false) : GValue; MOSymbol = getSymbol(RealGValue); const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue); IsExternal = GVar && !GVar->hasInitializer(); IsCommon = GVar && RealGValue->hasCommonLinkage(); IsFunction = !GVar; IsAvailExt = GVar && RealGValue->hasAvailableExternallyLinkage(); } else if (MO.isCPI()) MOSymbol = GetCPISymbol(MO.getIndex()); else if (MO.isJTI()) MOSymbol = GetJTISymbol(MO.getIndex()); if (IsExternal || IsFunction || IsCommon || IsAvailExt || MO.isJTI() || TM.getCodeModel() == CodeModel::Large) MOSymbol = lookUpOrCreateTOCEntry(MOSymbol); const MCExpr *Exp = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_HA, OutContext); TmpInst.getOperand(2) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); return; } case PPC::LDtocL: { // Transform %Xd = LDtocL <ga:@sym>, %Xs LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); // Change the opcode to LD. If the global address is external, has // common linkage, or is a jump table address, then reference the // associated TOC entry. Otherwise reference the symbol directly. 
TmpInst.setOpcode(PPC::LD); const MachineOperand &MO = MI->getOperand(1); assert((MO.isGlobal() || MO.isJTI() || MO.isCPI()) && "Invalid operand for LDtocL!"); MCSymbol *MOSymbol = 0; if (MO.isJTI()) MOSymbol = lookUpOrCreateTOCEntry(GetJTISymbol(MO.getIndex())); else if (MO.isCPI()) { MOSymbol = GetCPISymbol(MO.getIndex()); if (TM.getCodeModel() == CodeModel::Large) MOSymbol = lookUpOrCreateTOCEntry(MOSymbol); } else if (MO.isGlobal()) { const GlobalValue *GValue = MO.getGlobal(); const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue); const GlobalValue *RealGValue = GAlias ? GAlias->resolveAliasedGlobal(false) : GValue; MOSymbol = getSymbol(RealGValue); const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue); if (!GVar || !GVar->hasInitializer() || RealGValue->hasCommonLinkage() || RealGValue->hasAvailableExternallyLinkage() || TM.getCodeModel() == CodeModel::Large) MOSymbol = lookUpOrCreateTOCEntry(MOSymbol); } const MCExpr *Exp = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_LO, OutContext); TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); return; } case PPC::ADDItocL: { // Transform %Xd = ADDItocL %Xs, <ga:@sym> LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); // Change the opcode to ADDI8. If the global address is external, then // generate a TOC entry and reference that. Otherwise reference the // symbol directly. TmpInst.setOpcode(PPC::ADDI8); const MachineOperand &MO = MI->getOperand(2); assert((MO.isGlobal() || MO.isCPI()) && "Invalid operand for ADDItocL"); MCSymbol *MOSymbol = 0; bool IsExternal = false; bool IsFunction = false; if (MO.isGlobal()) { const GlobalValue *GValue = MO.getGlobal(); const GlobalAlias *GAlias = dyn_cast<GlobalAlias>(GValue); const GlobalValue *RealGValue = GAlias ? 
GAlias->resolveAliasedGlobal(false) : GValue; MOSymbol = getSymbol(RealGValue); const GlobalVariable *GVar = dyn_cast<GlobalVariable>(RealGValue); IsExternal = GVar && !GVar->hasInitializer(); IsFunction = !GVar; } else if (MO.isCPI()) MOSymbol = GetCPISymbol(MO.getIndex()); if (IsFunction || IsExternal || TM.getCodeModel() == CodeModel::Large) MOSymbol = lookUpOrCreateTOCEntry(MOSymbol); const MCExpr *Exp = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TOC_LO, OutContext); TmpInst.getOperand(2) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); return; } case PPC::ADDISgotTprelHA: { // Transform: %Xd = ADDISgotTprelHA %X2, <ga:@sym> // Into: %Xd = ADDIS8 %X2, sym@got@tlsgd@ha assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *SymGotTprel = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL_HA, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8) .addReg(MI->getOperand(0).getReg()) .addReg(PPC::X2) .addExpr(SymGotTprel)); return; } case PPC::LDgotTprelL: { // Transform %Xd = LDgotTprelL <ga:@sym>, %Xs LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); // Change the opcode to LD. 
TmpInst.setOpcode(PPC::LD); const MachineOperand &MO = MI->getOperand(1); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *Exp = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TPREL_LO, OutContext); TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); return; } case PPC::ADDIStlsgdHA: { // Transform: %Xd = ADDIStlsgdHA %X2, <ga:@sym> // Into: %Xd = ADDIS8 %X2, sym@got@tlsgd@ha assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *SymGotTlsGD = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD_HA, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8) .addReg(MI->getOperand(0).getReg()) .addReg(PPC::X2) .addExpr(SymGotTlsGD)); return; } case PPC::ADDItlsgdL: { // Transform: %Xd = ADDItlsgdL %Xs, <ga:@sym> // Into: %Xd = ADDI8 %Xs, sym@got@tlsgd@l assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *SymGotTlsGD = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) .addReg(MI->getOperand(1).getReg()) .addExpr(SymGotTlsGD)); return; } case PPC::GETtlsADDR: { // Transform: %X3 = GETtlsADDR %X3, <ga:@sym> // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsgd) assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); StringRef Name = "__tls_get_addr"; MCSymbol *TlsGetAddr = OutContext.GetOrCreateSymbol(Name); const MCSymbolRefExpr *TlsRef = MCSymbolRefExpr::Create(TlsGetAddr, MCSymbolRefExpr::VK_None, OutContext); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol 
*MOSymbol = getSymbol(GValue); const MCExpr *SymVar = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSGD, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLS) .addExpr(TlsRef) .addExpr(SymVar)); return; } case PPC::ADDIStlsldHA: { // Transform: %Xd = ADDIStlsldHA %X2, <ga:@sym> // Into: %Xd = ADDIS8 %X2, sym@got@tlsld@ha assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *SymGotTlsLD = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD_HA, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8) .addReg(MI->getOperand(0).getReg()) .addReg(PPC::X2) .addExpr(SymGotTlsLD)); return; } case PPC::ADDItlsldL: { // Transform: %Xd = ADDItlsldL %Xs, <ga:@sym> // Into: %Xd = ADDI8 %Xs, sym@got@tlsld@l assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *SymGotTlsLD = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) .addReg(MI->getOperand(1).getReg()) .addExpr(SymGotTlsLD)); return; } case PPC::GETtlsldADDR: { // Transform: %X3 = GETtlsldADDR %X3, <ga:@sym> // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsld) assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); StringRef Name = "__tls_get_addr"; MCSymbol *TlsGetAddr = OutContext.GetOrCreateSymbol(Name); const MCSymbolRefExpr *TlsRef = MCSymbolRefExpr::Create(TlsGetAddr, MCSymbolRefExpr::VK_None, OutContext); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *SymVar = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSLD, 
OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::BL8_NOP_TLS) .addExpr(TlsRef) .addExpr(SymVar)); return; } case PPC::ADDISdtprelHA: { // Transform: %Xd = ADDISdtprelHA %X3, <ga:@sym> // Into: %Xd = ADDIS8 %X3, sym@dtprel@ha assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *SymDtprel = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_HA, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDIS8) .addReg(MI->getOperand(0).getReg()) .addReg(PPC::X3) .addExpr(SymDtprel)); return; } case PPC::ADDIdtprelL: { // Transform: %Xd = ADDIdtprelL %Xs, <ga:@sym> // Into: %Xd = ADDI8 %Xs, sym@dtprel@l assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC"); const MachineOperand &MO = MI->getOperand(2); const GlobalValue *GValue = MO.getGlobal(); MCSymbol *MOSymbol = getSymbol(GValue); const MCExpr *SymDtprel = MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_LO, OutContext); OutStreamer.EmitInstruction(MCInstBuilder(PPC::ADDI8) .addReg(MI->getOperand(0).getReg()) .addReg(MI->getOperand(1).getReg()) .addExpr(SymDtprel)); return; } case PPC::MFOCRF: case PPC::MFOCRF8: if (!Subtarget.hasMFOCRF()) { // Transform: %R3 = MFOCRF %CR7 // Into: %R3 = MFCR ;; cr7 unsigned NewOpcode = MI->getOpcode() == PPC::MFOCRF ? PPC::MFCR : PPC::MFCR8; OutStreamer.AddComment(PPCInstPrinter:: getRegisterName(MI->getOperand(1).getReg())); OutStreamer.EmitInstruction(MCInstBuilder(NewOpcode) .addReg(MI->getOperand(0).getReg())); return; } break; case PPC::MTOCRF: case PPC::MTOCRF8: if (!Subtarget.hasMFOCRF()) { // Transform: %CR7 = MTOCRF %R3 // Into: MTCRF mask, %R3 ;; cr7 unsigned NewOpcode = MI->getOpcode() == PPC::MTOCRF ? 
PPC::MTCRF : PPC::MTCRF8; unsigned Mask = 0x80 >> OutContext.getRegisterInfo() ->getEncodingValue(MI->getOperand(0).getReg()); OutStreamer.AddComment(PPCInstPrinter:: getRegisterName(MI->getOperand(0).getReg())); OutStreamer.EmitInstruction(MCInstBuilder(NewOpcode) .addImm(Mask) .addReg(MI->getOperand(1).getReg())); return; } break; case PPC::SYNC: // In Book E sync is called msync, handle this special case here... if (Subtarget.isBookE()) { OutStreamer.EmitRawText(StringRef("\tmsync")); return; } break; case PPC::LD: case PPC::STD: case PPC::LWA_32: case PPC::LWA: { // Verify alignment is legal, so we don't create relocations // that can't be supported. // FIXME: This test is currently disabled for Darwin. The test // suite shows a handful of test cases that fail this check for // Darwin. Those need to be investigated before this sanity test // can be enabled for those subtargets. if (!Subtarget.isDarwin()) { unsigned OpNum = (MI->getOpcode() == PPC::STD) ? 2 : 1; const MachineOperand &MO = MI->getOperand(OpNum); if (MO.isGlobal() && MO.getGlobal()->getAlignment() < 4) llvm_unreachable("Global must be word-aligned for LD, STD, LWA!"); } // Now process the instruction normally. break; } }
/// Fallback when 2-op decoding fails: re-read the wider opcode field and
/// retry the instruction as a 3R / 2RUS / 2RUS-bitp / 3R-imm encoding.
/// Returns Fail (with Inst untouched) for field values outside the table.
static DecodeStatus Decode2OpInstructionFail(MCInst &Inst, unsigned Insn,
                                             uint64_t Address,
                                             const void *Decoder) {
  // Try and decode as a 3R instruction.
  unsigned Opcode = fieldFromInstruction(Insn, 11, 5);

  // First map the field value to the XCore opcode.
  unsigned NewOpc;
  switch (Opcode) {
  default:
    return MCDisassembler::Fail;
  case 0x0:  NewOpc = XCore::STW_2rus;  break;
  case 0x1:  NewOpc = XCore::LDW_2rus;  break;
  case 0x2:  NewOpc = XCore::ADD_3r;    break;
  case 0x3:  NewOpc = XCore::SUB_3r;    break;
  case 0x4:  NewOpc = XCore::SHL_3r;    break;
  case 0x5:  NewOpc = XCore::SHR_3r;    break;
  case 0x6:  NewOpc = XCore::EQ_3r;     break;
  case 0x7:  NewOpc = XCore::AND_3r;    break;
  case 0x8:  NewOpc = XCore::OR_3r;     break;
  case 0x9:  NewOpc = XCore::LDW_3r;    break;
  case 0x10: NewOpc = XCore::LD16S_3r;  break;
  case 0x11: NewOpc = XCore::LD8U_3r;   break;
  case 0x12: NewOpc = XCore::ADD_2rus;  break;
  case 0x13: NewOpc = XCore::SUB_2rus;  break;
  case 0x14: NewOpc = XCore::SHL_2rus;  break;
  case 0x15: NewOpc = XCore::SHR_2rus;  break;
  case 0x16: NewOpc = XCore::EQ_2rus;   break;
  case 0x17: NewOpc = XCore::TSETR_3r;  break;
  case 0x18: NewOpc = XCore::LSS_3r;    break;
  case 0x19: NewOpc = XCore::LSU_3r;    break;
  }
  Inst.setOpcode(NewOpc);

  // Then dispatch to the operand-format decoder for that opcode family.
  switch (Opcode) {
  case 0x0: case 0x1: case 0x12: case 0x13: case 0x16:
    return Decode2RUSInstruction(Inst, Insn, Address, Decoder);
  case 0x14: case 0x15:
    return Decode2RUSBitpInstruction(Inst, Insn, Address, Decoder);
  case 0x17:
    return Decode3RImmInstruction(Inst, Insn, Address, Decoder);
  default:
    return Decode3RInstruction(Inst, Insn, Address, Decoder);
  }
}
/// Record the register facts for one instruction of a packet: predicate
/// uses, implicit and explicit definitions, and new-value defs/uses.  The
/// sets filled here (Uses, Defs, SoftDefs, LatePreds, NewPreds, NewDefs,
/// NewUses, CurDefs, TmpDefs) are cross-checked later over the whole packet.
void HexagonMCChecker::init(MCInst const& MCI) {
  const MCInstrDesc& MCID = HexagonMCInstrInfo::getDesc(MCII, MCI);
  unsigned PredReg = Hexagon::NoRegister;
  bool isTrue = false;

  // Get used registers.
  for (unsigned i = MCID.getNumDefs(); i < MCID.getNumOperands(); ++i)
    if (MCI.getOperand(i).isReg()) {
      unsigned R = MCI.getOperand(i).getReg();

      if (HexagonMCInstrInfo::isPredicated(MCII, MCI) &&
          isPredicateRegister(R)) {
        // Note a used predicate register.
        PredReg = R;
        isTrue = HexagonMCInstrInfo::isPredicatedTrue(MCII, MCI);

        // Note use of new predicate register.
        if (HexagonMCInstrInfo::isPredicatedNew(MCII, MCI))
          NewPreds.insert(PredReg);
      }
      else
        // Note register use.  Super-registers are not tracked directly,
        // but their components.
        for (MCRegAliasIterator
               SRI(R, &RI, !MCSubRegIterator(R, &RI).isValid());
             SRI.isValid(); ++SRI)
          if (!MCSubRegIterator(*SRI, &RI).isValid())
            // Skip super-registers used indirectly.
            Uses.insert(*SRI);
    }

  // Get implicit register definitions.
  if (const MCPhysReg *ImpDef = MCID.getImplicitDefs())
    for (; *ImpDef; ++ImpDef) {
      unsigned R = *ImpDef;

      if (Hexagon::R31 != R && MCID.isCall())
        // Any register other than the LR and the PC are actually volatile
        // ones as defined by the ABI, not modified implicitly by the call
        // insn.
        continue;
      if (Hexagon::PC == R)
        // Branches are the only insns that can change the PC,
        // otherwise a read-only register.
        continue;

      if (Hexagon::USR_OVF == R)
        // Many insns change the USR implicitly, but only one or another
        // flag.  The instruction table models the USR.OVF flag, which can
        // be implicitly modified more than once, but cannot be modified in
        // the same packet with an instruction that modifies it explicitly.
        // Deal with such situations individually.
        SoftDefs.insert(R);
      else if (isPredicateRegister(R) &&
               HexagonMCInstrInfo::isPredicateLate(MCII, MCI))
        // Include implicit late predicates.
        LatePreds.insert(R);
      else
        Defs[R].insert(PredSense(PredReg, isTrue));
    }

  // Figure out explicit register definitions.
  for (unsigned i = 0; i < MCID.getNumDefs(); ++i) {
    unsigned R = MCI.getOperand(i).getReg(),
             S = Hexagon::NoRegister;
    // USR has subregisters (while C8 does not for technical reasons), so
    // reset R to USR, since we know how to handle multiple defs of USR,
    // taking into account its subregisters.
    if (R == Hexagon::C8)
      R = Hexagon::USR;

    // Note register definitions, direct ones as well as indirect
    // side-effects.  Super-registers are not tracked directly, but their
    // components.
    for (MCRegAliasIterator
           SRI(R, &RI, !MCSubRegIterator(R, &RI).isValid());
         SRI.isValid(); ++SRI) {
      if (MCSubRegIterator(*SRI, &RI).isValid())
        // Skip super-registers defined indirectly.
        continue;

      if (R == *SRI) {
        if (S == R)
          // Avoid scoring the defined register multiple times.
          continue;
        else
          // Note that the defined register has already been scored.
          S = R;
      }

      if (Hexagon::P3_0 != R && Hexagon::P3_0 == *SRI)
        // P3:0 is a special case, since multiple predicate register
        // definitions in a packet is allowed as the equivalent of their
        // logical "and".  Only an explicit definition of P3:0 is noted as
        // such; if a side-effect, then note as a soft definition.
        SoftDefs.insert(*SRI);
      else if (HexagonMCInstrInfo::isPredicateLate(MCII, MCI) &&
               isPredicateRegister(*SRI))
        // Some insns produce predicates too late to be used in the same
        // packet.
        LatePreds.insert(*SRI);
      else if (i == 0 && llvm::HexagonMCInstrInfo::getType(MCII, MCI) ==
                             HexagonII::TypeCVI_VM_CUR_LD)
        // Current loads should be used in the same packet.
        // TODO: relies on the impossibility of a current and a temporary
        // loads in the same packet.
        // NOTE: comma operator — the .cur load is noted as both a current
        // def and a regular def.
        CurDefs.insert(*SRI), Defs[*SRI].insert(PredSense(PredReg, isTrue));
      else if (i == 0 && llvm::HexagonMCInstrInfo::getType(MCII, MCI) ==
                             HexagonII::TypeCVI_VM_TMP_LD)
        // Temporary loads should be used in the same packet, but don't
        // commit results, so it should be disregarded if another insn
        // changes the same register.
        // TODO: relies on the impossibility of a current and a temporary
        // loads in the same packet.
        TmpDefs.insert(*SRI);
      else if (i <= 1 && llvm::HexagonMCInstrInfo::hasNewValue2(MCII, MCI) )
        // vshuff(Vx, Vy, Rx) <- Vx(0) and Vy(1) are both source and
        // destination registers with this instruction. same for
        // vdeal(Vx,Vy,Rx)
        Uses.insert(*SRI);
      else
        Defs[*SRI].insert(PredSense(PredReg, isTrue));
    }
  }

  // Figure out register definitions that produce new values.
  if (HexagonMCInstrInfo::hasNewValue(MCII, MCI)) {
    unsigned R = HexagonMCInstrInfo::getNewValueOperand(MCII, MCI).getReg();

    if (HexagonMCInstrInfo::isCompound(MCII, MCI))
      compoundRegisterMap(R); // Compound insns have a limited register range.

    for (MCRegAliasIterator
           SRI(R, &RI, !MCSubRegIterator(R, &RI).isValid());
         SRI.isValid(); ++SRI)
      if (!MCSubRegIterator(*SRI, &RI).isValid())
        // No super-registers defined indirectly.
        NewDefs[*SRI].push_back(NewSense::Def(
            PredReg, HexagonMCInstrInfo::isPredicatedTrue(MCII, MCI),
            HexagonMCInstrInfo::isFloat(MCII, MCI)));

    // For fairly unique 2-dot-new producers, example:
    // vdeal(V1, V9, R0) V1.new and V9.new can be used by consumers.
    if (HexagonMCInstrInfo::hasNewValue2(MCII, MCI)) {
      unsigned R2 =
          HexagonMCInstrInfo::getNewValueOperand2(MCII, MCI).getReg();

      for (MCRegAliasIterator
             SRI(R2, &RI, !MCSubRegIterator(R2, &RI).isValid());
           SRI.isValid(); ++SRI)
        if (!MCSubRegIterator(*SRI, &RI).isValid())
          NewDefs[*SRI].push_back(NewSense::Def(
              PredReg, HexagonMCInstrInfo::isPredicatedTrue(MCII, MCI),
              HexagonMCInstrInfo::isFloat(MCII, MCI)));
    }
  }

  // Figure out definitions of new predicate registers.
  if (HexagonMCInstrInfo::isPredicatedNew(MCII, MCI))
    for (unsigned i = MCID.getNumDefs(); i < MCID.getNumOperands(); ++i)
      if (MCI.getOperand(i).isReg()) {
        unsigned P = MCI.getOperand(i).getReg();

        if (isPredicateRegister(P))
          NewPreds.insert(P);
      }

  // Figure out uses of new values.
  if (HexagonMCInstrInfo::isNewValue(MCII, MCI)) {
    unsigned N = HexagonMCInstrInfo::getNewValueOperand(MCII, MCI).getReg();

    if (!MCSubRegIterator(N, &RI).isValid()) {
      // Super-registers cannot use new values.
      if (MCID.isBranch())
        NewUses[N] = NewSense::Jmp(
            llvm::HexagonMCInstrInfo::getType(MCII, MCI) == HexagonII::TypeNV);
      else
        NewUses[N] = NewSense::Use(
            PredReg, HexagonMCInstrInfo::isPredicatedTrue(MCII, MCI));
    }
  }
}
/// Lower - Translate a MachineInstr into an equivalent MCInst, then rewrite
/// pseudo and suboptimal opcode forms into their final encodable shape.
/// Cases that rewrite the opcode into a form that itself needs further
/// simplification jump back to the ReSimplify label.
///
/// \param MI    the machine instruction to lower (read-only).
/// \param OutMI receives the lowered MCInst; any previous contents of OutMI
///              are replaced wholesale by several cases below.
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  // Lower each operand; LowerMachineOperand may decline an operand (returns
  // no value), in which case that operand is silently dropped.
  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
      OutMI.addOperand(MaybeMCOp.getValue());

  // Handle a few special cases to eliminate operand modifiers.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;

  case X86::MOV32ri64:
    OutMI.setOpcode(X86::MOV32ri);
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
  // if one of the registers is extended, but other isn't.
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    // Only commute when dest is non-extended and source is extended; the
    // _REV forms swap which operand lands in VEX.R vs VEX.B.
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVAPDrr:  NewOpc = X86::VMOVAPDrr_REV;  break;
      case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr:  NewOpc = X86::VMOVAPSrr_REV;  break;
      case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr:  NewOpc = X86::VMOVDQArr_REV;  break;
      case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr:  NewOpc = X86::VMOVDQUrr_REV;  break;
      case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr:  NewOpc = X86::VMOVUPDrr_REV;  break;
      case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr:  NewOpc = X86::VMOVUPSrr_REV;  break;
      case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    // Same VEX.R-vs-VEX.B commute; the interesting source is operand 2 here.
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses. As such, truncate
  // off all but the first operand (the callee). FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Drop all operands; emit the subtarget-appropriate plain return.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  // TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64: {
    unsigned Opcode;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::TAILJMPr: Opcode = X86::JMP32r; break;
    case X86::TAILJMPd:
    case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
    }
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // These are pseudo-ops for OR to help with the OR->ADD transformation. We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB:   OutMI.setOpcode(X86::OR16rr);   goto ReSimplify;
  case X86::ADD32rr_DB:   OutMI.setOpcode(X86::OR32rr);   goto ReSimplify;
  case X86::ADD64rr_DB:   OutMI.setOpcode(X86::OR64rr);   goto ReSimplify;
  case X86::ADD16ri_DB:   OutMI.setOpcode(X86::OR16ri);   goto ReSimplify;
  case X86::ADD32ri_DB:   OutMI.setOpcode(X86::OR32ri);   goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB:  OutMI.setOpcode(X86::OR16ri8);  goto ReSimplify;
  case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8);  goto ReSimplify;
  case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8);  goto ReSimplify;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here
  case X86::ACQUIRE_MOV8rm:  OutMI.setOpcode(X86::MOV8rm);  goto ReSimplify;
  case X86::ACQUIRE_MOV16rm: OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
  case X86::ACQUIRE_MOV32rm: OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
  case X86::ACQUIRE_MOV64rm: OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
  case X86::RELEASE_MOV8mr:  OutMI.setOpcode(X86::MOV8mr);  goto ReSimplify;
  case X86::RELEASE_MOV16mr: OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
  case X86::RELEASE_MOV32mr: OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
  case X86::RELEASE_MOV64mr: OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;
  case X86::RELEASE_MOV8mi:  OutMI.setOpcode(X86::MOV8mi);  goto ReSimplify;
  case X86::RELEASE_MOV16mi: OutMI.setOpcode(X86::MOV16mi); goto ReSimplify;
  case X86::RELEASE_MOV32mi: OutMI.setOpcode(X86::MOV32mi); goto ReSimplify;
  case X86::RELEASE_MOV64mi32: OutMI.setOpcode(X86::MOV64mi32); goto ReSimplify;
  case X86::RELEASE_ADD8mi:  OutMI.setOpcode(X86::ADD8mi);  goto ReSimplify;
  case X86::RELEASE_ADD32mi: OutMI.setOpcode(X86::ADD32mi); goto ReSimplify;
  case X86::RELEASE_ADD64mi32: OutMI.setOpcode(X86::ADD64mi32); goto ReSimplify;
  case X86::RELEASE_AND8mi:  OutMI.setOpcode(X86::AND8mi);  goto ReSimplify;
  case X86::RELEASE_AND32mi: OutMI.setOpcode(X86::AND32mi); goto ReSimplify;
  case X86::RELEASE_AND64mi32: OutMI.setOpcode(X86::AND64mi32); goto ReSimplify;
  case X86::RELEASE_OR8mi:   OutMI.setOpcode(X86::OR8mi);   goto ReSimplify;
  case X86::RELEASE_OR32mi:  OutMI.setOpcode(X86::OR32mi);  goto ReSimplify;
  case X86::RELEASE_OR64mi32: OutMI.setOpcode(X86::OR64mi32); goto ReSimplify;
  case X86::RELEASE_XOR8mi:  OutMI.setOpcode(X86::XOR8mi);  goto ReSimplify;
  case X86::RELEASE_XOR32mi: OutMI.setOpcode(X86::XOR32mi); goto ReSimplify;
  case X86::RELEASE_XOR64mi32: OutMI.setOpcode(X86::XOR64mi32); goto ReSimplify;
  case X86::RELEASE_INC8m:   OutMI.setOpcode(X86::INC8m);   goto ReSimplify;
  case X86::RELEASE_INC16m:  OutMI.setOpcode(X86::INC16m);  goto ReSimplify;
  case X86::RELEASE_INC32m:  OutMI.setOpcode(X86::INC32m);  goto ReSimplify;
  case X86::RELEASE_INC64m:  OutMI.setOpcode(X86::INC64m);  goto ReSimplify;
  case X86::RELEASE_DEC8m:   OutMI.setOpcode(X86::DEC8m);   goto ReSimplify;
  case X86::RELEASE_DEC16m:  OutMI.setOpcode(X86::DEC16m);  goto ReSimplify;
  case X86::RELEASE_DEC32m:  OutMI.setOpcode(X86::DEC32m);  goto ReSimplify;
  case X86::RELEASE_DEC64m:  OutMI.setOpcode(X86::DEC64m);  goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o32a); break;
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao32); break;
  case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o32a); break;
  case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao32); break;
  case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;
  case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;

  case X86::ADC8ri:  SimplifyShortImmForm(OutMI, X86::ADC8i8);   break;
  case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
  case X86::ADC32ri: SimplifyShortImmForm(OutMI, X86::ADC32i32); break;
  case X86::ADC64ri32: SimplifyShortImmForm(OutMI, X86::ADC64i32); break;
  case X86::ADD8ri:  SimplifyShortImmForm(OutMI, X86::ADD8i8);   break;
  case X86::ADD16ri: SimplifyShortImmForm(OutMI, X86::ADD16i16); break;
  case X86::ADD32ri: SimplifyShortImmForm(OutMI, X86::ADD32i32); break;
  case X86::ADD64ri32: SimplifyShortImmForm(OutMI, X86::ADD64i32); break;
  case X86::AND8ri:  SimplifyShortImmForm(OutMI, X86::AND8i8);   break;
  case X86::AND16ri: SimplifyShortImmForm(OutMI, X86::AND16i16); break;
  case X86::AND32ri: SimplifyShortImmForm(OutMI, X86::AND32i32); break;
  case X86::AND64ri32: SimplifyShortImmForm(OutMI, X86::AND64i32); break;
  case X86::CMP8ri:  SimplifyShortImmForm(OutMI, X86::CMP8i8);   break;
  case X86::CMP16ri: SimplifyShortImmForm(OutMI, X86::CMP16i16); break;
  case X86::CMP32ri: SimplifyShortImmForm(OutMI, X86::CMP32i32); break;
  case X86::CMP64ri32: SimplifyShortImmForm(OutMI, X86::CMP64i32); break;
  case X86::OR8ri:   SimplifyShortImmForm(OutMI, X86::OR8i8);    break;
  case X86::OR16ri:  SimplifyShortImmForm(OutMI, X86::OR16i16);  break;
  case X86::OR32ri:  SimplifyShortImmForm(OutMI, X86::OR32i32);  break;
  case X86::OR64ri32: SimplifyShortImmForm(OutMI, X86::OR64i32); break;
  case X86::SBB8ri:  SimplifyShortImmForm(OutMI, X86::SBB8i8);   break;
  case X86::SBB16ri: SimplifyShortImmForm(OutMI, X86::SBB16i16); break;
  case X86::SBB32ri: SimplifyShortImmForm(OutMI, X86::SBB32i32); break;
  case X86::SBB64ri32: SimplifyShortImmForm(OutMI, X86::SBB64i32); break;
  case X86::SUB8ri:  SimplifyShortImmForm(OutMI, X86::SUB8i8);   break;
  case X86::SUB16ri: SimplifyShortImmForm(OutMI, X86::SUB16i16); break;
  case X86::SUB32ri: SimplifyShortImmForm(OutMI, X86::SUB32i32); break;
  case X86::SUB64ri32: SimplifyShortImmForm(OutMI, X86::SUB64i32); break;
  case X86::TEST8ri: SimplifyShortImmForm(OutMI, X86::TEST8i8);  break;
  case X86::TEST16ri: SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
  case X86::TEST32ri: SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
  case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
  case X86::XOR8ri:  SimplifyShortImmForm(OutMI, X86::XOR8i8);   break;
  case X86::XOR16ri: SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
  case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
  case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}
/// EmitInstruction - Emit one MachineInstr to the streamer: first try the
/// tablegen'd pseudo expansions, then the manual lowerings below, and finally
/// fall through to the generic MCInstLowering path.
///
/// Cases that handle the instruction completely return; only the default
/// path reaches the generic lowering at the bottom.
void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  // Do any auto-generated pseudo lowerings.
  if (emitPseudoExpansionLowering(OutStreamer, MI))
    return;

  if (AArch64FI->getLOHRelated().count(MI)) {
    // Generate a label for LOH related instruction
    MCSymbol *LOHLabel = GetTempSymbol("loh", LOHLabelCounter++);
    // Associate the instruction with the label
    LOHInstToLabel[MI] = LOHLabel;
    OutStreamer.EmitLabel(LOHLabel);
  }

  // Do any manual lowerings.
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::DBG_VALUE: {
    // Debug values only produce an assembly comment, and only when the
    // streamer can accept raw text.
    if (isVerbose() && OutStreamer.hasRawTextSupport()) {
      SmallString<128> TmpStr;
      raw_svector_ostream OS(TmpStr);
      PrintDebugValueComment(MI, OS);
      OutStreamer.EmitRawText(StringRef(OS.str()));
    }
    return;
  }

  // Tail calls use pseudo instructions so they have the proper code-gen
  // attributes (isCall, isReturn, etc.). We lower them to the real
  // instruction here.
  case AArch64::TCRETURNri: {
    MCInst TmpInst;
    TmpInst.setOpcode(AArch64::BR);
    TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
    EmitToStreamer(OutStreamer, TmpInst);
    return;
  }
  case AArch64::TCRETURNdi: {
    MCOperand Dest;
    MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
    MCInst TmpInst;
    TmpInst.setOpcode(AArch64::B);
    TmpInst.addOperand(Dest);
    EmitToStreamer(OutStreamer, TmpInst);
    return;
  }
  case AArch64::TLSDESC_BLR: {
    MCOperand Callee, Sym;
    MCInstLowering.lowerOperand(MI->getOperand(0), Callee);
    MCInstLowering.lowerOperand(MI->getOperand(1), Sym);

    // First emit a relocation-annotation. This expands to no code, but requests
    // the following instruction gets an R_AARCH64_TLSDESC_CALL.
    MCInst TLSDescCall;
    TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
    TLSDescCall.addOperand(Sym);
    EmitToStreamer(OutStreamer, TLSDescCall);

    // Other than that it's just a normal indirect call to the function loaded
    // from the descriptor.
    MCInst BLR;
    BLR.setOpcode(AArch64::BLR);
    BLR.addOperand(Callee);
    EmitToStreamer(OutStreamer, BLR);
    return;
  }

  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(OutStreamer, SM, *MI);

  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(OutStreamer, SM, *MI);
  }

  // Finally, do the automated lowerings for everything else.
  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);
  EmitToStreamer(OutStreamer, TmpInst);
}
/// printInst - Print an ARM MCInst to the stream, substituting canonical
/// assembler aliases (nop/yield/push/pop/vpush/vpop/ldm, shift mnemonics,
/// etc.) for the raw instruction forms where one applies.  Every special case
/// prints and returns; only unmatched instructions reach the tablegen'd
/// printInstruction at the bottom.
void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
                               StringRef Annot) {
  unsigned Opcode = MI->getOpcode();

  // Check for HINT instructions w/ canonical names.
  if (Opcode == ARM::HINT || Opcode == ARM::t2HINT) {
    switch (MI->getOperand(0).getImm()) {
    case 0: O << "\tnop"; break;
    case 1: O << "\tyield"; break;
    case 2: O << "\twfe"; break;
    case 3: O << "\twfi"; break;
    case 4: O << "\tsev"; break;
    default:
      // Anything else should just print normally.
      printInstruction(MI, O);
      printAnnotation(O, Annot);
      return;
    }
    printPredicateOperand(MI, 1, O);
    if (Opcode == ARM::t2HINT)
      O << ".w";
    printAnnotation(O, Annot);
    return;
  }

  // Check for MOVs and print canonical forms, instead.
  if (Opcode == ARM::MOVsr) {
    // FIXME: Thumb variants?
    const MCOperand &Dst = MI->getOperand(0);
    const MCOperand &MO1 = MI->getOperand(1);
    const MCOperand &MO2 = MI->getOperand(2);
    const MCOperand &MO3 = MI->getOperand(3);

    // Print the shift mnemonic itself (lsl/lsr/asr/ror) as the opcode.
    O << '\t' << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(MO3.getImm()));
    printSBitModifierOperand(MI, 6, O);
    printPredicateOperand(MI, 4, O);

    O << '\t';
    printRegName(O, Dst.getReg());
    O << ", ";
    printRegName(O, MO1.getReg());
    O << ", ";
    printRegName(O, MO2.getReg());
    assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
    printAnnotation(O, Annot);
    return;
  }

  if (Opcode == ARM::MOVsi) {
    // FIXME: Thumb variants?
    const MCOperand &Dst = MI->getOperand(0);
    const MCOperand &MO1 = MI->getOperand(1);
    const MCOperand &MO2 = MI->getOperand(2);

    O << '\t' << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(MO2.getImm()));
    printSBitModifierOperand(MI, 5, O);
    printPredicateOperand(MI, 3, O);

    O << '\t';
    printRegName(O, Dst.getReg());
    O << ", ";
    printRegName(O, MO1.getReg());

    // rrx takes no shift amount; stop after the source register.
    if (ARM_AM::getSORegShOp(MO2.getImm()) == ARM_AM::rrx) {
      printAnnotation(O, Annot);
      return;
    }

    O << ", "
      << markup("<imm:")
      << "#" << translateShiftImm(ARM_AM::getSORegOffset(MO2.getImm()))
      << markup(">");
    printAnnotation(O, Annot);
    return;
  }

  // A8.6.123 PUSH
  if ((Opcode == ARM::STMDB_UPD || Opcode == ARM::t2STMDB_UPD) &&
      MI->getOperand(0).getReg() == ARM::SP && MI->getNumOperands() > 5) {
    // Should only print PUSH if there are at least two registers in the list.
    O << '\t' << "push";
    printPredicateOperand(MI, 2, O);
    if (Opcode == ARM::t2STMDB_UPD)
      O << ".w";
    O << '\t';
    printRegisterList(MI, 4, O);
    printAnnotation(O, Annot);
    return;
  }
  // Single-register push via pre-indexed store to [sp, #-4]!.
  if (Opcode == ARM::STR_PRE_IMM && MI->getOperand(2).getReg() == ARM::SP &&
      MI->getOperand(3).getImm() == -4) {
    O << '\t' << "push";
    printPredicateOperand(MI, 4, O);
    O << "\t{";
    printRegName(O, MI->getOperand(1).getReg());
    O << "}";
    printAnnotation(O, Annot);
    return;
  }

  // A8.6.122 POP
  if ((Opcode == ARM::LDMIA_UPD || Opcode == ARM::t2LDMIA_UPD) &&
      MI->getOperand(0).getReg() == ARM::SP && MI->getNumOperands() > 5) {
    // Should only print POP if there are at least two registers in the list.
    O << '\t' << "pop";
    printPredicateOperand(MI, 2, O);
    if (Opcode == ARM::t2LDMIA_UPD)
      O << ".w";
    O << '\t';
    printRegisterList(MI, 4, O);
    printAnnotation(O, Annot);
    return;
  }
  // Single-register pop via post-indexed load from [sp], #4.
  if (Opcode == ARM::LDR_POST_IMM && MI->getOperand(2).getReg() == ARM::SP &&
      MI->getOperand(4).getImm() == 4) {
    O << '\t' << "pop";
    printPredicateOperand(MI, 5, O);
    O << "\t{";
    printRegName(O, MI->getOperand(0).getReg());
    O << "}";
    printAnnotation(O, Annot);
    return;
  }

  // A8.6.355 VPUSH
  if ((Opcode == ARM::VSTMSDB_UPD || Opcode == ARM::VSTMDDB_UPD) &&
      MI->getOperand(0).getReg() == ARM::SP) {
    O << '\t' << "vpush";
    printPredicateOperand(MI, 2, O);
    O << '\t';
    printRegisterList(MI, 4, O);
    printAnnotation(O, Annot);
    return;
  }

  // A8.6.354 VPOP
  if ((Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMDIA_UPD) &&
      MI->getOperand(0).getReg() == ARM::SP) {
    O << '\t' << "vpop";
    printPredicateOperand(MI, 2, O);
    O << '\t';
    printRegisterList(MI, 4, O);
    printAnnotation(O, Annot);
    return;
  }

  if (Opcode == ARM::tLDMIA) {
    // tLDMIA writes back the base register unless the base also appears in
    // the destination register list.
    bool Writeback = true;
    unsigned BaseReg = MI->getOperand(0).getReg();
    for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg)
        Writeback = false;
    }

    O << "\tldm";

    printPredicateOperand(MI, 1, O);
    O << '\t';
    printRegName(O, BaseReg);
    if (Writeback) O << "!";
    O << ", ";
    printRegisterList(MI, 3, O);
    printAnnotation(O, Annot);
    return;
  }

  // Thumb1 NOP
  if (Opcode == ARM::tMOVr && MI->getOperand(0).getReg() == ARM::R8 &&
      MI->getOperand(1).getReg() == ARM::R8) {
    O << "\tnop";
    printPredicateOperand(MI, 2, O);
    printAnnotation(O, Annot);
    return;
  }

  // Combine 2 GPRs from disassember into a GPRPair to match with instr def.
  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when decoding them, the two GRPs cannot be automatically
  // expressed as a GPRPair, so we have to manually merge them.
  // FIXME: We would really like to be able to tablegen'erate this.
  if (Opcode == ARM::LDREXD || Opcode == ARM::STREXD) {
    const MCRegisterClass& MRC = MRI.getRegClass(ARM::GPRRegClassID);
    bool isStore = Opcode == ARM::STREXD;
    unsigned Reg = MI->getOperand(isStore ? 1 : 0).getReg();
    if (MRC.contains(Reg)) {
      MCInst NewMI;
      MCOperand NewReg;
      NewMI.setOpcode(Opcode);

      if (isStore)
        NewMI.addOperand(MI->getOperand(0));
      NewReg = MCOperand::CreateReg(MRI.getMatchingSuperReg(Reg, ARM::gsub_0,
        &MRI.getRegClass(ARM::GPRPairRegClassID)));
      NewMI.addOperand(NewReg);

      // Copy the rest operands into NewMI.
      for(unsigned i= isStore ? 3 : 2; i < MI->getNumOperands(); ++i)
        NewMI.addOperand(MI->getOperand(i));
      // NOTE(review): this path returns without calling printAnnotation,
      // unlike every other exit -- looks intentional in the original; confirm.
      printInstruction(&NewMI, O);
      return;
    }
  }

  printInstruction(MI, O);
  printAnnotation(O, Annot);
}
void MipsAsmPrinter::EmitInstrReg(unsigned Opcode, unsigned Reg) { MCInst I; I.setOpcode(Opcode); I.addOperand(MCOperand::CreateReg(Reg)); OutStreamer.EmitInstruction(I, getSubtargetInfo()); }
/// translateImmediate - Appends an immediate operand to an MCInst. /// /// @param mcInst - The MCInst to append to. /// @param immediate - The immediate value to append. static void translateImmediate(MCInst &mcInst, uint64_t immediate) { mcInst.addOperand(MCOperand::CreateImm(immediate)); }
/// DecodeL2OpInstructionFail - Fallback decoder tried when two-operand long
/// decoding fails: reinterpret the word as an L3R / L2RUS-family instruction.
///
/// The combined opcode is assembled from bits [19:16] (low nibble) and bits
/// [31:27] (high five bits) of \p Insn.  Unrecognized values yield Fail.
static DecodeStatus DecodeL2OpInstructionFail(MCInst &Inst, unsigned Insn,
                                              uint64_t Address,
                                              const void *Decoder) {
  // Try and decode as a L3R / L2RUS instruction.
  unsigned Opcode = fieldFromInstruction(Insn, 16, 4) |
                    fieldFromInstruction(Insn, 27, 5) << 4;

  // Phase 1: map the combined opcode field to the XCore opcode plus the
  // operand-decoder family it belongs to.
  enum DecoderKind { L3R, L3RSrcDst, L2RUS, L2RUSBitp, Invalid };
  DecoderKind Kind = Invalid;
  unsigned XCoreOp = 0;
  switch (Opcode) {
  case 0x0c:  XCoreOp = XCore::STW_l3r;     Kind = L3R;        break;
  case 0x1c:  XCoreOp = XCore::XOR_l3r;     Kind = L3R;        break;
  case 0x2c:  XCoreOp = XCore::ASHR_l3r;    Kind = L3R;        break;
  case 0x3c:  XCoreOp = XCore::LDAWF_l3r;   Kind = L3R;        break;
  case 0x4c:  XCoreOp = XCore::LDAWB_l3r;   Kind = L3R;        break;
  case 0x5c:  XCoreOp = XCore::LDA16F_l3r;  Kind = L3R;        break;
  case 0x6c:  XCoreOp = XCore::LDA16B_l3r;  Kind = L3R;        break;
  case 0x7c:  XCoreOp = XCore::MUL_l3r;     Kind = L3R;        break;
  case 0x8c:  XCoreOp = XCore::DIVS_l3r;    Kind = L3R;        break;
  case 0x9c:  XCoreOp = XCore::DIVU_l3r;    Kind = L3R;        break;
  case 0x10c: XCoreOp = XCore::ST16_l3r;    Kind = L3R;        break;
  case 0x11c: XCoreOp = XCore::ST8_l3r;     Kind = L3R;        break;
  case 0x12c: XCoreOp = XCore::ASHR_l2rus;  Kind = L2RUSBitp;  break;
  case 0x12d: XCoreOp = XCore::OUTPW_l2rus; Kind = L2RUSBitp;  break;
  case 0x12e: XCoreOp = XCore::INPW_l2rus;  Kind = L2RUSBitp;  break;
  case 0x13c: XCoreOp = XCore::LDAWF_l2rus; Kind = L2RUS;      break;
  case 0x14c: XCoreOp = XCore::LDAWB_l2rus; Kind = L2RUS;      break;
  case 0x15c: XCoreOp = XCore::CRC_l3r;     Kind = L3RSrcDst;  break;
  case 0x18c: XCoreOp = XCore::REMS_l3r;    Kind = L3R;        break;
  case 0x19c: XCoreOp = XCore::REMU_l3r;    Kind = L3R;        break;
  default:
    break;
  }

  if (Kind == Invalid)
    return MCDisassembler::Fail;

  // Phase 2: install the opcode and dispatch to the family's operand decoder.
  Inst.setOpcode(XCoreOp);
  switch (Kind) {
  case L3R:       return DecodeL3RInstruction(Inst, Insn, Address, Decoder);
  case L3RSrcDst: return DecodeL3RSrcDstInstruction(Inst, Insn, Address, Decoder);
  case L2RUS:     return DecodeL2RUSInstruction(Inst, Insn, Address, Decoder);
  case L2RUSBitp: return DecodeL2RUSBitpInstruction(Inst, Insn, Address, Decoder);
  case Invalid:   break;
  }
  return MCDisassembler::Fail;
}
/// DecodeSIMM13 - Decode the 13-bit signed immediate in bits [12:0] of
/// \p insn, sign-extend it, and append it to \p MI.
///
/// \returns MCDisassembler::Success unconditionally.
static DecodeStatus DecodeSIMM13(MCInst &MI, unsigned insn,
                               uint64_t Address, const void *Decoder) {
  // BUG FIX: the sign-extended value used to be stored in an 'unsigned', so
  // the implicit widening to int64_t inside createImm() zero-extended it and
  // negative immediates (e.g. -1) became large positive values (0xFFFFFFFF).
  // Keep the value signed end-to-end by extending directly to 64 bits.
  int64_t tgt = SignExtend64<13>(fieldFromInstruction(insn, 0, 13));
  MI.addOperand(MCOperand::createImm(tgt));
  return MCDisassembler::Success;
}
/// EmitIndirectBranch - Emit a NaCl-sandboxed indirect jump or call through
/// the register in \p Op.  The target register is masked (AND) and, outside
/// the zero-based sandbox, rebased off %r15 (ADD) inside a locked bundle so
/// the mask-and-branch sequence cannot be split.  When hiding the sandbox
/// base on NaCl64, calls are additionally rewritten to route through %r11 and
/// to push an explicit return address instead of using a real call.
///
/// \param Op      register operand holding the branch target.
/// \param Is64Bit true for the 64-bit sandbox sequence.
/// \param IsCall  true to emit a call, false for a plain jump.
/// \param Out     streamer receiving the emitted instructions.
static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
                               MCStreamer &Out) {
  const bool HideSandboxBase = (FlagHideSandboxBase &&
                                Is64Bit && !FlagUseZeroBasedSandbox);
  const int JmpMask = FlagSfiX86JmpMask;
  unsigned Reg32 = Op.getReg();

  // For NaCl64, the sequence
  //   jmp *%rXX
  // is changed to
  //   mov %rXX,%r11d
  //   and $0xffffffe0,%r11d
  //   add %r15,%r11
  //   jmpq *%r11
  //
  // And the sequence
  //   call *%rXX
  // return_addr:
  // is changed to
  //   mov %rXX,%r11d
  //   push return_addr
  //   and $0xffffffe0,%r11d
  //   add %r15,%r11
  //   jmpq *%r11
  //   .align 32
  // return_addr:
  //
  // This avoids exposing the sandbox base address via the return
  // address on the stack.

  // For NaCl64, force an assignment of the branch target into r11,
  // and subsequently use r11 as the ultimate branch target, so that
  // only r11 (which will never be written to memory) exposes the
  // sandbox base address.  But avoid a redundant assignment if the
  // original branch target is already r11 or r11d.
  const unsigned SafeReg32 = X86::R11D;
  const unsigned SafeReg64 = X86::R11;
  if (HideSandboxBase) {
    // In some cases, EmitIndirectBranch() is called with a 32-bit
    // register Op (e.g. r11d), and in other cases a 64-bit register
    // (e.g. r11), so we need to test both variants to avoid a
    // redundant assignment.  TODO(stichnot): Make callers consistent
    // on 32 vs 64 bit register.
    if ((Reg32 != SafeReg32) && (Reg32 != SafeReg64)) {
      MCInst MOVInst;
      MOVInst.setOpcode(X86::MOV32rr);
      MOVInst.addOperand(MCOperand::CreateReg(SafeReg32));
      MOVInst.addOperand(MCOperand::CreateReg(Reg32));
      Out.EmitInstruction(MOVInst);
      Reg32 = SafeReg32;
    }
  }
  const unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);

  // Explicitly push the (32-bit) return address for a NaCl64 call
  // instruction.
  MCSymbol *RetTarget = NULL;
  if (IsCall && HideSandboxBase) {
    MCContext &Context = Out.getContext();

    // Generate a label for the return address.
    RetTarget = CreateTempLabel(Context, "IndirectCallRetAddr");
    const MCExpr *RetTargetExpr = MCSymbolRefExpr::Create(RetTarget, Context);

    // push return_addr
    MCInst PUSHInst;
    PUSHInst.setOpcode(X86::PUSH64i32);
    PUSHInst.addOperand(MCOperand::CreateExpr(RetTargetExpr));
    Out.EmitInstruction(PUSHInst);
  }

  // A real call instruction is only emitted when not hiding the sandbox
  // base; otherwise the "call" is a push + jmp sequence built above/below.
  const bool WillEmitCallInst = IsCall && !HideSandboxBase;

  Out.EmitBundleLock(WillEmitCallInst);

  // Mask the branch target to a bundle boundary.
  MCInst ANDInst;
  ANDInst.setOpcode(X86::AND32ri8);
  ANDInst.addOperand(MCOperand::CreateReg(Reg32));
  ANDInst.addOperand(MCOperand::CreateReg(Reg32));
  ANDInst.addOperand(MCOperand::CreateImm(JmpMask));
  Out.EmitInstruction(ANDInst);

  // Rebase the masked target off the sandbox base register %r15.
  if (Is64Bit && !FlagUseZeroBasedSandbox) {
    MCInst InstADD;
    InstADD.setOpcode(X86::ADD64rr);
    InstADD.addOperand(MCOperand::CreateReg(Reg64));
    InstADD.addOperand(MCOperand::CreateReg(Reg64));
    InstADD.addOperand(MCOperand::CreateReg(X86::R15));
    Out.EmitInstruction(InstADD);
  }

  if (WillEmitCallInst) {
    // callq *%rXX
    MCInst CALLInst;
    CALLInst.setOpcode(Is64Bit ? X86::CALL64r : X86::CALL32r);
    CALLInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
    Out.EmitInstruction(CALLInst);
  } else {
    // jmpq *%rXX   -or-   jmpq *%r11
    MCInst JMPInst;
    JMPInst.setOpcode(Is64Bit ? X86::JMP64r : X86::JMP32r);
    JMPInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
    Out.EmitInstruction(JMPInst);
  }
  Out.EmitBundleUnlock();
  if (RetTarget) {
    // Align the pushed return address to a bundle boundary and bind it.
    Out.EmitCodeAlignment(kNaClX86InstructionBundleSize);
    Out.EmitLabel(RetTarget);
  }
}
// If the D<shift> instruction has a shift amount that is greater // than 31 (checked in calling routine), lower it to a D<shift>32 instruction static void LowerLargeShift(MCInst& Inst) { assert(Inst.getNumOperands() == 3 && "Invalid no. of operands for shift!"); assert(Inst.getOperand(2).isImm()); int64_t Shift = Inst.getOperand(2).getImm(); if (Shift <= 31) return; // Do nothing Shift -= 32; // saminus32 Inst.getOperand(2).setImm(Shift); switch (Inst.getOpcode()) { default: // Calling function is not synchronized llvm_unreachable("Unexpected shift instruction"); case Mips::DSLL: Inst.setOpcode(Mips::DSLL32); return; case Mips::DSRL: Inst.setOpcode(Mips::DSRL32); return; case Mips::DSRA: Inst.setOpcode(Mips::DSRA32); return; case Mips::DROTR: Inst.setOpcode(Mips::DROTR32); return; case Mips::DSLL_MM64R6: Inst.setOpcode(Mips::DSLL32_MM64R6); return; case Mips::DSRL_MM64R6: Inst.setOpcode(Mips::DSRL32_MM64R6); return; case Mips::DSRA_MM64R6: Inst.setOpcode(Mips::DSRA32_MM64R6); return; case Mips::DROTR_MM64R6: Inst.setOpcode(Mips::DROTR32_MM64R6); return; } }
static void EmitTLSAddr32(const MCInst &Inst, MCStreamer &Out) { Out.EmitBundleLock(true); MCInst LeaInst; LeaInst.setOpcode(X86::LEA32r); LeaInst.addOperand(MCOperand::CreateReg(X86::EAX)); // DestReg LeaInst.addOperand(Inst.getOperand(0)); // BaseReg LeaInst.addOperand(Inst.getOperand(1)); // Scale LeaInst.addOperand(Inst.getOperand(2)); // IndexReg LeaInst.addOperand(Inst.getOperand(3)); // Offset LeaInst.addOperand(Inst.getOperand(4)); // SegmentReg Out.EmitInstruction(LeaInst); MCInst CALLInst; CALLInst.setOpcode(X86::CALLpcrel32); MCContext &context = Out.getContext(); const MCSymbolRefExpr *expr = MCSymbolRefExpr::Create( context.GetOrCreateSymbol(StringRef("___tls_get_addr")), MCSymbolRefExpr::VK_PLT, context); CALLInst.addOperand(MCOperand::CreateExpr(expr)); Out.EmitInstruction(CALLInst); Out.EmitBundleUnlock(); }
/// LowerSubReg32_Op0 - Things like MOVZX16rr8 -> MOVZX32rr8. static void LowerSubReg32_Op0(MCInst &OutMI, unsigned NewOpc) { OutMI.setOpcode(NewOpc); lower_subreg32(&OutMI, 0); }
// Predicate: does operand OpNo of MI hold exactly the register R?
// NOTE(review): 'R' is not declared anywhere in this span; it is presumably a
// non-type template parameter (i.e. 'template <unsigned R>' immediately
// above this function) -- confirm against the full file before touching.
static bool isReg(const MCInst &MI, unsigned OpNo) {
  // Callers must only pass register operands; fires in debug builds otherwise.
  assert(MI.getOperand(OpNo).isReg() && "Register operand expected.");
  return MI.getOperand(OpNo).getReg() == R;
}
/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with /// a short fixed-register form. static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { unsigned ImmOp = Inst.getNumOperands() - 1; assert(Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!"); // Check whether the destination register can be fixed. unsigned Reg = Inst.getOperand(0).getReg(); if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) return; // If so, rewrite the instruction. MCOperand Saved = Inst.getOperand(ImmOp); Inst = MCInst(); Inst.setOpcode(Opcode); Inst.addOperand(Saved); }
/// mayNeedRelaxation - Return true if Inst has a relaxed (wider) encoding it
/// could be rewritten to, i.e. getRelaxedOpcode maps its opcode to a
/// different opcode.
bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  // Idiom fix: collapse `if (cond) return true; return false;` into a direct
  // return of the condition.
  return getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode();
}
/// Lower - Translate a MachineInstr into an equivalent MCInst: first lower
/// each machine operand by hand, then rewrite pseudo and suboptimal opcode
/// forms into their final encodable shape.  Cases that rewrite the opcode
/// into another form that itself needs simplification jump back to the
/// ReSimplify label.
///
/// \param MI    machine instruction to lower (read-only aside from dump()).
/// \param OutMI receives the lowered MCInst; some cases below replace it
///              wholesale.
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    MCOperand MCOp;
    switch (MO.getType()) {
    default:
      // Unknown operand kinds dump the instruction for diagnosis, then abort.
      MI->dump();
      llvm_unreachable("unknown operand type");
    case MachineOperand::MO_Register:
      // Ignore all implicit register operands.
      if (MO.isImplicit()) continue;
      MCOp = MCOperand::CreateReg(MO.getReg());
      break;
    case MachineOperand::MO_Immediate:
      MCOp = MCOperand::CreateImm(MO.getImm());
      break;
    case MachineOperand::MO_MachineBasicBlock:
      MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
                       MO.getMBB()->getSymbol(), Ctx));
      break;
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
      break;
    case MachineOperand::MO_JumpTableIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_ConstantPoolIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_BlockAddress:
      MCOp = LowerSymbolOperand(MO,
                     AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
      break;
    case MachineOperand::MO_RegisterMask:
      // Ignore call clobbers.
      continue;
    }

    OutMI.addOperand(MCOp);
  }

  // Handle a few special cases to eliminate operand modifiers.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
    lower_lea64_32mem(&OutMI, 1);
    // FALL THROUGH.
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;

  // 64-bit pseudos whose semantics are fully covered by a 32-bit form
  // (implicit zero-extension); rewrite opcode and narrow operand 0.
  case X86::MOVZX64rr32:  LowerSubReg32_Op0(OutMI, X86::MOV32rr); break;
  case X86::MOVZX64rm32:  LowerSubReg32_Op0(OutMI, X86::MOV32rm); break;
  case X86::MOV64ri64i32: LowerSubReg32_Op0(OutMI, X86::MOV32ri); break;
  case X86::MOVZX64rr8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
  case X86::MOVZX64rm8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
  case X86::MOVZX64rr16:  LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break;
  case X86::MOVZX64rm16:  LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break;
  // SETB_C pseudos materialize the carry flag via sbb reg,reg.
  case X86::SETB_C8r:     LowerUnaryToTwoAddr(OutMI, X86::SBB8rr); break;
  case X86::SETB_C16r:    LowerUnaryToTwoAddr(OutMI, X86::SBB16rr); break;
  case X86::SETB_C32r:    LowerUnaryToTwoAddr(OutMI, X86::SBB32rr); break;
  case X86::SETB_C64r:    LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
  // Zeroing pseudos become xor reg,reg.
  case X86::MOV8r0:       LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
  case X86::MOV32r0:      LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;

  case X86::MOV16r0:
    LowerSubReg32_Op0(OutMI, X86::MOV32r0);   // MOV16r0 -> MOV32r0
    LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
    break;
  case X86::MOV64r0:
    LowerSubReg32_Op0(OutMI, X86::MOV32r0);   // MOV64r0 -> MOV32r0
    LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
    break;

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses. As such, truncate
  // off all but the first operand (the callee). FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Drop all operands; a plain RET is emitted.
    OutMI = MCInst();
    OutMI.setOpcode(X86::RET);
    break;
  }

  // TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64: {
    unsigned Opcode;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::TAILJMPr: Opcode = X86::JMP32r; break;
    case X86::TAILJMPd:
    case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
    }
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  // These are pseudo-ops for OR to help with the OR->ADD transformation. We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB:   OutMI.setOpcode(X86::OR16rr);   goto ReSimplify;
  case X86::ADD32rr_DB:   OutMI.setOpcode(X86::OR32rr);   goto ReSimplify;
  case X86::ADD64rr_DB:   OutMI.setOpcode(X86::OR64rr);   goto ReSimplify;
  case X86::ADD16ri_DB:   OutMI.setOpcode(X86::OR16ri);   goto ReSimplify;
  case X86::ADD32ri_DB:   OutMI.setOpcode(X86::OR32ri);   goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB:  OutMI.setOpcode(X86::OR16ri8);  goto ReSimplify;
  case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8);  goto ReSimplify;
  case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8);  goto ReSimplify;

  // The assembler backend wants to see branches in their small form and relax
  // them to their large form. The JIT can only handle the large form because
  // it does not do relaxation. For now, translate the large form to the
  // small one here.
  case X86::JMP_4: OutMI.setOpcode(X86::JMP_1); break;
  case X86::JO_4:  OutMI.setOpcode(X86::JO_1);  break;
  case X86::JNO_4: OutMI.setOpcode(X86::JNO_1); break;
  case X86::JB_4:  OutMI.setOpcode(X86::JB_1);  break;
  case X86::JAE_4: OutMI.setOpcode(X86::JAE_1); break;
  case X86::JE_4:  OutMI.setOpcode(X86::JE_1);  break;
  case X86::JNE_4: OutMI.setOpcode(X86::JNE_1); break;
  case X86::JBE_4: OutMI.setOpcode(X86::JBE_1); break;
  case X86::JA_4:  OutMI.setOpcode(X86::JA_1);  break;
  case X86::JS_4:  OutMI.setOpcode(X86::JS_1);  break;
  case X86::JNS_4: OutMI.setOpcode(X86::JNS_1); break;
  case X86::JP_4:  OutMI.setOpcode(X86::JP_1);  break;
  case X86::JNP_4: OutMI.setOpcode(X86::JNP_1); break;
  case X86::JL_4:  OutMI.setOpcode(X86::JL_1);  break;
  case X86::JGE_4: OutMI.setOpcode(X86::JGE_1); break;
  case X86::JLE_4: OutMI.setOpcode(X86::JLE_1); break;
  case X86::JG_4:  OutMI.setOpcode(X86::JG_1);  break;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here
  case X86::ACQUIRE_MOV8rm:  OutMI.setOpcode(X86::MOV8rm);  goto ReSimplify;
  case X86::ACQUIRE_MOV16rm: OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
  case X86::ACQUIRE_MOV32rm: OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
  case X86::ACQUIRE_MOV64rm: OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
  case X86::RELEASE_MOV8mr:  OutMI.setOpcode(X86::MOV8mr);  goto ReSimplify;
  case X86::RELEASE_MOV16mr: OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
  case X86::RELEASE_MOV32mr: OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
  case X86::RELEASE_MOV64mr: OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao8); break;
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o8a); break;
  case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao16); break;
  case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o16a); break;
  case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
  case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;

  case X86::ADC8ri:  SimplifyShortImmForm(OutMI, X86::ADC8i8);   break;
  case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
  case X86::ADC32ri: SimplifyShortImmForm(OutMI, X86::ADC32i32); break;
  case X86::ADC64ri32: SimplifyShortImmForm(OutMI, X86::ADC64i32); break;
  case X86::ADD8ri:  SimplifyShortImmForm(OutMI, X86::ADD8i8);   break;
  case X86::ADD16ri: SimplifyShortImmForm(OutMI, X86::ADD16i16); break;
  case X86::ADD32ri: SimplifyShortImmForm(OutMI, X86::ADD32i32); break;
  case X86::ADD64ri32: SimplifyShortImmForm(OutMI, X86::ADD64i32); break;
  case X86::AND8ri:  SimplifyShortImmForm(OutMI, X86::AND8i8);   break;
  case X86::AND16ri: SimplifyShortImmForm(OutMI, X86::AND16i16); break;
  case X86::AND32ri: SimplifyShortImmForm(OutMI, X86::AND32i32); break;
  case X86::AND64ri32: SimplifyShortImmForm(OutMI, X86::AND64i32); break;
  case X86::CMP8ri:  SimplifyShortImmForm(OutMI, X86::CMP8i8);   break;
  case X86::CMP16ri: SimplifyShortImmForm(OutMI, X86::CMP16i16); break;
  case X86::CMP32ri: SimplifyShortImmForm(OutMI, X86::CMP32i32); break;
  case X86::CMP64ri32: SimplifyShortImmForm(OutMI, X86::CMP64i32); break;
  case X86::OR8ri:   SimplifyShortImmForm(OutMI, X86::OR8i8);    break;
  case X86::OR16ri:  SimplifyShortImmForm(OutMI, X86::OR16i16);  break;
  case X86::OR32ri:  SimplifyShortImmForm(OutMI, X86::OR32i32);  break;
  case X86::OR64ri32: SimplifyShortImmForm(OutMI, X86::OR64i32); break;
  case X86::SBB8ri:  SimplifyShortImmForm(OutMI, X86::SBB8i8);   break;
  case X86::SBB16ri: SimplifyShortImmForm(OutMI, X86::SBB16i16); break;
  case X86::SBB32ri: SimplifyShortImmForm(OutMI, X86::SBB32i32); break;
  case X86::SBB64ri32: SimplifyShortImmForm(OutMI, X86::SBB64i32); break;
  case X86::SUB8ri:  SimplifyShortImmForm(OutMI, X86::SUB8i8);   break;
  case X86::SUB16ri: SimplifyShortImmForm(OutMI, X86::SUB16i16); break;
  case X86::SUB32ri: SimplifyShortImmForm(OutMI, X86::SUB32i32); break;
  case X86::SUB64ri32: SimplifyShortImmForm(OutMI, X86::SUB64i32); break;
  case X86::TEST8ri: SimplifyShortImmForm(OutMI, X86::TEST8i8);  break;
  case X86::TEST16ri: SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
  case X86::TEST32ri: SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
  case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
  case X86::XOR8ri:  SimplifyShortImmForm(OutMI, X86::XOR8i8);   break;
  case X86::XOR16ri: SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
  case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
  case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;

  case X86::MORESTACK_RET:
    OutMI.setOpcode(X86::RET);
    break;

  case X86::MORESTACK_RET_RESTORE_R10: {
    // Rewrite the pseudo as "mov %rax, %r10" in OutMI and emit the trailing
    // "ret" directly to the streamer.
    MCInst retInst;
    OutMI.setOpcode(X86::MOV64rr);
    OutMI.addOperand(MCOperand::CreateReg(X86::R10));
    OutMI.addOperand(MCOperand::CreateReg(X86::RAX));
    retInst.setOpcode(X86::RET);
    AsmPrinter.OutStreamer.EmitInstruction(retInst);
    break;
  }
  }
}
// Pick a DEXT or DINS instruction variant based on the pos and size operands void Mips::LowerDextDins(MCInst& InstIn) { int Opcode = InstIn.getOpcode(); if (Opcode == Mips::DEXT) assert(InstIn.getNumOperands() == 4 && "Invalid no. of machine operands for DEXT!"); else // Only DEXT and DINS are possible assert(InstIn.getNumOperands() == 5 && "Invalid no. of machine operands for DINS!"); assert(InstIn.getOperand(2).isImm()); int64_t pos = InstIn.getOperand(2).getImm(); assert(InstIn.getOperand(3).isImm()); int64_t size = InstIn.getOperand(3).getImm(); if (size <= 32) { if (pos < 32) // DEXT/DINS, do nothing return; // DEXTU/DINSU InstIn.getOperand(2).setImm(pos - 32); InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTU : Mips::DINSU); return; } // DEXTM/DINSM assert(pos < 32 && "DEXT/DINS cannot have both size and pos > 32"); InstIn.getOperand(3).setImm(size - 32); InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTM : Mips::DINSM); return; }
/// EmitInstruction - Lower a single X86 MachineInstr to MC and emit it.
/// Pseudo instructions that need multi-instruction or comment-augmented
/// expansion (DBG_VALUE, memory barriers, TLS address sequences, PIC base
/// materialization, GOT-relative ADD32ri) are handled here; everything else
/// falls through to the generic MCInstLower path at the bottom.
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  X86MCInstLower MCInstLowering(Mang, *MF, *this);
  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
    // Debug values produce no machine code; emit them as an assembler
    // comment only when verbose raw-text output is available.
    if (isVerbose() && OutStreamer.hasRawTextSupport()) {
      std::string TmpStr;
      raw_string_ostream OS(TmpStr);
      PrintDebugValueComment(MI, OS);
      OutStreamer.EmitRawText(StringRef(OS.str()));
    }
    return;

  // Emit nothing here but a comment if we can.
  case X86::Int_MemBarrier:
    if (OutStreamer.hasRawTextSupport())
      OutStreamer.EmitRawText(StringRef("\t#MEMBARRIER"));
    return;

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Lower these as normal, but add some comments.
    unsigned Reg = MI->getOperand(0).getReg();
    OutStreamer.AddComment(StringRef("eh_return, addr: %") +
                           X86ATTInstPrinter::getRegisterName(Reg));
    break;
  }
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    // Lower these as normal, but add some comments.
    OutStreamer.AddComment("TAILCALL");
    break;

  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
    // TLS pseudos expand to multiple instructions; delegate entirely.
    return LowerTlsAddr(OutStreamer, MCInstLowering, *MI);

  case X86::MOVPC32r: {
    MCInst TmpInst;
    // This is a pseudo op for a two instruction sequence with a label, which
    // looks like:
    //     call "L1$pb"
    // "L1$pb":
    //     popl %esi

    // Emit the call.
    MCSymbol *PICBase = MF->getPICBaseSymbol();
    TmpInst.setOpcode(X86::CALLpcrel32);
    // FIXME: We would like an efficient form for this, so we don't have to do a
    // lot of extra uniquing.
    TmpInst.addOperand(MCOperand::CreateExpr(MCSymbolRefExpr::Create(PICBase,
                                                                 OutContext)));
    OutStreamer.EmitInstruction(TmpInst);

    // Emit the label.
    OutStreamer.EmitLabel(PICBase);

    // popl $reg
    // Note: TmpInst is reused — operand 0 (the call target expression) is
    // overwritten with the pop's destination register before re-emitting.
    TmpInst.setOpcode(X86::POP32r);
    TmpInst.getOperand(0) = MCOperand::CreateReg(MI->getOperand(0).getReg());
    OutStreamer.EmitInstruction(TmpInst);
    return;
  }

  case X86::ADD32ri: {
    // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
    // Ordinary ADD32ri (without the GOT-absolute flag) takes the generic
    // lowering path below.
    if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
      break;

    // Okay, we have something like:
    //  EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

    // For this, we want to print something like:
    //   MYGLOBAL + (. - PICBASE)
    // However, we can't generate a ".", so just emit a new label here and refer
    // to it.
    MCSymbol *DotSym = OutContext.CreateTempSymbol();
    OutStreamer.EmitLabel(DotSym);

    // Now that we have emitted the label, lower the complex operand expression.
    MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

    // Build OpSym + (DotSym - PICBase) as an MC expression tree.
    const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
    const MCExpr *PICBase =
      MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), OutContext);
    DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);

    DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
                                      DotExpr, OutContext);

    MCInst TmpInst;
    TmpInst.setOpcode(X86::ADD32ri);
    TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
    TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
    TmpInst.addOperand(MCOperand::CreateExpr(DotExpr));
    OutStreamer.EmitInstruction(TmpInst);
    return;
  }
  }

  // Generic path: cases above that 'break' (rather than 'return') still want
  // the standard lowering after their comment has been attached.
  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);
  OutStreamer.EmitInstruction(TmpInst);
}
/// EmitInstruction -- Print out a single PowerPC MI in Darwin syntax to
/// the current output stream.
///
/// Multi-instruction pseudos (DBG_VALUE, MovePCtoLR, LDtoc, MFCRpseud) are
/// expanded here; everything else goes through the generic
/// LowerPPCMachineInstrToMCInst path at the bottom.
void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
  MCInst TmpInst;

  // Lower multi-instruction pseudo operations.
  switch (MI->getOpcode()) {
  default: break;
  case TargetOpcode::DBG_VALUE: {
    // Debug values emit no machine code — only an assembler comment, and
    // only when verbose raw-text output is available.
    if (!isVerbose() || !OutStreamer.hasRawTextSupport()) return;

    SmallString<32> Str;
    raw_svector_ostream O(Str);
    unsigned NOps = MI->getNumOperands();
    assert(NOps==4);
    O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
    // cast away const; DIetc do not take const operands for some reason.
    DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
    O << V.getName();
    O << " <- ";
    // Frame address.  Currently handles register +- offset only.
    assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm());
    O << '['; printOperand(MI, 0, O); O << '+'; printOperand(MI, 1, O);
    O << ']';
    O << "+";
    printOperand(MI, NOps-2, O);
    OutStreamer.EmitRawText(O.str());
    return;
  }

  case PPC::MovePCtoLR:
  case PPC::MovePCtoLR8: {
    // Transform %LR = MovePCtoLR
    // Into this, where the label is the PIC base:
    //     bl L1$pb
    // L1$pb:
    MCSymbol *PICBase = MF->getPICBaseSymbol();

    // Emit the 'bl'.
    TmpInst.setOpcode(PPC::BL_Darwin); // Darwin vs SVR4 doesn't matter here.

    // FIXME: We would like an efficient form for this, so we don't have to do
    // a lot of extra uniquing.
    TmpInst.addOperand(MCOperand::CreateExpr(MCSymbolRefExpr::
                                             Create(PICBase, OutContext)));
    OutStreamer.EmitInstruction(TmpInst);

    // Emit the label.
    OutStreamer.EmitLabel(PICBase);
    return;
  }
  case PPC::LDtoc: {
    // Transform %X3 = LDtoc <ga:@min1>, %X2
    LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());

    // Change the opcode to LD, and the global address operand to be a
    // reference to the TOC entry we will synthesize later.
    TmpInst.setOpcode(PPC::LD);
    const MachineOperand &MO = MI->getOperand(1);
    assert(MO.isGlobal());

    // Map symbol -> label of TOC entry. TOC is a persistent map; a new label
    // is created only on first reference to this global.
    MCSymbol *&TOCEntry = TOC[Mang->getSymbol(MO.getGlobal())];
    if (TOCEntry == 0)
      TOCEntry = GetTempSymbol("C", TOCLabelID++);

    const MCExpr *Exp =
      MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC,
                              OutContext);
    TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp);
    OutStreamer.EmitInstruction(TmpInst);
    return;
  }

  case PPC::MFCRpseud:
    // Transform: %R3 = MFCRpseud %CR7
    // Into:      %R3 = MFCR      ;; cr7
    // The source CR register survives only as an assembler comment.
    OutStreamer.AddComment(PPCInstPrinter::
                           getRegisterName(MI->getOperand(1).getReg()));
    TmpInst.setOpcode(PPC::MFCR);
    TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
    OutStreamer.EmitInstruction(TmpInst);
    return;
  }

  // Generic path for all non-pseudo instructions.
  LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());
  OutStreamer.EmitInstruction(TmpInst);
}
/// Decode an unsigned immediate operand field of width \p N bits.
///
/// The raw encoded field value \p Imm is attached to \p Inst unchanged;
/// the assert guards against a caller extracting more than N bits.
/// \p Address and \p Decoder are unused here but required by the decoder
/// hook signature. Always reports success.
///
/// Fix: the original text referenced the template parameter N (isUInt<N>)
/// without declaring it — the `template <unsigned N>` header was missing,
/// making the definition ill-formed.
template <unsigned N>
static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm,
                                      int64_t Address, const void *Decoder) {
  assert(isUInt<N>(Imm) && "Invalid immediate");
  Inst.addOperand(MCOperand::createImm(Imm));
  return MCDisassembler::Success;
}
/// deriveSubInst - Translate a full-size Hexagon instruction into the
/// equivalent compact sub-instruction (SA1_*/SL1_*/SL2_*/SS1_*/SS2_* opcode)
/// for duplex packing. Operands are copied selectively via addOps; the
/// trailing "// n,m SUBInst ..." comments record which source operand
/// positions are kept and the resulting sub-instruction syntax.
/// Asserts (llvm_unreachable) on opcodes with no sub-instruction mapping.
MCInst HexagonMCInstrInfo::deriveSubInst(MCInst const &Inst) {
  MCInst Result;
  bool Absolute;
  int64_t Value;
  switch (Inst.getOpcode()) {
  default:
    // dbgs() << "opcode: "<< Inst->getOpcode() << "\n";
    llvm_unreachable("Unimplemented subinstruction \n");
    break;
  case Hexagon::A2_addi:
    // Pick a specialized add form when the immediate is a known constant.
    Absolute = Inst.getOperand(2).getExpr()->evaluateAsAbsolute(Value);
    if (Absolute) {
      if (Value == 1) {
        Result.setOpcode(Hexagon::SA1_inc);
        addOps(Result, Inst, 0);
        addOps(Result, Inst, 1);
        break;
      } //  1,2 SUBInst $Rd = add($Rs, #1)
      if (Value == -1) {
        Result.setOpcode(Hexagon::SA1_dec);
        addOps(Result, Inst, 0);
        addOps(Result, Inst, 1);
        addOps(Result, Inst, 2);
        break;
      } //  1,2 SUBInst $Rd = add($Rs,#-1)
      if (Inst.getOperand(1).getReg() == Hexagon::R29) {
        Result.setOpcode(Hexagon::SA1_addsp);
        addOps(Result, Inst, 0);
        addOps(Result, Inst, 2);
        break;
      } //  1,3 SUBInst $Rd = add(r29, #$u6_2)
    }
    // General immediate (or non-constant expression) form.
    Result.setOpcode(Hexagon::SA1_addi);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //    1,2,3 SUBInst $Rx = add($Rx, #$s7)
  case Hexagon::A2_add:
    Result.setOpcode(Hexagon::SA1_addrx);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //    1,2,3 SUBInst $Rx = add($_src_, $Rs)
  case Hexagon::S2_allocframe:
    Result.setOpcode(Hexagon::SS2_allocframe);
    addOps(Result, Inst, 0);
    break; //    1 SUBInst allocframe(#$u5_3)
  case Hexagon::A2_andir:
    // and with #255 is zxtb; the sub-instruction set only encodes #1 and
    // #255 masks.
    if (minConstant(Inst, 2) == 255) {
      Result.setOpcode(Hexagon::SA1_zxtb);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      break; //  1,2    $Rd = and($Rs, #255)
    } else {
      Result.setOpcode(Hexagon::SA1_and1);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      break; //  1,2 SUBInst $Rd = and($Rs, #1)
    }
  case Hexagon::C2_cmpeqi:
    Result.setOpcode(Hexagon::SA1_cmpeqi);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //  2,3 SUBInst p0 = cmp.eq($Rs, #$u2)
  case Hexagon::A4_combineii:
  case Hexagon::A2_combineii:
    // The first combine immediate selects among the SA1_combine{0..3}i forms.
    Absolute = Inst.getOperand(1).getExpr()->evaluateAsAbsolute(Value);
    assert(Absolute);(void)Absolute;
    if (Value == 1) {
      Result.setOpcode(Hexagon::SA1_combine1i);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 2);
      break;
    } //  1,3 SUBInst $Rdd = combine(#1, #$u2)
    if (Value == 3) {
      Result.setOpcode(Hexagon::SA1_combine3i);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 2);
      break;
    } //  1,3 SUBInst $Rdd = combine(#3, #$u2)
    if (Value == 0) {
      Result.setOpcode(Hexagon::SA1_combine0i);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 2);
      break;
    } //  1,3 SUBInst $Rdd = combine(#0, #$u2)
    if (Value == 2) {
      Result.setOpcode(Hexagon::SA1_combine2i);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 2);
      break;
    } //  1,3 SUBInst $Rdd = combine(#2, #$u2)
    // NOTE(review): if Value is outside {0,1,2,3} control falls through into
    // the A4_combineir case below — presumably callers never pass such a
    // value here; confirm against the duplex-eligibility checks.
  case Hexagon::A4_combineir:
    Result.setOpcode(Hexagon::SA1_combinezr);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 2);
    break; //  1,3 SUBInst $Rdd = combine(#0, $Rs)
  case Hexagon::A4_combineri:
    Result.setOpcode(Hexagon::SA1_combinerz);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  1,2 SUBInst $Rdd = combine($Rs, #0)
  case Hexagon::L4_return_tnew_pnt:
  case Hexagon::L4_return_tnew_pt:
    Result.setOpcode(Hexagon::SL2_return_tnew);
    break; //  none SUBInst if (p0.new) dealloc_return:nt
  case Hexagon::L4_return_fnew_pnt:
  case Hexagon::L4_return_fnew_pt:
    Result.setOpcode(Hexagon::SL2_return_fnew);
    break; //  none SUBInst if (!p0.new) dealloc_return:nt
  case Hexagon::L4_return_f:
    Result.setOpcode(Hexagon::SL2_return_f);
    break; //  none SUBInst if (!p0) dealloc_return
  case Hexagon::L4_return_t:
    Result.setOpcode(Hexagon::SL2_return_t);
    break; //  none SUBInst if (p0) dealloc_return
  case Hexagon::L4_return:
    Result.setOpcode(Hexagon::SL2_return);
    break; //  none SUBInst dealloc_return
  case Hexagon::L2_deallocframe:
    Result.setOpcode(Hexagon::SL2_deallocframe);
    break; //  none SUBInst deallocframe
  case Hexagon::EH_RETURN_JMPR:
  case Hexagon::J2_jumpr:
  case Hexagon::PS_jmpret:
    Result.setOpcode(Hexagon::SL2_jumpr31);
    break; //  none SUBInst jumpr r31
  case Hexagon::J2_jumprf:
  case Hexagon::PS_jmpretf:
    Result.setOpcode(Hexagon::SL2_jumpr31_f);
    break; //  none SUBInst if (!p0) jumpr r31
  case Hexagon::J2_jumprfnew:
  case Hexagon::J2_jumprfnewpt:
  case Hexagon::PS_jmpretfnewpt:
  case Hexagon::PS_jmpretfnew:
    Result.setOpcode(Hexagon::SL2_jumpr31_fnew);
    break; //  none SUBInst if (!p0.new) jumpr:nt r31
  case Hexagon::J2_jumprt:
  case Hexagon::PS_jmprett:
    Result.setOpcode(Hexagon::SL2_jumpr31_t);
    break; //  none SUBInst if (p0) jumpr r31
  case Hexagon::J2_jumprtnew:
  case Hexagon::J2_jumprtnewpt:
  case Hexagon::PS_jmprettnewpt:
  case Hexagon::PS_jmprettnew:
    Result.setOpcode(Hexagon::SL2_jumpr31_tnew);
    break; //  none SUBInst if (p0.new) jumpr:nt r31
  case Hexagon::L2_loadrb_io:
    Result.setOpcode(Hexagon::SL2_loadrb_io);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //  1,2,3 SUBInst $Rd = memb($Rs + #$u3_0)
  case Hexagon::L2_loadrd_io:
    Result.setOpcode(Hexagon::SL2_loadrd_sp);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 2);
    break; //  1,3 SUBInst $Rdd = memd(r29 + #$u5_3)
  case Hexagon::L2_loadrh_io:
    Result.setOpcode(Hexagon::SL2_loadrh_io);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //  1,2,3 SUBInst $Rd = memh($Rs + #$u3_1)
  case Hexagon::L2_loadrub_io:
    Result.setOpcode(Hexagon::SL1_loadrub_io);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //  1,2,3 SUBInst $Rd = memub($Rs + #$u4_0)
  case Hexagon::L2_loadruh_io:
    Result.setOpcode(Hexagon::SL2_loadruh_io);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //  1,2,3 SUBInst $Rd = memuh($Rs + #$u3_1)
  case Hexagon::L2_loadri_io:
    // Stack-pointer-relative word loads get the dedicated sp form.
    if (Inst.getOperand(1).getReg() == Hexagon::R29) {
      Result.setOpcode(Hexagon::SL2_loadri_sp);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 2);
      break; //  2 1,3 SUBInst $Rd = memw(r29 + #$u5_2)
    } else {
      Result.setOpcode(Hexagon::SL1_loadri_io);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      addOps(Result, Inst, 2);
      break; //  1,2,3 SUBInst $Rd = memw($Rs + #$u4_2)
    }
  case Hexagon::S4_storeirb_io:
    // Store-immediate byte: only #0 and #1 have sub-instruction encodings.
    Absolute =
        Inst.getOperand(2).getExpr()->evaluateAsAbsolute(Value);
    assert(Absolute);(void)Absolute;
    if (Value == 0) {
      Result.setOpcode(Hexagon::SS2_storebi0);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      break; //  1,2 SUBInst memb($Rs + #$u4_0)=#0
    } else if (Value == 1) {
      Result.setOpcode(Hexagon::SS2_storebi1);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      break; //  2 1,2 SUBInst memb($Rs + #$u4_0)=#1
    }
    // NOTE(review): other immediate values fall through into the
    // register-store case below — looks unintended for a store-immediate;
    // confirm the duplex checker never offers such a value.
  case Hexagon::S2_storerb_io:
    Result.setOpcode(Hexagon::SS1_storeb_io);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //  1,2,3 SUBInst memb($Rs + #$u4_0) = $Rt
  case Hexagon::S2_storerd_io:
    Result.setOpcode(Hexagon::SS2_stored_sp);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //  2,3 SUBInst memd(r29 + #$s6_3) = $Rtt
  case Hexagon::S2_storerh_io:
    Result.setOpcode(Hexagon::SS2_storeh_io);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    addOps(Result, Inst, 2);
    break; //  1,2,3 SUBInst memb($Rs + #$u4_0) = $Rt
  case Hexagon::S4_storeiri_io:
    // Store-immediate word: #0/#1 forms, plus an sp-relative register form.
    Absolute = Inst.getOperand(2).getExpr()->evaluateAsAbsolute(Value);
    assert(Absolute);(void)Absolute;
    if (Value == 0) {
      Result.setOpcode(Hexagon::SS2_storewi0);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      break; //  3 1,2 SUBInst memw($Rs + #$u4_2)=#0
    } else if (Value == 1) {
      Result.setOpcode(Hexagon::SS2_storewi1);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      break; //  3 1,2 SUBInst memw($Rs + #$u4_2)=#1
    } else if (Inst.getOperand(0).getReg() == Hexagon::R29) {
      Result.setOpcode(Hexagon::SS2_storew_sp);
      addOps(Result, Inst, 1);
      addOps(Result, Inst, 2);
      break; //  1 2,3 SUBInst memw(r29 + #$u5_2) = $Rt
    }
    // NOTE(review): remaining values fall through into S2_storeri_io below —
    // same concern as the S4_storeirb_io fallthrough above; verify.
  case Hexagon::S2_storeri_io:
    if (Inst.getOperand(0).getReg() == Hexagon::R29) {
      Result.setOpcode(Hexagon::SS2_storew_sp);
      addOps(Result, Inst, 1);
      addOps(Result, Inst, 2); //  1,2,3 SUBInst memw(sp + #$u5_2) = $Rt
    } else {
      Result.setOpcode(Hexagon::SS1_storew_io);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      addOps(Result, Inst, 2); //  1,2,3 SUBInst memw($Rs + #$u4_2) = $Rt
    }
    break;
  case Hexagon::A2_sxtb:
    Result.setOpcode(Hexagon::SA1_sxtb);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  1,2 SUBInst $Rd = sxtb($Rs)
  case Hexagon::A2_sxth:
    Result.setOpcode(Hexagon::SA1_sxth);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  1,2 SUBInst $Rd = sxth($Rs)
  case Hexagon::A2_tfr:
    Result.setOpcode(Hexagon::SA1_tfr);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  1,2 SUBInst $Rd = $Rs
  case Hexagon::C2_cmovenewif:
    Result.setOpcode(Hexagon::SA1_clrfnew);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  2 SUBInst if (!p0.new) $Rd = #0
  case Hexagon::C2_cmovenewit:
    Result.setOpcode(Hexagon::SA1_clrtnew);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  2 SUBInst if (p0.new) $Rd = #0
  case Hexagon::C2_cmoveif:
    Result.setOpcode(Hexagon::SA1_clrf);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  2 SUBInst if (!p0) $Rd = #0
  case Hexagon::C2_cmoveit:
    Result.setOpcode(Hexagon::SA1_clrt);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  2 SUBInst if (p0) $Rd = #0
  case Hexagon::A2_tfrsi:
    // #-1 has its own encoding; everything else uses the generic set form.
    Absolute = Inst.getOperand(1).getExpr()->evaluateAsAbsolute(Value);
    if (Absolute && Value == -1) {
      Result.setOpcode(Hexagon::SA1_setin1);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      break; //  2 1 SUBInst $Rd = #-1
    } else {
      Result.setOpcode(Hexagon::SA1_seti);
      addOps(Result, Inst, 0);
      addOps(Result, Inst, 1);
      break; //  1,2 SUBInst $Rd = #$u6
    }
  case Hexagon::A2_zxtb:
    Result.setOpcode(Hexagon::SA1_zxtb);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  1,2    $Rd = and($Rs, #255)
  case Hexagon::A2_zxth:
    Result.setOpcode(Hexagon::SA1_zxth);
    addOps(Result, Inst, 0);
    addOps(Result, Inst, 1);
    break; //  1,2 SUBInst $Rd = zxth($Rs)
  }
  return Result;
}