// Convert parsed operands into an MCInst using the VOP3 (64-bit) encoding,
// handling source input modifiers and the optional clamp/omod operands.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);
  unsigned i = 2;

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
        continue;
      }
      OptionalIdx[Op.getImmTy()] = i;
    }

    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
  } else {
    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
  }
}
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If 64-bit encoding has been forced we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
              (LastOp.isImm() &&
               LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                               AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                               AMDGPUOperand::ImmTyOMod));
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                               Out, ErrorInfo,
                                               MatchingInlineAsm);
            if (!Res)
              return Res;
          }
        }
        return Error(IDLoc, "too few operands for instruction");
      }

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  }
  llvm_unreachable("Implement any new match types added!");
}
bool NyuziAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                             OperandVector &Operands,
                                             MCStreamer &Out,
                                             uint64_t &ErrorInfo,
                                             bool MatchingInlineAsm) {
  MCInst Inst;
  SMLoc ErrorLoc;
  SmallVector<std::pair<unsigned, std::string>, 4> MapAndConstraints;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default:
    break;
  case Match_Success:
    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "Instruction use requires option to be enabled");
  case Match_MnemonicFail:
    return Error(IDLoc, "Unrecognized instruction mnemonic");
  case Match_InvalidOperand:
    ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "Too few operands for instruction");

      ErrorLoc = ((NyuziOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "Invalid operand for instruction");
  }

  llvm_unreachable("Unknown match type detected!");
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if (Op.isReg())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction use requires an option to be enabled");
  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");
  case Match_InvalidOperand: {
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");
    }
    return Error(IDLoc, "invalid operand for instruction");
  }
  }
  llvm_unreachable("Implement any new match types added!");
}
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1);     // gds
  Inst.addOperand(MCOperand::CreateReg(AMDGPU::M0));                // m0
}
bool MSP430AsmParser::MatchAndEmitInstruction(SMLoc Loc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);

  switch (MatchResult) {
  case Match_Success:
    Inst.setLoc(Loc);
    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MnemonicFail:
    return Error(Loc, "invalid instruction mnemonic");
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = Loc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(ErrorLoc, "too few operands for instruction");

      ErrorLoc = ((MSP430Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = Loc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  default:
    return true;
  }
}
void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::CreateReg(AMDGPU::M0)); // m0
}
bool SystemZAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               MCStreamer &Out,
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;

  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);
  switch (MatchResult) {
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing
    std::string Msg = "instruction requires:";
    uint64_t Mask = 1;
    for (unsigned I = 0; I < sizeof(ErrorInfo) * 8 - 1; ++I) {
      if (ErrorInfo & Mask) {
        Msg += " ";
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
      }
      Mask <<= 1;
    }
    return Error(IDLoc, Msg);
  }

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((SystemZOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_MnemonicFail: {
    uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = SystemZMnemonicSpellCheck(
        ((SystemZOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((SystemZOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Unexpected match type");
}
// Returns true if the operand list already contains the given optional
// operand, either as an immediate of the matching type or as a matching
// token.
static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}
// Heuristically decide whether the parsed operands require the VOP3 (64-bit)
// encoding: explicit modifiers, an SGPR_64 destination, more operands than a
// VOP1/VOP2 form allows, or a scalar register used as src1.
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
    return true;

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                            Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}
// Returns true if any register operand already carries source modifiers, or
// if an omod/clamp immediate has already been parsed.
static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}
bool SparcAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                             OperandVector &Operands,
                                             MCStreamer &Out,
                                             uint64_t &ErrorInfo,
                                             bool MatchingInlineAsm) {
  MCInst Inst;
  SmallVector<MCInst, 8> Instructions;
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
  switch (MatchResult) {
  case Match_Success: {
    switch (Inst.getOpcode()) {
    default:
      Inst.setLoc(IDLoc);
      Instructions.push_back(Inst);
      break;
    case SP::SET:
      if (expandSET(Inst, IDLoc, Instructions))
        return true;
      break;
    }

    for (const MCInst &I : Instructions) {
      Out.EmitInstruction(I, getSTI());
    }
    return false;
  }

  case Match_MissingFeature:
    return Error(IDLoc,
                 "instruction requires a CPU feature not currently enabled");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((SparcOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction mnemonic");
  }
  llvm_unreachable("Implement any new match types added!");
}
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string.  There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}
bool SparcAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                             OperandVector &Operands,
                                             MCStreamer &Out,
                                             unsigned &ErrorInfo,
                                             bool MatchingInlineAsm) {
  MCInst Inst;
  SmallVector<MCInst, 8> Instructions;
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
  switch (MatchResult) {
  default:
    break;

  case Match_Success: {
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, STI);
    return false;
  }

  case Match_MissingFeature:
    return Error(IDLoc,
                 "instruction requires a CPU feature not currently enabled");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((SparcOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction mnemonic");
  }
  return true;
}
bool BPFAsmParser::PreMatchCheck(OperandVector &Operands) {

  if (Operands.size() == 4) {
    // check "reg1 = -reg2" and "reg1 = be16/be32/be64/le16/le32/le64 reg2",
    // reg1 must be the same as reg2
    BPFOperand &Op0 = (BPFOperand &)*Operands[0];
    BPFOperand &Op1 = (BPFOperand &)*Operands[1];
    BPFOperand &Op2 = (BPFOperand &)*Operands[2];
    BPFOperand &Op3 = (BPFOperand &)*Operands[3];
    if (Op0.isReg() && Op1.isToken() && Op2.isToken() && Op3.isReg()
        && Op1.getToken() == "="
        && (Op2.getToken() == "-" || Op2.getToken() == "be16"
            || Op2.getToken() == "be32" || Op2.getToken() == "be64"
            || Op2.getToken() == "le16" || Op2.getToken() == "le32"
            || Op2.getToken() == "le64")
        && Op0.getReg() != Op3.getReg())
      return true;
  }

  return false;
}
bool BPFAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           OperandVector &Operands,
                                           MCStreamer &Out,
                                           uint64_t &ErrorInfo,
                                           bool MatchingInlineAsm) {
  MCInst Inst;
  SMLoc ErrorLoc;

  if (PreMatchCheck(Operands))
    return Error(IDLoc, "additional inst constraint not met");

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default:
    break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction use requires an option to be enabled");
  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");
  case Match_InvalidOperand:
    ErrorLoc = IDLoc;

    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(ErrorLoc, "too few operands for instruction");

      ErrorLoc = ((BPFOperand &)*Operands[ErrorInfo]).getStartLoc();

      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }

  llvm_unreachable("Unknown match type detected!");
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
    case AsmToken::Integer: {
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;
      APInt IntVal32(32, IntVal);
      if (IntVal32.getSExtValue() != IntVal) {
        Error(S, "invalid immediate: only 32-bit values are legal");
        return MatchOperand_ParseFail;
      }

      IntVal = IntVal32.getSExtValue();
      if (Negate)
        IntVal *= -1;
      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
      return MatchOperand_Success;
    }
    case AsmToken::Real: {
      // FIXME: We should emit an error if a double precision floating-point
      // value is used.  I'm not sure the best way to detect this.
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;

      APFloat F((float)BitsToDouble(IntVal));
      if (Negate)
        F.changeSign();
      Operands.push_back(
          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
      return MatchOperand_Success;
    }
    case AsmToken::Identifier: {
      SMLoc S, E;
      unsigned RegNo;
      if (!ParseRegister(RegNo, S, E)) {

        bool HasModifiers = operandsHaveModifiers(Operands);
        unsigned Modifiers = 0;

        if (Negate)
          Modifiers |= 0x1;

        if (Abs) {
          if (getLexer().getKind() != AsmToken::Pipe)
            return MatchOperand_ParseFail;
          Parser.Lex();
          Modifiers |= 0x2;
        }

        if (Modifiers && !HasModifiers) {
          // We are adding a modifier to src1 or src2 and previous sources
          // don't have modifiers, so we need to go back and set empty
          // modifiers for each previous source.
          for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
               --PrevRegIdx) {

            AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
            RegOp.setModifiers(0);
          }
        }

        Operands.push_back(AMDGPUOperand::CreateReg(
            RegNo, S, E, getContext().getRegisterInfo(), isForcedVOP3()));

        if (HasModifiers || Modifiers) {
          AMDGPUOperand &RegOp =
              ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
          RegOp.setModifiers(Modifiers);
        }
      } else {
        Operands.push_back(
            AMDGPUOperand::CreateToken(Parser.getTok().getString(), S));
        Parser.Lex();
      }
      return MatchOperand_Success;
    }
    default:
      return MatchOperand_NoMatch;
  }
}