// Convert parsed operands into a VOP3-encoded MCInst.
// Operand 0 is the mnemonic token; operand 1 is the destination register;
// the remaining operands are sources and optional immediates.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);
  unsigned i = 2;

  // Maps each optional-immediate kind to the index where it was parsed.
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      // Registers carry their input modifiers with them (2 MCInst operands).
      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
        continue;
      }
      OptionalIdx[Op.getImmTy()] = i;
    }

    // NOTE(review): operator[] default-inserts index 0 (the mnemonic token)
    // if clamp/omod were never parsed — presumably the caller guarantees
    // both are present whenever modifiers exist. TODO confirm.
    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
  } else {
    // No modifiers anywhere: emit every remaining operand as reg-or-imm.
    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
  }
}
// Parse a single instruction operand. Tries, in order: a custom
// operand-class parser, a register, an immediate, and finally a bare
// expression (e.g. a label). Returns false on success, true on error
// (MCAsmParser convention).
bool NyuziAsmParser::ParseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Give any operand-class-specific parser the first shot.
  OperandMatchResultTy Result = MatchOperandParserImpl(Operands, Mnemonic);
  if (Result == MatchOperand_Success)
    return false;

  // Register?
  unsigned Reg;
  SMLoc RegStart;
  SMLoc RegEnd;
  if (!ParseRegister(Reg, RegStart, RegEnd)) {
    Operands.push_back(NyuziOperand::createReg(Reg, RegStart, RegEnd));
    return false;
  }

  // Immediate?
  if (!ParseImmediate(Operands))
    return false;

  // General expression, e.g. an identifier naming a label.
  const MCExpr *Expr;
  SMLoc ExprStart = Parser.getTok().getLoc();
  if (!getParser().parseExpression(Expr)) {
    SMLoc ExprEnd =
        SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(NyuziOperand::createImm(Expr, ExprStart, ExprEnd));
    return false;
  }

  // Nothing matched.
  Error(Parser.getTok().getLoc(), "unknown operand");
  return true;
}
// Parse any type of register (including integers) and add it to Operands. OperandMatchResultTy SystemZAsmParser::parseAnyRegister(OperandVector &Operands) { // Handle integer values. if (Parser.getTok().is(AsmToken::Integer)) { const MCExpr *Register; SMLoc StartLoc = Parser.getTok().getLoc(); if (Parser.parseExpression(Register)) return MatchOperand_ParseFail; if (auto *CE = dyn_cast<MCConstantExpr>(Register)) { int64_t Value = CE->getValue(); if (Value < 0 || Value > 15) { Error(StartLoc, "invalid register"); return MatchOperand_ParseFail; } } SMLoc EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); Operands.push_back(SystemZOperand::createImm(Register, StartLoc, EndLoc)); } else { Register Reg; if (parseRegister(Reg)) return MatchOperand_ParseFail; // Map to the correct register kind. RegisterKind Kind; unsigned RegNo; if (Reg.Group == RegGR) { Kind = GR64Reg; RegNo = SystemZMC::GR64Regs[Reg.Num]; } else if (Reg.Group == RegFP) { Kind = FP64Reg; RegNo = SystemZMC::FP64Regs[Reg.Num]; } else if (Reg.Group == RegV) { Kind = VR128Reg; RegNo = SystemZMC::VR128Regs[Reg.Num]; } else if (Reg.Group == RegAR) { Kind = AR32Reg; RegNo = SystemZMC::AR32Regs[Reg.Num]; } else if (Reg.Group == RegCR) { Kind = CR64Reg; RegNo = SystemZMC::CR64Regs[Reg.Num]; } else { return MatchOperand_ParseFail; } Operands.push_back(SystemZOperand::createReg(Kind, RegNo, Reg.StartLoc, Reg.EndLoc)); } return MatchOperand_Success; }
// Parse one Sparc instruction operand: custom parsers first, then
// bracketed memory operands (with a special case for cas/casx), then a
// plain register/immediate operand.
SparcAsmParser::OperandMatchResultTy
SparcAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail)
    return ResTy;

  if (getLexer().is(AsmToken::LBrac)) {
    // Memory operand
    Operands.push_back(SparcOperand::CreateToken("[",
                                                 Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the [

    if (Mnemonic == "cas" || Mnemonic == "casx") {
      // cas/casx take a plain "[reg]" address rather than a full memory
      // operand, so parse exactly one %-prefixed register here.
      SMLoc S = Parser.getTok().getLoc();
      if (getLexer().getKind() != AsmToken::Percent)
        return MatchOperand_NoMatch;
      Parser.Lex(); // eat %

      unsigned RegNo, RegKind;
      if (!matchRegisterName(Parser.getTok(), RegNo, RegKind))
        return MatchOperand_NoMatch;

      Parser.Lex(); // Eat the identifier token.
      SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer()-1);
      Operands.push_back(SparcOperand::CreateReg(RegNo, RegKind, S, E));
      ResTy = MatchOperand_Success;
    } else {
      ResTy = parseMEMOperand(Operands);
    }

    if (ResTy != MatchOperand_Success)
      return ResTy;

    // The memory operand must be closed by a ']'.
    if (!getLexer().is(AsmToken::RBrac))
      return MatchOperand_ParseFail;

    Operands.push_back(SparcOperand::CreateToken("]",
                                                 Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the ]
    return MatchOperand_Success;
  }

  std::unique_ptr<SparcOperand> Op;

  // Not a memory operand: parse a plain operand ("call" gets special
  // handling inside parseSparcAsmOperand).
  ResTy = parseSparcAsmOperand(Op, (Mnemonic == "call"));
  if (ResTy != MatchOperand_Success || !Op)
    return MatchOperand_ParseFail;

  // Push the parsed operand into the list of operands
  Operands.push_back(std::move(Op));

  return MatchOperand_Success;
}
// Parse one SystemZ instruction operand (Register-structure variant).
// Returns false on success, true on failure (MCAsmParser convention).
bool SystemZAsmParser::parseOperand(OperandVector &Operands,
                                    StringRef Mnemonic) {
  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach. Force all
  // features to be available during the operand check, or else we will fail to
  // find the custom parser, and then we will later get an InvalidOperand error
  // instead of a MissingFeature error.
  uint64_t AvailableFeatures = getAvailableFeatures();
  setAvailableFeatures(~(uint64_t)0);
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  setAvailableFeatures(AvailableFeatures);
  if (ResTy == MatchOperand_Success)
    return false;

  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Check for a register. All real register operands should have used
  // a context-dependent parse routine, which gives the required register
  // class. The code is here to mop up other cases, like those where
  // the instruction isn't recognized.
  if (Parser.getTok().is(AsmToken::Percent)) {
    Register Reg;
    if (parseRegister(Reg))
      return true;
    Operands.push_back(SystemZOperand::createInvalid(Reg.StartLoc, Reg.EndLoc));
    return false;
  }

  // The only other type of operand is an immediate or address. As above,
  // real address operands should have used a context-dependent parse routine,
  // so we treat any plain expression as an immediate.
  SMLoc StartLoc = Parser.getTok().getLoc();
  Register Reg1, Reg2;
  bool HaveReg1, HaveReg2;
  const MCExpr *Expr;
  const MCExpr *Length;
  if (parseAddress(HaveReg1, Reg1, HaveReg2, Reg2, Expr, Length))
    return true;
  // If the register combination is not valid for any instruction, reject it.
  // Otherwise, fall back to reporting an unrecognized instruction.
  if (HaveReg1 && Reg1.Group != RegGR && Reg1.Group != RegV
      && parseAddressRegister(Reg1))
    return true;
  if (HaveReg2 && parseAddressRegister(Reg2))
    return true;

  SMLoc EndLoc =
      SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
  // Address-style syntax can't be a generic operand: record an invalid
  // operand so matching later reports a sensible error.
  if (HaveReg1 || HaveReg2 || Length)
    Operands.push_back(SystemZOperand::createInvalid(StartLoc, EndLoc));
  else
    Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
  return false;
}
// Match the parsed operand list against the instruction tables and emit
// the instruction, or report a diagnostic. For forced-VOP3 encodings that
// failed only because optional clamp/omod operands are absent, synthesize
// them and retry once (via recursion).
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If 64-bit encoding has been forced we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
             (LastOp.isImm() &&
              LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                 AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                 AMDGPUOperand::ImmTyOMod));
            // Retry the match once with the synthesized operands appended.
            // On success the recursion has already emitted the instruction;
            // on failure we fall through to the "too few operands" error.
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                               Out, ErrorInfo,
                                               MatchingInlineAsm);
            if (!Res)
              return Res;
          }
        }
        return Error(IDLoc, "too few operands for instruction");
      }

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  }
  llvm_unreachable("Implement any new match types added!");
}
// Parse a Nyuzi memory operand of one of two forms:
//   label              -> PC-relative access (base register "pc")
//   [offset](reg)      -> register-indirect with optional signed offset
// Adds the resulting memory operand to Operands.
NyuziAsmParser::OperandMatchResultTy
NyuziAsmParser::ParseMemoryOperand(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  if (getLexer().is(AsmToken::Identifier)) {
    // PC relative memory label memory access
    //   load_32 s0, aLabel
    const MCExpr *IdVal;
    if (getParser().parseExpression(IdVal))
      return MatchOperand_ParseFail; // Bad identifier

    SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

    // This will be turned into a PC relative load.
    Operands.push_back(
        NyuziOperand::createMem(MatchRegisterName("pc"), IdVal, S, E));
    return MatchOperand_Success;
  }

  // Optional signed offset preceding "(reg)".
  const MCExpr *Offset;
  if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Minus) ||
      getLexer().is(AsmToken::Plus)) {
    if (getParser().parseExpression(Offset))
      return MatchOperand_ParseFail;
  } else
    Offset = nullptr; // was NULL; nullptr is the idiomatic C++11 null pointer

  if (!getLexer().is(AsmToken::LParen)) {
    Error(Parser.getTok().getLoc(), "bad memory operand, missing (");
    return MatchOperand_ParseFail;
  }
  getLexer().Lex();

  unsigned RegNo;
  // Renamed from _S/_E: identifiers beginning with an underscore followed
  // by an uppercase letter are reserved to the implementation in C++.
  SMLoc RegStart, RegEnd;
  if (ParseRegister(RegNo, RegStart, RegEnd)) {
    Error(Parser.getTok().getLoc(), "bad memory operand: invalid register");
    return MatchOperand_ParseFail;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    Error(Parser.getTok().getLoc(), "bad memory operand, missing )");
    return MatchOperand_ParseFail;
  }
  getLexer().Lex();

  SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
  Operands.push_back(NyuziOperand::createMem(RegNo, Offset, S, E));
  return MatchOperand_Success;
}
// Return the argument operands of this call: every read operand except
// the first two.
Call::OperandVector Call::arguments() {
  assert(reads.size() > 1);

  OperandVector operands;

  // Advance past the two leading non-argument reads.
  auto it = reads.begin();
  ++it;
  ++it;

  while (it != reads.end()) {
    operands.push_back(*it);
    ++it;
  }

  return operands;
}
bool NyuziAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm) { MCInst Inst; SMLoc ErrorLoc; SmallVector<std::pair<unsigned, std::string>, 4> MapAndConstraints; switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) { default: break; case Match_Success: Out.EmitInstruction(Inst, STI); return false; case Match_MissingFeature: return Error(IDLoc, "Instruction use requires option to be enabled"); case Match_MnemonicFail: return Error(IDLoc, "Unrecognized instruction mnemonic"); case Match_InvalidOperand: ErrorLoc = IDLoc; if (ErrorInfo != ~0U) { if (ErrorInfo >= Operands.size()) return Error(IDLoc, "Too few operands for instruction"); ErrorLoc = ((NyuziOperand &)*Operands[ErrorInfo]).getStartLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; } return Error(ErrorLoc, "Invalid operand for instruction"); } llvm_unreachable("Unknown match type detected!"); }
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm) { MCInst Inst; switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) { case Match_Success: Inst.setLoc(IDLoc); Out.EmitInstruction(Inst, STI); return false; case Match_MissingFeature: return Error(IDLoc, "instruction use requires an option to be enabled"); case Match_MnemonicFail: return Error(IDLoc, "unrecognized instruction mnemonic"); case Match_InvalidOperand: { if (ErrorInfo != ~0ULL) { if (ErrorInfo >= Operands.size()) return Error(IDLoc, "too few operands for instruction"); } return Error(IDLoc, "invalid operand for instruction"); } } llvm_unreachable("Implement any new match types added!"); }
bool MSP430AsmParser::MatchAndEmitInstruction(SMLoc Loc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm) { MCInst Inst; unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm); switch (MatchResult) { case Match_Success: Inst.setLoc(Loc); Out.EmitInstruction(Inst, STI); return false; case Match_MnemonicFail: return Error(Loc, "invalid instruction mnemonic"); case Match_InvalidOperand: { SMLoc ErrorLoc = Loc; if (ErrorInfo != ~0U) { if (ErrorInfo >= Operands.size()) return Error(ErrorLoc, "too few operands for instruction"); ErrorLoc = ((MSP430Operand &)*Operands[ErrorInfo]).getStartLoc(); if (ErrorLoc == SMLoc()) ErrorLoc = Loc; } return Error(ErrorLoc, "invalid operand for instruction"); } default: return true; } }
// Parse a PC-relative operand whose constant form must be an even offset
// in [MinVal, MaxVal]. Constant offsets are rewritten as "label + offset"
// relative to a temporary label emitted at the current position.
SystemZAsmParser::OperandMatchResultTy
SystemZAsmParser::parsePCRel(OperandVector &Operands, int64_t MinVal,
                             int64_t MaxVal) {
  MCContext &Ctx = getContext();
  MCStreamer &Out = getStreamer();
  const MCExpr *Expr;
  SMLoc StartLoc = Parser.getTok().getLoc();
  if (getParser().parseExpression(Expr))
    return MatchOperand_NoMatch;

  // For consistency with the GNU assembler, treat immediates as offsets
  // from ".".
  if (auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
    int64_t Value = CE->getValue();
    // PC-relative targets are halfword-aligned, hence the even check.
    if ((Value & 1) || Value < MinVal || Value > MaxVal) {
      Error(StartLoc, "offset out of range");
      return MatchOperand_ParseFail;
    }
    // Emit a temporary label at the current position so the offset is
    // anchored to "."; a zero offset is just the label itself.
    MCSymbol *Sym = Ctx.CreateTempSymbol();
    Out.EmitLabel(Sym);
    const MCExpr *Base = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None,
                                                 Ctx);
    Expr = Value == 0 ? Base : MCBinaryExpr::CreateAdd(Base, Expr, Ctx);
  }

  SMLoc EndLoc =
      SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
  Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
  return MatchOperand_Success;
}
// Convert parsed DS-instruction operands into an MCInst: registers in
// parse order, then the offset immediate, then (unless the "gds" token
// form was used) the gds immediate, then an implicit m0 register operand.
void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // A bare "gds" token selects the GDS-only instruction form.
    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // NOTE(review): operator[] default-inserts index 0 (the mnemonic token)
  // if no offset was parsed — presumably the parser always synthesizes a
  // default offset operand. TODO confirm.
  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::CreateReg(AMDGPU::M0)); // m0
}
// Parse the operand of s_waitcnt: either a raw integer or one or more
// named counter specifications (e.g. vmcnt(0)), folded into one immediate.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [10:8]
  int64_t Waitcnt = 0x77f;
  SMLoc Loc = Parser.getTok().getLoc();

  AsmToken::TokenKind Kind = getLexer().getKind();
  if (Kind == AsmToken::Integer) {
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
  } else if (Kind == AsmToken::Identifier) {
    // Named counters, parsed until the end of the statement.
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));
  } else {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Waitcnt, Loc));
  return MatchOperand_Success;
}
// Try each optional operand in OptionalOps that has not already been
// parsed; at most one operand is consumed per call.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    // Skip operands the user already supplied explicitly.
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      // NoMatch just means "not this one" — try the next optional operand.
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    // Give the operand's converter a chance to validate/rewrite the value.
    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
// Parse a memory operand and add it to Operands. The other arguments // are as above. SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::parseAddress(OperandVector &Operands, const unsigned *Regs, RegisterKind RegKind, MemoryKind MemKind) { SMLoc StartLoc = Parser.getTok().getLoc(); unsigned Base, Index; const MCExpr *Disp; const MCExpr *Length; if (parseAddress(Base, Disp, Index, Length, Regs, RegKind)) return MatchOperand_ParseFail; if (Index && MemKind != BDXMem) { Error(StartLoc, "invalid use of indexed addressing"); return MatchOperand_ParseFail; } if (Length && MemKind != BDLMem) { Error(StartLoc, "invalid use of length addressing"); return MatchOperand_ParseFail; } if (!Length && MemKind == BDLMem) { Error(StartLoc, "missing length in address"); return MatchOperand_ParseFail; } SMLoc EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); Operands.push_back(SystemZOperand::createMem(RegKind, Base, Disp, Index, Length, StartLoc, EndLoc)); return MatchOperand_Success; }
// Parse an optional named single-bit operand: "name" sets the bit,
// "no<name>" clears it, and end-of-statement means "use the default" (0).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t BitVal = 0;
  SMLoc Loc = Parser.getTok().getLoc();

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Only an identifier can name the bit; anything else is not ours.
    if (getLexer().getKind() != AsmToken::Identifier)
      return MatchOperand_NoMatch;

    StringRef Tok = Parser.getTok().getString();
    if (Tok == Name)
      BitVal = 1;
    else if (Tok.startswith("no") && Tok.endswith(Name))
      BitVal = 0;
    else
      return MatchOperand_NoMatch;
    Parser.Lex();
  }

  Operands.push_back(AMDGPUOperand::CreateImm(BitVal, Loc, ImmTy));
  return MatchOperand_Success;
}
// Parse optional VOP3 operands (clamp/omod etc.) when the instruction is,
// or is being forced to be, a VOP3 encoding.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if (Op.isReg())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}
// Parse one instruction: the mnemonic token followed by a comma-separated
// operand list terminated by end-of-statement.
// Returns false on success, true on error.
bool SystemZAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  Operands.push_back(SystemZOperand::createToken(Name, NameLoc));

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // First operand, then ",operand" repetitions.
    bool First = true;
    do {
      if (!First)
        Parser.Lex(); // eat the comma
      First = false;
      if (parseOperand(Operands, Name)) {
        Parser.eatToEndOfStatement();
        return true;
      }
    } while (getLexer().is(AsmToken::Comma));

    // Anything left over before end-of-statement is a syntax error.
    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      SMLoc Loc = getLexer().getLoc();
      Parser.eatToEndOfStatement();
      return Error(Loc, "unexpected token in argument list");
    }
  }

  // Consume the EndOfStatement.
  Parser.Lex();
  return false;
}
// Convert DS-instruction operands that use the offset0/offset1 immediate
// pair into an MCInst: registers in parse order, then offset0, offset1,
// gds, and finally an implicit m0 register operand.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // NOTE(review): operator[] default-inserts index 0 (the mnemonic token)
  // for any immediate that was never parsed — presumably the parser always
  // supplies all three. TODO confirm.
  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::CreateReg(AMDGPU::M0)); // m0
}
// Match the parsed operands against the instruction tables; emit the
// instruction on success or produce a diagnostic (including missing-feature
// lists and mnemonic spelling suggestions) on failure.
// Returns false on success, true on error.
bool SystemZAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               MCStreamer &Out,
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;

  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);
  switch (MatchResult) {
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing
    std::string Msg = "instruction requires:";
    uint64_t Mask = 1;
    // NOTE(review): this visits bits 0..62 only (sizeof*8 - 1); a feature
    // assigned bit 63 would be silently skipped — confirm this is intended.
    for (unsigned I = 0; I < sizeof(ErrorInfo) * 8 - 1; ++I) {
      if (ErrorInfo & Mask) {
        Msg += " ";
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
      }
      Mask <<= 1;
    }
    return Error(IDLoc, Msg);
  }

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((SystemZOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_MnemonicFail: {
    // Suggest a near-miss mnemonic given the currently available features.
    uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = SystemZMnemonicSpellCheck(
        ((SystemZOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((SystemZOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Unexpected match type");
}
// Parse one Sparc instruction: mnemonic, optional leading branch
// modifiers (",a" etc.), then operands separated by ',' — or by '+',
// which is itself kept as a token for software traps.
// Returns false on success, true on error.
bool SparcAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                      StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  // First operand in MCInst is instruction mnemonic.
  Operands.push_back(SparcOperand::CreateToken(Name, NameLoc));

  // apply mnemonic aliases, if any, so that we can parse operands correctly.
  applyMnemonicAliases(Name, getAvailableFeatures(), 0);

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // A comma directly after the mnemonic introduces branch modifiers.
    if (getLexer().is(AsmToken::Comma)) {
      if (parseBranchModifiers(Operands) != MatchOperand_Success) {
        SMLoc Loc = getLexer().getLoc();
        return Error(Loc, "unexpected token");
      }
    }
    // Read the first operand.
    if (parseOperand(Operands, Name) != MatchOperand_Success) {
      SMLoc Loc = getLexer().getLoc();
      return Error(Loc, "unexpected token");
    }

    while (getLexer().is(AsmToken::Comma) || getLexer().is(AsmToken::Plus)) {
      if (getLexer().is(AsmToken::Plus)) {
        // Plus tokens are significant in software_traps (p83, sparcv8.pdf).
        // We must capture them.
        Operands.push_back(SparcOperand::CreateToken("+",
                                                     Parser.getTok().getLoc()));
      }
      Parser.Lex(); // Eat the comma or plus.
      // Parse and remember the operand.
      if (parseOperand(Operands, Name) != MatchOperand_Success) {
        SMLoc Loc = getLexer().getLoc();
        return Error(Loc, "unexpected token");
      }
    }
  }
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    return Error(Loc, "unexpected token");
  }
  Parser.Lex(); // Consume the EndOfStatement.
  return false;
}
// Parse one SystemZ instruction operand (Base/Index-based variant).
// Returns false on success, true on failure (MCAsmParser convention).
bool SystemZAsmParser::parseOperand(OperandVector &Operands,
                                    StringRef Mnemonic) {
  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;

  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Check for a register. All real register operands should have used
  // a context-dependent parse routine, which gives the required register
  // class. The code is here to mop up other cases, like those where
  // the instruction isn't recognized.
  if (Parser.getTok().is(AsmToken::Percent)) {
    Register Reg;
    if (parseRegister(Reg))
      return true;
    Operands.push_back(SystemZOperand::createInvalid(Reg.StartLoc, Reg.EndLoc));
    return false;
  }

  // The only other type of operand is an immediate or address. As above,
  // real address operands should have used a context-dependent parse routine,
  // so we treat any plain expression as an immediate.
  SMLoc StartLoc = Parser.getTok().getLoc();
  unsigned Base, Index;
  const MCExpr *Expr, *Length;
  if (parseAddress(Base, Expr, Index, Length, SystemZMC::GR64Regs, ADDR64Reg))
    return true;

  SMLoc EndLoc =
      SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
  // Address-style syntax (base/index/length present) cannot be a generic
  // operand — record an invalid operand so matching reports a sensible
  // error later.
  if (Base || Index || Length)
    Operands.push_back(SystemZOperand::createInvalid(StartLoc, EndLoc));
  else
    Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
  return false;
}
static bool operandsHasOptionalOp(const OperandVector &Operands, const OptionalOperand &OOp) { for (unsigned i = 0; i < Operands.size(); i++) { const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]); if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) || (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name)) return true; } return false; }
// Heuristically decide whether the operands parsed so far require the
// VOP3 (64-bit) encoding of a VOP instruction.
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  // Operand 1 is the destination register.
  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  // NOTE(review): an SGPR-pair destination forces VOP3 — presumably
  // because the 32-bit encodings can't express it; confirm vs. ISA docs.
  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
    return true;

  // Too many operands for the 32-bit encodings.
  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    // NOTE(review): likewise, an SGPR src1 appears to force VOP3; confirm.
    if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                            Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}
/// ParseInstruction - Parse an BPF instruction which is in BPF verifier /// format. bool BPFAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands) { // The first operand could be either register or actually an operator. unsigned RegNo = MatchRegisterName(Name); if (RegNo != 0) { SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() - 1); Operands.push_back(BPFOperand::createReg(RegNo, NameLoc, E)); } else if (BPFOperand::isValidIdAtStart (Name)) Operands.push_back(BPFOperand::createToken(Name, NameLoc)); else return Error(NameLoc, "invalid register/token name"); while (!getLexer().is(AsmToken::EndOfStatement)) { // Attempt to parse token as operator if (parseOperandAsOperator(Operands) == MatchOperand_Success) continue; // Attempt to parse token as register if (parseRegister(Operands) == MatchOperand_Success) continue; // Attempt to parse token as an immediate if (parseImmediate(Operands) != MatchOperand_Success) { SMLoc Loc = getLexer().getLoc(); return Error(Loc, "unexpected token"); } } if (getLexer().isNot(AsmToken::EndOfStatement)) { SMLoc Loc = getLexer().getLoc(); getParser().eatToEndOfStatement(); return Error(Loc, "unexpected token"); } // Consume the EndOfStatement. getParser().Lex(); return false; }
static bool operandsHaveModifiers(const OperandVector &Operands) { for (unsigned i = 0, e = Operands.size(); i != e; ++i) { const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]); if (Op.isRegKind() && Op.hasModifiers()) return true; if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod || Op.getImmTy() == AMDGPUOperand::ImmTyClamp)) return true; } return false; }
// Parse an optional "offset" immediate for DS instructions; if none is
// present, synthesize the default value 0 so matching always sees one.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc Loc = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Result =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Result != MatchOperand_NoMatch)
    return Result;

  // No explicit offset was written: default to 0.
  Operands.push_back(
      AMDGPUOperand::CreateImm(0, Loc, AMDGPUOperand::ImmTyOffset));
  return MatchOperand_Success;
}
// Parse the branch target of an SOPP instruction: either an absolute
// integer offset or a symbolic label.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc Loc = Parser.getTok().getLoc();
  AsmToken::TokenKind Kind = getLexer().getKind();

  if (Kind == AsmToken::Integer) {
    // Absolute integer branch offset.
    int64_t Offset;
    if (getParser().parseAbsoluteExpression(Offset))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Offset, Loc));
    return MatchOperand_Success;
  }

  if (Kind == AsmToken::Identifier) {
    // Symbolic branch target: wrap the label in a symbol-ref expression.
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::Create(
            getContext().GetOrCreateSymbol(Parser.getTok().getString()),
            getContext()),
        Loc));
    Parser.Lex();
    return MatchOperand_Success;
  }

  return MatchOperand_ParseFail;
}
// Parse one Nyuzi instruction. Mnemonics may carry up to two
// '.'-separated suffixes; the stem and each suffix (with its leading '.')
// become separate tokens. Returns false on success, true on error.
bool NyuziAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                      StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  // dotLoc is npos when there is no suffix; substr(0, npos) is the whole
  // name, and npos < size() is false, so the suffix block is skipped.
  size_t dotLoc = Name.find('.');
  StringRef stem = Name.substr(0, dotLoc);
  Operands.push_back(NyuziOperand::createToken(stem, NameLoc));
  if (dotLoc < Name.size()) {
    size_t dotLoc2 = Name.rfind('.');
    if (dotLoc == dotLoc2)
      // Exactly one suffix.
      Operands.push_back(
          NyuziOperand::createToken(Name.substr(dotLoc), NameLoc));
    else {
      // Two suffixes: emit each (with its leading '.') as its own token.
      Operands.push_back(NyuziOperand::createToken(
          Name.substr(dotLoc, dotLoc2 - dotLoc), NameLoc));
      Operands.push_back(
          NyuziOperand::createToken(Name.substr(dotLoc2), NameLoc));
    }
  }

  // If there are no more operands, then finish
  // XXX hash should start a comment, should the lexer just be consuming that?
  if (getLexer().is(AsmToken::EndOfStatement) || getLexer().is(AsmToken::Hash))
    return false;

  // parse operands
  for (;;) {
    if (ParseOperand(Operands, stem))
      return true;

    if (getLexer().isNot(AsmToken::Comma))
      break;

    // Consume comma token
    getLexer().Lex();
  }
  return false;
}