/// Parse a single brace-delimited replacement sequence (e.g. "{0,-5:x}") into
/// a ReplacementItem. The sequence is: an index, an optional ",<layout>"
/// clause (alignment/width/pad), and an optional ":<options>" clause. On a
/// malformed sequence this asserts in debug builds and yields a
/// default-constructed item in release builds.
Optional<ReplacementItem>
formatv_object_base::parseReplacementItem(StringRef Spec) {
  StringRef Body = Spec.trim("{}");

  char Pad = ' ';
  std::size_t Align = 0;
  AlignStyle Where = AlignStyle::Right;
  StringRef Options;
  size_t Index = 0;

  // The sequence must begin with a non-negative integer index; anything else
  // is an error. consumeInteger returns true on failure.
  Body = Body.trim();
  if (Body.consumeInteger(0, Index)) {
    assert(false && "Invalid replacement sequence index!");
    return ReplacementItem{};
  }

  // Optional ",<layout>" clause describing alignment, width, and pad char.
  Body = Body.trim();
  if (Body.consume_front(",")) {
    if (!consumeFieldLayout(Body, Where, Align, Pad))
      assert(false && "Invalid replacement field layout specification!");
  }

  // Optional ":<options>" clause; everything after the colon is passed
  // through verbatim (trimmed) as type-specific style options.
  Body = Body.trim();
  if (Body.consume_front(":")) {
    Options = Body.trim();
    Body = StringRef();
  }

  // Nothing else may follow the recognized clauses.
  Body = Body.trim();
  assert(Body.empty() && "Unexpected characters found in replacement string!");

  return ReplacementItem{Spec, Index, Align, Where, Pad, Options};
}
/// Consume a field-layout specifier from the front of \p Spec, filling in the
/// alignment style, width, and pad character. Returns false only if the
/// trailing width is present but not a valid integer. \p Spec is advanced
/// past everything that was consumed.
bool formatv_object_base::consumeFieldLayout(StringRef &Spec, AlignStyle &Where,
                                             size_t &Align, char &Pad) {
  // Defaults: right-aligned, zero width, space padding.
  Where = AlignStyle::Right;
  Align = 0;
  Pad = ' ';

  // An empty layout is valid and means "all defaults".
  if (Spec.empty())
    return true;

  if (Spec.size() > 1) {
    // At most the first two characters can encode something other than the
    // width:
    //   <pad><loc><width...>  - Spec[1] is a location char, Spec[0] the pad.
    //   <loc><width...>       - Spec[0] is a location char.
    //   <width...>            - neither; the whole thing is the width.
    if (auto Style = translateLocChar(Spec[1])) {
      Pad = Spec[0];
      Where = *Style;
      Spec = Spec.drop_front(2);
    } else if (auto Style = translateLocChar(Spec[0])) {
      Where = *Style;
      Spec = Spec.drop_front(1);
    }
  }

  // Whatever remains must parse as the width; consumeInteger reports failure
  // by returning true.
  return !Spec.consumeInteger(0, Align);
}
// Exercise TestPacketSpeed's JSON output path end-to-end: a local server
// thread answers qSpeedTest requests until the client disconnects, then the
// emitted text is parsed as JSON and spot-checked.
TEST_F(GDBRemoteCommunicationClientTest, TestPacketSpeedJSON) {
  // Minimal fake server: loop serving "qSpeedTest:response_size:<N>" packets,
  // replying with N bytes of filler. A clean exit happens only when the
  // client disconnects (ErrorDisconnected); any other failure trips an
  // ASSERT inside the thread.
  std::thread server_thread([this] {
    for (;;) {
      StringExtractorGDBRemote request;
      PacketResult result = server.GetPacket(request);
      if (result == PacketResult::ErrorDisconnected)
        return;
      ASSERT_EQ(PacketResult::Success, result);
      StringRef ref = request.GetStringRef();
      ASSERT_TRUE(ref.consume_front("qSpeedTest:response_size:"));
      int size;
      ASSERT_FALSE(ref.consumeInteger(10, size)) << "ref: " << ref;
      // Respond with exactly the requested number of payload bytes.
      std::string response(size, 'X');
      ASSERT_EQ(PacketResult::Success, server.SendPacket(response));
    }
  });

  // Run the speed test with JSON output enabled (the trailing 'true'),
  // then disconnect so the server thread can exit its loop.
  StreamString ss;
  client.TestPacketSpeed(10, 32, 32, 4096, true, ss);
  client.Disconnect();
  server_thread.join();
  GTEST_LOG_(INFO) << "Formatted output: " << ss.GetData();

  // The output must be valid JSON with a "packet_speeds" dictionary whose
  // "num_packets" matches the count passed to TestPacketSpeed above.
  auto object_sp = StructuredData::ParseJSON(ss.GetString());
  ASSERT_TRUE(bool(object_sp));
  auto dict_sp = object_sp->GetAsDictionary();
  ASSERT_TRUE(bool(dict_sp));
  object_sp = dict_sp->GetValueForKey("packet_speeds");
  ASSERT_TRUE(bool(object_sp));
  dict_sp = object_sp->GetAsDictionary();
  ASSERT_TRUE(bool(dict_sp));
  int num_packets;
  ASSERT_TRUE(dict_sp->GetValueForKeyAsInteger("num_packets", num_packets))
      << ss.GetString();
  ASSERT_EQ(10, num_packets);
}
// Derive per-function AMDGPU state from the function's calling convention,
// subtarget, and "amdgpu-*" string attributes: which implicit kernel inputs
// are needed, which registers hold scratch/stack pointers, and occupancy
// bounds. NOTE(review): the statement order below is load-bearing — e.g.
// isEntryFunction() gates several later decisions — do not reorder.
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false),
    ImplicitBufferPtr(false),
    ImplicitArgPtr(false),
    GITPtrHigh(0xffffffff),       // Sentinel: all-ones means "not specified"
                                  // until the attribute below overrides it.
    HighBitsOf32BitAddress(0) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const Function &F = MF.getFunction();
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
  WavesPerEU = ST.getWavesPerEU(F);

  // Start from the theoretical maximum occupancy, then clamp it for this
  // function.
  Occupancy = getMaxWavesPerEU();
  limitOccupancy(MF);

  CallingConv::ID CC = F.getCallingConv();
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
    // Kernels with arguments need the kernarg segment pointer; all kernels
    // implicitly use workgroup/workitem X IDs.
    if (!F.arg_empty())
      KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
    PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
  }

  if (!isEntryFunction()) {
    // Non-entry functions have no special inputs for now, other registers
    // required for scratch access.
    ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;

    // TODO: Pick a high register, and shift down, minimizing the number of
    // implicit input registers? (NOTE(review): fixed SGPR choices below are a
    // convention for non-entry functions — confirm against the ABI docs.)
    ScratchWaveOffsetReg = AMDGPU::SGPR4;
    FrameOffsetReg = AMDGPU::SGPR5;
    StackPtrOffsetReg = AMDGPU::SGPR32;

    ArgInfo.PrivateSegmentBuffer =
      ArgDescriptor::createRegister(ScratchRSrcReg);
    ArgInfo.PrivateSegmentWaveByteOffset =
      ArgDescriptor::createRegister(ScratchWaveOffsetReg);

    if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
      ImplicitArgPtr = true;
  } else {
    if (F.hasFnAttribute("amdgpu-implicitarg-ptr")) {
      // Entry functions access implicit args through the kernarg segment, so
      // the implicit-arg pointer implies the kernarg segment pointer and may
      // raise the required kernarg alignment.
      KernargSegmentPtr = true;
      MaxKernArgAlign = std::max(ST.getAlignmentForImplicitArgPtr(),
                                 MaxKernArgAlign);
    }
  }

  if (ST.debuggerEmitPrologue()) {
    // Enable everything.
    WorkGroupIDX = true;
    WorkGroupIDY = true;
    WorkGroupIDZ = true;
    WorkItemIDX = true;
    WorkItemIDY = true;
    WorkItemIDZ = true;
  } else {
    // Otherwise each ID input is opted into individually via attributes.
    if (F.hasFnAttribute("amdgpu-work-group-id-x"))
      WorkGroupIDX = true;

    if (F.hasFnAttribute("amdgpu-work-group-id-y"))
      WorkGroupIDY = true;

    if (F.hasFnAttribute("amdgpu-work-group-id-z"))
      WorkGroupIDZ = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-x"))
      WorkItemIDX = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-y"))
      WorkItemIDY = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-z"))
      WorkItemIDZ = true;
  }

  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  bool HasStackObjects = FrameInfo.hasStackObjects();

  if (isEntryFunction()) {
    // X, XY, and XYZ are the only supported combinations, so make sure Y is
    // enabled if Z is.
    if (WorkItemIDZ)
      WorkItemIDY = true;

    PrivateSegmentWaveByteOffset = true;

    // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
        (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
      ArgInfo.PrivateSegmentWaveByteOffset =
          ArgDescriptor::createRegister(AMDGPU::SGPR5);
  }

  bool isAmdHsaOrMesa = ST.isAmdHsaOrMesa(F);
  if (isAmdHsaOrMesa) {
    PrivateSegmentBuffer = true;

    if (F.hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;

    if (F.hasFnAttribute("amdgpu-queue-ptr"))
      QueuePtr = true;

    if (F.hasFnAttribute("amdgpu-dispatch-id"))
      DispatchID = true;
  } else if (ST.isMesaGfxShader(F)) {
    ImplicitBufferPtr = true;
  }

  if (F.hasFnAttribute("amdgpu-kernarg-segment-ptr"))
    KernargSegmentPtr = true;

  if (ST.hasFlatAddressSpace() && isEntryFunction() && isAmdHsaOrMesa) {
    // TODO: This could be refined a lot. The attribute is a poor way of
    // detecting calls that may require it before argument lowering.
    if (HasStackObjects || F.hasFnAttribute("amdgpu-flat-scratch"))
      FlatScratchInit = true;
  }

  // Optional attribute overrides, parsed from string attribute values.
  // consumeInteger's failure return is deliberately ignored: an unparseable
  // value leaves the member at its default from the init list above.
  Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
  StringRef S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GITPtrHigh);

  A = F.getFnAttribute("amdgpu-32bit-address-high-bits");
  S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, HighBitsOf32BitAddress);
}