Code example #1
File: D3D12Buffer.cpp Project: KitoHo/rpcs3
std::tuple<bool, size_t, std::vector<D3D12_SHADER_RESOURCE_VIEW_DESC>>
D3D12GSRender::upload_and_set_vertex_index_data(ID3D12GraphicsCommandList* command_list)
{
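	// Dispatch the currently active draw command through draw_command_visitor;
	// the lambda builds the shader resource view descriptions for the vertex
	// buffers bound by the current vertex state.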
	return std::apply_visitor(
		draw_command_visitor(command_list, m_buffer_data, m_vertex_buffer_data.Get(),
			[this](
				const auto& state, const auto& list) { return get_vertex_buffers(state, list); }),
		get_draw_command(rsx::method_registers));
}
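Both examples route the current draw command through a visitor object. The stand-alone sketch below illustrates that dispatch pattern with plain C++17 std::variant/std::visit; the draw_array, draw_indexed and draw_inlined_array types, the returned counts and the 16-byte stride are hypothetical stand-ins, not rpcs3's actual definitions.

#include <cstdint>
#include <iostream>
#include <variant>

// Hypothetical stand-ins for the draw command flavours a visitor would handle.
struct draw_array         { uint32_t first, count; };
struct draw_indexed       { uint32_t index_count; };
struct draw_inlined_array { uint32_t inline_byte_size; };

using draw_command = std::variant<draw_array, draw_indexed, draw_inlined_array>;

// One overload per alternative, mirroring the draw_command_visitor idea:
// each command type yields the number of vertices/indices to upload.
struct draw_command_visitor
{
	uint32_t operator()(const draw_array& cmd) const         { return cmd.count; }
	uint32_t operator()(const draw_indexed& cmd) const       { return cmd.index_count; }
	uint32_t operator()(const draw_inlined_array& cmd) const { return cmd.inline_byte_size / 16; } // assumed 16-byte stride
};

int main()
{
	draw_command cmd = draw_indexed{ 36 };
	// std::visit picks the matching overload based on the active alternative.
	std::cout << std::visit(draw_command_visitor{}, cmd) << '\n'; // prints 36
}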
Code example #2
File: VKVertexBuffers.cpp Project: PZerua/rpcs3
std::tuple<VkPrimitiveTopology, u32, u32, u32, std::optional<std::tuple<VkDeviceSize, VkIndexType> > >
VKGSRender::upload_vertex_data()
{
	m_vertex_layout = analyse_inputs_interleaved();

	draw_command_visitor visitor(m_index_buffer_ring_info, m_vertex_layout);
	auto result = std::apply_visitor(visitor, get_draw_command(rsx::method_registers));

	auto &vertex_count = result.allocated_vertex_count;
	auto &vertex_base = result.vertex_data_base;

	//Do actual vertex upload
	auto required = calculate_memory_requirements(m_vertex_layout, vertex_count);
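	// required.first  = size in bytes of the persistent (cacheable) attribute block
	// required.second = size in bytes of the volatile (per-draw) attribute block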
	size_t persistent_offset = UINT64_MAX, volatile_offset = UINT64_MAX;

	m_persistent_attribute_storage = VK_NULL_HANDLE;
	m_volatile_attribute_storage = VK_NULL_HANDLE;

	if (required.first > 0)
	{
		//Check if cacheable
		//Only data in the 'persistent' block may be cached
		//TODO: make vertex cache keep local data beyond frame boundaries and hook notify command
		bool in_cache = false;
		bool to_store = false;
		u32  storage_address = UINT32_MAX;

		if (m_vertex_layout.interleaved_blocks.size() == 1 &&
			rsx::method_registers.current_draw_clause.command != rsx::draw_command::inlined_array)
		{
			storage_address = m_vertex_layout.interleaved_blocks[0].real_offset_address + vertex_base;
			if (auto cached = m_vertex_cache->find_vertex_range(storage_address, VK_FORMAT_R8_UINT, required.first))
			{
				in_cache = true;
				m_current_frame->buffer_views_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device,
					m_attrib_ring_info.heap->value, VK_FORMAT_R8_UINT, cached->offset_in_heap, required.first));
			}
			else
			{
				to_store = true;
			}
		}

		if (!in_cache)
		{
			persistent_offset = (u32)m_attrib_ring_info.alloc<256>(required.first);
			m_current_frame->buffer_views_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device,
				m_attrib_ring_info.heap->value, VK_FORMAT_R8_UINT, persistent_offset, required.first));

			if (to_store)
			{
				//store ref in vertex cache
				m_vertex_cache->store_range(storage_address, VK_FORMAT_R8_UINT, required.first, (u32)persistent_offset);
			}
		}

		m_persistent_attribute_storage = m_current_frame->buffer_views_to_clean.back()->value;
	}
	else
	{
		m_persistent_attribute_storage = null_buffer_view->value;
	}

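	// The volatile (per-draw) block is never cached; it always gets a fresh
	// 256-byte-aligned allocation from the attribute ring buffer.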
	if (required.second > 0)
	{
		volatile_offset = (u32)m_attrib_ring_info.alloc<256>(required.second);
		m_current_frame->buffer_views_to_clean.push_back(std::make_unique<vk::buffer_view>(*m_device,
			m_attrib_ring_info.heap->value, VK_FORMAT_R8_UINT, volatile_offset, required.second));

		m_volatile_attribute_storage = m_current_frame->buffer_views_to_clean.back()->value;
	}
	else
	{
		m_volatile_attribute_storage = null_buffer_view->value;
	}

	//Write all the data once if possible
	if (required.first && required.second && volatile_offset > persistent_offset)
	{
		//Do this once for both to save time on map/unmap cycles
		const size_t block_end = (volatile_offset + required.second);
		const size_t block_size = block_end - persistent_offset;
		const size_t volatile_offset_in_block = volatile_offset - persistent_offset;

		void *block_mapping = m_attrib_ring_info.map(persistent_offset, block_size);
		write_vertex_data_to_memory(m_vertex_layout, vertex_base, vertex_count, block_mapping, (char*)block_mapping + volatile_offset_in_block);
		m_attrib_ring_info.unmap();
	}
	else
	{
		if (required.first > 0 && persistent_offset != UINT64_MAX)
		{
			void *persistent_mapping = m_attrib_ring_info.map(persistent_offset, required.first);
			write_vertex_data_to_memory(m_vertex_layout, vertex_base, vertex_count, persistent_mapping, nullptr);
			m_attrib_ring_info.unmap();
		}

		if (required.second > 0)
		{
			void *volatile_mapping = m_attrib_ring_info.map(volatile_offset, required.second);
			write_vertex_data_to_memory(m_vertex_layout, vertex_base, vertex_count, nullptr, volatile_mapping);
			m_attrib_ring_info.unmap();
		}
	}

	return std::make_tuple(result.native_primitive_type, result.vertex_draw_count, result.allocated_vertex_count, result.vertex_index_base, result.index_info);
}
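The end of upload_vertex_data is worth calling out: when both a persistent and a volatile block were allocated and the volatile block lands after the persistent one in the ring buffer, the whole range is mapped once and both blocks are written through a single pointer, saving a map/unmap round trip. The following self-contained sketch isolates just that offset arithmetic; the byte vector, the fixed offsets and sizes, and the memset fills are hypothetical stand-ins for the Vulkan heap and write_vertex_data_to_memory.

#include <cassert>
#include <cstddef>
#include <cstring>
#include <vector>

int main()
{
	// Hypothetical, already-aligned ring-buffer offsets and block sizes.
	const size_t persistent_offset = 0;
	const size_t persistent_size   = 1024;
	const size_t volatile_offset   = 1024;
	const size_t volatile_size     = 256;

	std::vector<char> heap(64 * 1024); // stand-in for the attribute ring buffer heap

	if (volatile_offset > persistent_offset)
	{
		// One mapping covering [persistent_offset, volatile_offset + volatile_size)
		const size_t block_end  = volatile_offset + volatile_size;
		const size_t block_size = block_end - persistent_offset;
		const size_t volatile_offset_in_block = volatile_offset - persistent_offset;
		assert(persistent_offset + block_size <= heap.size());

		char* block_mapping = heap.data() + persistent_offset; // map(persistent_offset, block_size) equivalent
		std::memset(block_mapping, 0xAA, persistent_size);                          // persistent attribute data
		std::memset(block_mapping + volatile_offset_in_block, 0xBB, volatile_size); // volatile attribute data
		// unmap() equivalent would follow here
	}
	return 0;
}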