// Code example #1
//==============================================================================
void PipelineImpl::initGraphics(const PipelineInitInfo& init)
{
	FilledGraphicsPipelineCreateInfo ci = FILLED;

	ci.pStages = &ci.m_stages[0];
	initShaders(init, ci);

	// Init sub-states
	ci.pVertexInputState = initVertexStage(init.m_vertex, ci.m_vertex);
	ci.pInputAssemblyState =
		initInputAssemblyState(init.m_inputAssembler, ci.m_ia);
	ci.pTessellationState =
		initTessellationState(init.m_tessellation, ci.m_tess);
	ci.pViewportState = initViewportState(ci.m_vp);
	ci.pRasterizationState = initRasterizerState(init.m_rasterizer, ci.m_rast);
	ci.pMultisampleState = initMsState(ci.m_ms);
	ci.pDepthStencilState = initDsState(init.m_depthStencil, ci.m_ds);
	ci.pColorBlendState = initColorState(init.m_color, ci.m_color);
	ci.pDynamicState = nullptr; // No dynamic state as static at the moment

	// Finalize
	ci.layout = getGrManagerImpl().m_globalPipelineLayout;
	ci.renderPass = getGrManagerImpl().getOrCreateCompatibleRenderPass(init);
	ci.basePipelineHandle = VK_NULL_HANDLE;

	ANKI_VK_CHECK(vkCreateGraphicsPipelines(
		getDevice(), nullptr, 1, &ci, nullptr, &m_handle));
}
// Code example #2
//==============================================================================
void CommandBufferImpl::beginRenderPassInternal()
{
	VkRenderPassBeginInfo bi = {};
	bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
	FramebufferImpl& impl = m_activeFb->getImplementation();
	bi.renderPass = impl.getRenderPassHandle();
	bi.clearValueCount = impl.getAttachmentCount();
	bi.pClearValues = impl.getClearValues();

	if(!impl.isDefaultFramebuffer())
	{
		// Bind a non-default FB

		bi.framebuffer = impl.getFramebufferHandle(0);

		impl.getAttachmentsSize(
			bi.renderArea.extent.width, bi.renderArea.extent.height);
	}
	else
	{
		// Bind the default FB
		m_renderedToDefaultFb = true;

		bi.framebuffer = impl.getFramebufferHandle(
			getGrManagerImpl().getFrame() % MAX_FRAMES_IN_FLIGHT);

		bi.renderArea.extent.width =
			getGrManagerImpl().getDefaultSurfaceWidth();
		bi.renderArea.extent.height =
			getGrManagerImpl().getDefaultSurfaceHeight();
	}

	vkCmdBeginRenderPass(m_handle, &bi, VK_SUBPASS_CONTENTS_INLINE);
}
// Code example #3
//==============================================================================
// Initialize the command buffer: set up its stack allocator, acquire a native
// handle from the per-thread pool and start recording. If this is the frame's
// first command buffer, transition the default framebuffer image as well.
Error CommandBufferImpl::init(const CommandBufferInitInfo& init)
{
	// Stack allocator backed by the manager's pool, sized by the caller hint.
	auto& pool = getGrManagerImpl().getAllocator().getMemoryPool();
	m_alloc = StackAllocator<U8>(pool.getAllocationCallback(),
		pool.getAllocationCallbackUserData(),
		init.m_hints.m_chunkSize,
		1.0,
		0,
		false);

	m_flags = init.m_flags;
	m_tid = Thread::getCurrentThreadId();

	Bool secondLevel = (m_flags & CommandBufferFlag::SECOND_LEVEL)
		== CommandBufferFlag::SECOND_LEVEL;
	m_handle = getGrManagerImpl().newCommandBuffer(m_tid, secondLevel);
	ANKI_ASSERT(m_handle);

	// Begin recording
	VkCommandBufferInheritanceInfo inheritance = {};
	inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;

	if(secondLevel)
	{
		// Secondary command buffers not implemented yet.
		ANKI_ASSERT(0 && "TODO");
	}

	VkCommandBufferBeginInfo begin = {};
	begin.sType = VK_COMMAND_BUFFER_BEGIN_INFO_STRUCTURE_TYPE_FIX;
	begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
	begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
	begin.pInheritanceInfo = &inheritance;

	// vkBeginCommandBuffer can fail (eg out of host/device memory). The result
	// was previously ignored; check it like every other Vulkan call here.
	ANKI_VK_CHECK(vkBeginCommandBuffer(m_handle, &begin));

	// If it's the frame's first command buffer then do the default fb image
	// transition
	if((m_flags & CommandBufferFlag::FRAME_FIRST)
		== CommandBufferFlag::FRAME_FIRST)
	{
		// Default FB barrier/transition: UNDEFINED -> COLOR_ATTACHMENT_OPTIMAL
		// for the current frame's surface image.
		setImageBarrier(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
			VK_ACCESS_MEMORY_READ_BIT,
			VK_IMAGE_LAYOUT_UNDEFINED,
			VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
			getGrManagerImpl().getDefaultSurfaceImage(
				getGrManagerImpl().getFrame() % MAX_FRAMES_IN_FLIGHT),
			VkImageSubresourceRange{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1});
	}

	return ErrorCode::NONE;
}
// Code example #4
//==============================================================================
CommandBufferImpl::~CommandBufferImpl()
{
	// Warn about suspicious usage patterns.
	if(m_empty)
	{
		ANKI_LOGW("Command buffer was empty");
	}

	if(!m_finalized)
	{
		ANKI_LOGW("Command buffer was not flushed");
	}

	// Return the native handle to the per-thread pool it came from.
	if(m_handle)
	{
		const Bool isSecondLevel = (m_flags & CommandBufferFlag::SECOND_LEVEL)
			== CommandBufferFlag::SECOND_LEVEL;
		getGrManagerImpl().deleteCommandBuffer(m_handle, isSecondLevel, m_tid);
	}

	// Release the object references this command buffer was keeping alive.
	m_pplineList.destroy(m_alloc);
	m_fbList.destroy(m_alloc);
	m_rcList.destroy(m_alloc);
	m_texList.destroy(m_alloc);
	m_queryList.destroy(m_alloc);
}
// Code example #5
//==============================================================================
void CommandBufferImpl::bindResourceGroup(
	ResourceGroupPtr rc, U slot, const TransientMemoryInfo* dynInfo)
{
	// TODO set the correct binding point

	commandCommon();
	const ResourceGroupImpl& groupImpl = rc->getImplementation();

	// Bind the group's descriptor set (if any) along with its dynamic offsets.
	if(groupImpl.hasDescriptorSet())
	{
		Array<U32, MAX_UNIFORM_BUFFER_BINDINGS + MAX_STORAGE_BUFFER_BINDINGS>
			dynamicOffsets = {{}};

		groupImpl.setupDynamicOffsets(dynInfo, &dynamicOffsets[0]);

		VkDescriptorSet descriptorSet = groupImpl.getHandle();
		vkCmdBindDescriptorSets(m_handle,
			VK_PIPELINE_BIND_POINT_GRAPHICS,
			getGrManagerImpl().getGlobalPipelineLayout(),
			slot,
			1,
			&descriptorSet,
			dynamicOffsets.getSize(),
			&dynamicOffsets[0]);
	}

	// Vertex and index buffers are only carried by set 0.
	if(slot == 0)
	{
		const VkBuffer* vertBuffers = nullptr;
		const VkDeviceSize* vertOffsets = nullptr;
		U vertBindingCount = 0;
		groupImpl.getVertexBindingInfo(vertBuffers, vertOffsets, vertBindingCount);
		if(vertBindingCount)
		{
			vkCmdBindVertexBuffers(
				m_handle, 0, vertBindingCount, vertBuffers, vertOffsets);
		}

		VkBuffer indexBuffer;
		VkDeviceSize indexBufferOffset;
		VkIndexType indexType;
		if(groupImpl.getIndexBufferInfo(indexBuffer, indexBufferOffset, indexType))
		{
			vkCmdBindIndexBuffer(m_handle, indexBuffer, indexBufferOffset, indexType);
		}
	}

	// Keep the resource group alive for this command buffer's lifetime.
	m_rcList.pushBack(m_alloc, rc);
}
// Code example #6
BufferImpl::~BufferImpl()
{
	// Destroying a buffer that is still mapped is a programming error.
	ANKI_ASSERT(!m_mapped);

	// Destroy the Vulkan object first, then give the backing allocation back
	// to the GPU memory manager.
	if(m_handle)
	{
		vkDestroyBuffer(getDevice(), m_handle, nullptr);
	}

	if(m_memHandle)
	{
		getGrManagerImpl().getGpuMemoryManager().freeMemory(m_memHandle);
	}
}
// Code example #7
//==============================================================================
void CommandBufferImpl::endRecording()
{
	commandCommon();
	ANKI_ASSERT(!m_finalized);
	ANKI_ASSERT(!m_empty);

	if((m_flags & CommandBufferFlag::FRAME_LAST)
		== CommandBufferFlag::FRAME_LAST)
	{
		// Default FB barrier/transition
		setImageBarrier(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
			VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
			VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
			VK_ACCESS_MEMORY_READ_BIT,
			VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
			getGrManagerImpl().getDefaultSurfaceImage(
				getGrManagerImpl().getFrame() % MAX_FRAMES_IN_FLIGHT),
			VkImageSubresourceRange{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1});
	}

	ANKI_VK_CHECKF(vkEndCommandBuffer(m_handle));
	m_finalized = true;
}
// Code example #8
// Map a sub-range of the buffer and return a CPU pointer to it. The requested
// access must be a subset of the access the buffer was created with.
void* BufferImpl::map(PtrSize offset, PtrSize range, BufferMapAccessBit access)
{
	ANKI_ASSERT(isCreated());
	ANKI_ASSERT(access != BufferMapAccessBit::NONE);
	ANKI_ASSERT((access & m_access) != BufferMapAccessBit::NONE);
	ANKI_ASSERT(!m_mapped);
	ANKI_ASSERT(offset + range <= m_size);

	// The whole allocation is persistently mapped; just offset into it.
	void* base = getGrManagerImpl().getGpuMemoryManager().getMappedAddress(m_memHandle);
	ANKI_ASSERT(base);

#if ANKI_EXTRA_CHECKS
	m_mapped = true;
#endif

	// TODO Flush or invalidate caches

	U8* bytes = static_cast<U8*>(base);
	return static_cast<void*>(bytes + offset);
}
// Code example #9
//==============================================================================
// Create the compute pipeline from the COMPUTE shader in the init info.
void PipelineImpl::initCompute(const PipelineInitInfo& init)
{
	// Zero-initialize: the previous code left ci.flags and ci.basePipelineIndex
	// uninitialized, handing indeterminate values to the driver.
	VkComputePipelineCreateInfo ci = {};
	ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
	ci.pNext = nullptr;
	ci.flags = 0;
	ci.layout = getGrManagerImpl().m_globalPipelineLayout;
	ci.basePipelineHandle = VK_NULL_HANDLE;
	ci.basePipelineIndex = -1; // Ignored since no DERIVATIVE_BIT is set

	// The single compute stage.
	VkPipelineShaderStageCreateInfo& stage = ci.stage;
	stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
	stage.pNext = nullptr;
	stage.flags = 0;
	stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
	stage.module =
		init.m_shaders[ShaderType::COMPUTE]->getImplementation().m_handle;
	stage.pName = "main";
	stage.pSpecializationInfo = nullptr;

	ANKI_VK_CHECK(vkCreateComputePipelines(
		getDevice(), nullptr, 1, &ci, nullptr, &m_handle));
}
// Code example #10
// Create the VkBuffer, choose a memory type matching the requested CPU map
// access (with progressively weaker fallbacks), allocate memory and bind it.
// Returns Error::NONE on success; ANKI_VK_CHECK propagates Vulkan failures.
Error BufferImpl::init(const BufferInitInfo& inf)
{
	ANKI_ASSERT(!isCreated());

	PtrSize size = inf.m_size;
	BufferMapAccessBit access = inf.m_access;
	BufferUsageBit usage = inf.m_usage;

	ANKI_ASSERT(size > 0);
	ANKI_ASSERT(usage != BufferUsageBit::NONE);

	// Align the size to satisfy fill buffer
	// (round up to a multiple of 4 bytes)
	alignRoundUp(4, size);

	// Create the buffer
	VkBufferCreateInfo ci = {};
	ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	ci.size = size;
	ci.usage = convertBufferUsageBit(usage);
	// NOTE(review): with EXCLUSIVE sharing the queue family list is ignored by
	// the spec; it is set here anyway, which is harmless.
	ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
	ci.queueFamilyIndexCount = 1;
	U32 queueIdx = getGrManagerImpl().getGraphicsQueueFamily();
	ci.pQueueFamilyIndices = &queueIdx;
	ANKI_VK_CHECK(vkCreateBuffer(getDevice(), &ci, nullptr, &m_handle));
	getGrManagerImpl().trySetVulkanHandleName(inf.getName(), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, m_handle);

	// Get mem requirements
	VkMemoryRequirements req;
	vkGetBufferMemoryRequirements(getDevice(), m_handle, &req);
	// MAX_U32 acts as the "no memory type found yet" sentinel for the
	// fallback chains below.
	U memIdx = MAX_U32;

	if(access == BufferMapAccessBit::WRITE)
	{
		// Only write, probably for uploads

		VkMemoryPropertyFlags preferDeviceLocal;
		VkMemoryPropertyFlags avoidDeviceLocal;
		if((usage & (~BufferUsageBit::TRANSFER_ALL)) != BufferUsageBit::NONE)
		{
			// Will be used for something other than transfer, try to put it in the device
			preferDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
			avoidDeviceLocal = 0;
		}
		else
		{
			// Will be used only for transfers, don't want it in the device
			preferDeviceLocal = 0;
			avoidDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
		}

		// Device & host & coherent but not cached
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | preferDeviceLocal,
			VK_MEMORY_PROPERTY_HOST_CACHED_BIT | avoidDeviceLocal);

		// Fallback: host & coherent and not cached
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
				VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
				VK_MEMORY_PROPERTY_HOST_CACHED_BIT | avoidDeviceLocal);
		}

		// Fallback: just host
		if(memIdx == MAX_U32)
		{
			ANKI_VK_LOGW("Using a fallback mode for write-only buffer");
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
		}
	}
	else if((access & BufferMapAccessBit::READ) != BufferMapAccessBit::NONE)
	{
		// Read or read/write

		// Cached & coherent
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT
				| VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
			0);

		// Fallback: Just cached
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, 0);
		}

		// Fallback: Just host
		if(memIdx == MAX_U32)
		{
			ANKI_VK_LOGW("Using a fallback mode for read/write buffer");
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
		}
	}
	else
	{
		// Not mapped

		ANKI_ASSERT(access == BufferMapAccessBit::NONE);

		// Device only
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
			req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

		// Fallback: Device with anything else
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
		}
	}

	// Every fallback chain ends in a host-visible or device-local query that
	// should always succeed on a conforming implementation.
	ANKI_ASSERT(memIdx != MAX_U32);

	// Cache the property flags of the chosen type (eg to decide on flushes).
	const VkPhysicalDeviceMemoryProperties& props = getGrManagerImpl().getMemoryProperties();
	m_memoryFlags = props.memoryTypes[memIdx].propertyFlags;

	// Allocate
	getGrManagerImpl().getGpuMemoryManager().allocateMemory(memIdx, req.size, req.alignment, true, m_memHandle);

	// Bind mem to buffer
	{
		ANKI_TRACE_SCOPED_EVENT(VK_BIND_OBJECT);
		ANKI_VK_CHECK(vkBindBufferMemory(getDevice(), m_handle, m_memHandle.m_memory, m_memHandle.m_offset));
	}

	// Note: m_size keeps the caller-requested size, m_actualSize the aligned one.
	m_access = access;
	m_size = inf.m_size;
	m_actualSize = size;
	m_usage = usage;
	return Error::NONE;
}