void
radv_temp_descriptor_set_destroy(struct radv_device *device,
				 VkDescriptorSet _set)
{
	RADV_FROM_HANDLE(radv_descriptor_set, set, _set);

	/* Temporary sets are not tracked by any pool, hence the NULL pool. */
	radv_descriptor_set_destroy(device, NULL, set, false);
}
Example #2
VkResult radv_FreeDescriptorSets(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	uint32_t                                    count,
	const VkDescriptorSet*                      pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	for (uint32_t i = 0; i < count; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

		/* Sets from pools backed by linear host memory are reclaimed only
		 * when the pool is reset or destroyed, so skip them here. */
		if (set && !pool->host_memory_base)
			radv_descriptor_set_destroy(device, pool, set, true);
	}
	return VK_SUCCESS;
}
Example #3
VkResult radv_ResetDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	VkDescriptorPoolResetFlags                  flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	if (!pool->host_memory_base) {
		/* Pools that track sets individually must destroy each entry. */
		for (int i = 0; i < pool->entry_count; ++i) {
			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
		}
		pool->entry_count = 0;
	}

	/* Rewind the pool's bump allocators. */
	pool->current_offset = 0;
	pool->host_memory_ptr = pool->host_memory_base;

	return VK_SUCCESS;
}
void radv_DestroyDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

	if (!pool)
		return;

	/* Destroy any sets still alive in the pool before releasing its BO. */
	list_for_each_entry_safe(struct radv_descriptor_set, set,
				 &pool->descriptor_sets, descriptor_pool) {
		radv_descriptor_set_destroy(device, pool, set, false);
	}

	if (pool->bo)
		device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}
Example #5
void radv_DestroyDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

	if (!pool)
		return;

	if (!pool->host_memory_base) {
		/* Pools that track sets individually must destroy each entry;
		 * host-memory pools are released wholesale with the pool. */
		for (int i = 0; i < pool->entry_count; ++i) {
			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
		}
	}

	if (pool->bo)
		device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}
VkResult radv_ResetDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	VkDescriptorPoolResetFlags                  flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	/* Destroy every set still allocated from the pool. */
	list_for_each_entry_safe(struct radv_descriptor_set, set,
				 &pool->descriptor_sets, descriptor_pool) {
		radv_descriptor_set_destroy(device, pool, set, false);
	}

	/* Reset allocation bookkeeping and rebuild the free-node chain:
	 * node i links to i + 1 and the last node terminates the list. */
	pool->allocated_sets = 0;
	pool->current_offset = 0;
	pool->free_list = -1;
	pool->full_list = 0;
	pool->free_nodes[pool->max_sets - 1].next = -1;

	for (int i = 0; i + 1 < pool->max_sets; ++i)
		pool->free_nodes[i].next = i + 1;

	return VK_SUCCESS;
}
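
For context, a minimal application-side sketch (not part of the Mesa source) of how these entry points are reached through the core Vulkan API. The helper name example_descriptor_pool_lifetime is hypothetical, `device` is assumed to be a valid VkDevice, and the note that VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT selects the per-set (non host_memory_base) path is an assumption inferred from the checks in the examples above.

#include <vulkan/vulkan.h>

static void example_descriptor_pool_lifetime(VkDevice device)
{
	VkDescriptorPoolSize pool_size = {
		.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
		.descriptorCount = 16,
	};
	VkDescriptorPoolCreateInfo pool_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
		/* Required for vkFreeDescriptorSets; without it the driver may
		 * back the pool with linear host memory (host_memory_base) and
		 * skip per-set frees, as in the examples above (assumption). */
		.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
		.maxSets = 16,
		.poolSizeCount = 1,
		.pPoolSizes = &pool_size,
	};
	VkDescriptorPool pool;

	if (vkCreateDescriptorPool(device, &pool_info, NULL, &pool) != VK_SUCCESS)
		return;

	/* ... vkAllocateDescriptorSets / vkFreeDescriptorSets as needed ... */

	/* Bulk-recycle every set in the pool (radv_ResetDescriptorPool). */
	vkResetDescriptorPool(device, pool, 0);

	/* Frees any sets still alive in the pool (radv_DestroyDescriptorPool). */
	vkDestroyDescriptorPool(device, pool, NULL);
}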