Code example #1
VkResult WrappedVulkan::vkBindBufferMemory(
    VkDevice                                    device,
    VkBuffer                                    buffer,
    VkDeviceMemory                              mem,
    VkDeviceSize                                memOffset)
{
	VkResourceRecord *record = GetRecord(buffer);

	if(m_State >= WRITING)
	{
		Chunk *chunk = NULL;

		{
			CACHE_THREAD_SERIALISER();
		
			SCOPED_SERIALISE_CONTEXT(BIND_BUFFER_MEM);
			Serialise_vkBindBufferMemory(localSerialiser, device, buffer, mem, memOffset);

			chunk = scope.Get();
		}
	
		// memory object bindings are immutable and must happen before creation or use,
		// so this can always go into the record, even if a resource is created and bound
		// to memory mid-frame
		record->AddChunk(chunk);

		record->AddParent(GetRecord(mem));
		record->baseResource = GetResID(mem);
	}

	return ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buffer), Unwrap(mem), memOffset);
}
Code example #2
VkResult WrappedVulkan::vkCreateBuffer(
			VkDevice                                    device,
			const VkBufferCreateInfo*                   pCreateInfo,
			const VkAllocationCallbacks*                pAllocator,
			VkBuffer*                                   pBuffer)
{
	VkResult ret = ObjDisp(device)->CreateBuffer(Unwrap(device), pCreateInfo, pAllocator, pBuffer);
	
	// SHARING: pCreateInfo sharingMode, queueFamilyCount, pQueueFamilyIndices

	if(ret == VK_SUCCESS)
	{
		ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pBuffer);
		
		if(m_State >= WRITING)
		{
			Chunk *chunk = NULL;

			{
				CACHE_THREAD_SERIALISER();
		
				SCOPED_SERIALISE_CONTEXT(CREATE_BUFFER);
				Serialise_vkCreateBuffer(localSerialiser, device, pCreateInfo, NULL, pBuffer);

				chunk = scope.Get();
			}

			VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pBuffer);
			record->AddChunk(chunk);

			if(pCreateInfo->flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT|VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT))
			{
				record->sparseInfo = new SparseMapping();

				// buffers are always bound opaquely and in arbitrary divisions, sparse residency
				// only means not all the buffer needs to be bound, which is not that interesting for
				// our purposes

				{
					SCOPED_LOCK(m_CapTransitionLock);
					if(m_State != WRITING_CAPFRAME)
						GetResourceManager()->MarkDirtyResource(id);
					else
						GetResourceManager()->MarkPendingDirty(id);
				}
			}
		}
		else
		{
			GetResourceManager()->AddLiveResource(id, *pBuffer);

			m_CreationInfo.m_Buffer[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
		}
	}

	return ret;
}
Code example #3
File: vk_shader_funcs.cpp Project: AJ92/renderdoc
VkResult WrappedVulkan::vkCreatePipelineLayout(VkDevice device,
                                               const VkPipelineLayoutCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator,
                                               VkPipelineLayout *pPipelineLayout)
{
  VkDescriptorSetLayout *unwrapped = GetTempArray<VkDescriptorSetLayout>(pCreateInfo->setLayoutCount);
  for(uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++)
    unwrapped[i] = Unwrap(pCreateInfo->pSetLayouts[i]);

  VkPipelineLayoutCreateInfo unwrappedInfo = *pCreateInfo;
  unwrappedInfo.pSetLayouts = unwrapped;

  VkResult ret = ObjDisp(device)->CreatePipelineLayout(Unwrap(device), &unwrappedInfo, pAllocator,
                                                       pPipelineLayout);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pPipelineLayout);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_PIPE_LAYOUT);
        Serialise_vkCreatePipelineLayout(localSerialiser, device, pCreateInfo, NULL, pPipelineLayout);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pPipelineLayout);
      record->AddChunk(chunk);

      for(uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++)
      {
        VkResourceRecord *layoutrecord = GetRecord(pCreateInfo->pSetLayouts[i]);
        record->AddParent(layoutrecord);
      }
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pPipelineLayout);

      m_CreationInfo.m_PipelineLayout[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
    }
  }

  return ret;
}
Code example #4
VkResult WrappedVulkan::vkCreateImageView(
    VkDevice                                    device,
    const VkImageViewCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkImageView*                                pView)
{
	VkImageViewCreateInfo unwrappedInfo = *pCreateInfo;
	unwrappedInfo.image = Unwrap(unwrappedInfo.image);
	VkResult ret = ObjDisp(device)->CreateImageView(Unwrap(device), &unwrappedInfo, pAllocator, pView);

	if(ret == VK_SUCCESS)
	{
		ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pView);
		
		if(m_State >= WRITING)
		{
			Chunk *chunk = NULL;

			{
				CACHE_THREAD_SERIALISER();
		
				SCOPED_SERIALISE_CONTEXT(CREATE_IMAGE_VIEW);
				Serialise_vkCreateImageView(localSerialiser, device, pCreateInfo, NULL, pView);

				chunk = scope.Get();
			}

			VkResourceRecord *imageRecord = GetRecord(pCreateInfo->image);

			VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pView);
			record->AddChunk(chunk);
			record->AddParent(imageRecord);
			
			// store the base resource. Note images have a baseResource pointing
			// to their memory, which we will also need so we store that separately
			record->baseResource = imageRecord->GetResourceID();
			record->baseResourceMem = imageRecord->baseResource;
			record->sparseInfo = imageRecord->sparseInfo;
		}
		else
		{
			GetResourceManager()->AddLiveResource(id, *pView);
		
			m_CreationInfo.m_ImageView[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
		}
	}

	return ret;
}
Code example #5
File: vk_shader_funcs.cpp Project: AJ92/renderdoc
VkResult WrappedVulkan::vkCreatePipelineCache(VkDevice device,
                                              const VkPipelineCacheCreateInfo *pCreateInfo,
                                              const VkAllocationCallbacks *pAllocator,
                                              VkPipelineCache *pPipelineCache)
{
  // pretend the user didn't provide any cache data

  VkPipelineCacheCreateInfo createInfo = *pCreateInfo;
  createInfo.initialDataSize = 0;
  createInfo.pInitialData = NULL;

  if(pCreateInfo->initialDataSize > 0)
  {
    RDCWARN(
        "Application provided pipeline cache data! This is invalid, as RenderDoc reports "
        "incompatibility with previous caches");
  }

  VkResult ret =
      ObjDisp(device)->CreatePipelineCache(Unwrap(device), &createInfo, pAllocator, pPipelineCache);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pPipelineCache);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_PIPE_CACHE);
        Serialise_vkCreatePipelineCache(localSerialiser, device, &createInfo, NULL, pPipelineCache);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pPipelineCache);
      record->AddChunk(chunk);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pPipelineCache);
    }
  }

  return ret;
}
Code example #6
VkResult WrappedVulkan::vkCreateBufferView(
			VkDevice                                    device,
			const VkBufferViewCreateInfo*               pCreateInfo,
			const VkAllocationCallbacks*                pAllocator,
			VkBufferView*                               pView)
{
	VkBufferViewCreateInfo unwrappedInfo = *pCreateInfo;
	unwrappedInfo.buffer = Unwrap(unwrappedInfo.buffer);
	VkResult ret = ObjDisp(device)->CreateBufferView(Unwrap(device), &unwrappedInfo, pAllocator, pView);

	if(ret == VK_SUCCESS)
	{
		ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pView);
		
		if(m_State >= WRITING)
		{
			Chunk *chunk = NULL;

			{
				CACHE_THREAD_SERIALISER();
		
				SCOPED_SERIALISE_CONTEXT(CREATE_BUFFER_VIEW);
				Serialise_vkCreateBufferView(localSerialiser, device, pCreateInfo, NULL, pView);

				chunk = scope.Get();
			}

			VkResourceRecord *bufferRecord = GetRecord(pCreateInfo->buffer);

			VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pView);
			record->AddChunk(chunk);
			record->AddParent(bufferRecord);

			// store the base resource
			record->baseResource = bufferRecord->baseResource;
			record->sparseInfo = bufferRecord->sparseInfo;
		}
		else
		{
			GetResourceManager()->AddLiveResource(id, *pView);
		
			m_CreationInfo.m_BufferView[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
		}
	}

	return ret;
}
Code example #7
File: vk_dynamic_funcs.cpp Project: DrChat/renderdoc
void WrappedVulkan::vkCmdSetBlendConstants(VkCommandBuffer cmdBuffer, const float *blendConst)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetBlendConstants(Unwrap(cmdBuffer), blendConst);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_BLEND_CONST);
    Serialise_vkCmdSetBlendConstants(localSerialiser, cmdBuffer, blendConst);

    record->AddChunk(scope.Get());
  }
}
Code example #8
File: vk_dynamic_funcs.cpp Project: DrChat/renderdoc
void WrappedVulkan::vkCmdSetLineWidth(VkCommandBuffer cmdBuffer, float lineWidth)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetLineWidth(Unwrap(cmdBuffer), lineWidth);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_LINE_WIDTH);
    Serialise_vkCmdSetLineWidth(localSerialiser, cmdBuffer, lineWidth);

    record->AddChunk(scope.Get());
  }
}
Code example #9
File: vk_dynamic_funcs.cpp Project: DrChat/renderdoc
void WrappedVulkan::vkCmdSetViewport(VkCommandBuffer cmdBuffer, uint32_t firstViewport,
                                     uint32_t viewportCount, const VkViewport *pViewports)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetViewport(Unwrap(cmdBuffer), firstViewport, viewportCount, pViewports);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_VP);
    Serialise_vkCmdSetViewport(localSerialiser, cmdBuffer, firstViewport, viewportCount, pViewports);

    record->AddChunk(scope.Get());
  }
}
Code example #10
File: vk_dynamic_funcs.cpp Project: DrChat/renderdoc
void WrappedVulkan::vkCmdSetStencilReference(VkCommandBuffer cmdBuffer, VkStencilFaceFlags faceMask,
                                             uint32_t reference)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetStencilReference(Unwrap(cmdBuffer), faceMask, reference);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_STENCIL_REF);
    Serialise_vkCmdSetStencilReference(localSerialiser, cmdBuffer, faceMask, reference);

    record->AddChunk(scope.Get());
  }
}
Code example #11
File: vk_dynamic_funcs.cpp Project: DrChat/renderdoc
void WrappedVulkan::vkCmdSetDepthBounds(VkCommandBuffer cmdBuffer, float minDepthBounds,
                                        float maxDepthBounds)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetDepthBounds(Unwrap(cmdBuffer), minDepthBounds, maxDepthBounds);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_DEPTH_BOUNDS);
    Serialise_vkCmdSetDepthBounds(localSerialiser, cmdBuffer, minDepthBounds, maxDepthBounds);

    record->AddChunk(scope.Get());
  }
}
Code example #12
File: vk_dynamic_funcs.cpp Project: DrChat/renderdoc
void WrappedVulkan::vkCmdSetScissor(VkCommandBuffer cmdBuffer, uint32_t firstScissor,
                                    uint32_t scissorCount, const VkRect2D *pScissors)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetScissor(Unwrap(cmdBuffer), firstScissor, scissorCount, pScissors);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_SCISSOR);
    Serialise_vkCmdSetScissor(localSerialiser, cmdBuffer, firstScissor, scissorCount, pScissors);

    record->AddChunk(scope.Get());
  }
}
Code example #13
VkResult WrappedVulkan::vkRegisterDeviceEventEXT(VkDevice device,
                                                 const VkDeviceEventInfoEXT *pDeviceEventInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkFence *pFence)
{
  // for now we emulate this on replay as just a regular fence create, since we don't faithfully
  // replay sync events anyway.
  VkResult ret =
      ObjDisp(device)->RegisterDeviceEventEXT(Unwrap(device), pDeviceEventInfo, pAllocator, pFence);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pFence);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        VkFenceCreateInfo createInfo = {
            VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, NULL, VK_FENCE_CREATE_SIGNALED_BIT,
        };

        SCOPED_SERIALISE_CONTEXT(CREATE_FENCE);
        Serialise_vkCreateFence(localSerialiser, device, &createInfo, NULL, pFence);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pFence);
      record->AddChunk(chunk);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pFence);
    }
  }

  return ret;
}
Code example #14
File: vk_dynamic_funcs.cpp Project: DrChat/renderdoc
void WrappedVulkan::vkCmdSetDepthBias(VkCommandBuffer cmdBuffer, float depthBias,
                                      float depthBiasClamp, float slopeScaledDepthBias)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetDepthBias(Unwrap(cmdBuffer), depthBias, depthBiasClamp,
                                      slopeScaledDepthBias);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_DEPTH_BIAS);
    Serialise_vkCmdSetDepthBias(localSerialiser, cmdBuffer, depthBias, depthBiasClamp,
                                slopeScaledDepthBias);

    record->AddChunk(scope.Get());
  }
}
Code example #15
void WrappedVulkan::vkCmdResetEvent(
    VkCommandBuffer                             cmdBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
	SCOPED_DBG_SINK();

	ObjDisp(cmdBuffer)->CmdResetEvent(Unwrap(cmdBuffer), Unwrap(event), stageMask);

	if(m_State >= WRITING)
	{
		VkResourceRecord *record = GetRecord(cmdBuffer);

		CACHE_THREAD_SERIALISER();

		SCOPED_SERIALISE_CONTEXT(CMD_RESET_EVENT);
		Serialise_vkCmdResetEvent(localSerialiser, cmdBuffer, event, stageMask);

		record->AddChunk(scope.Get());
		record->MarkResourceFrameReferenced(GetResID(event), eFrameRef_Read);
	}
}
Code example #16
File: vk_misc_funcs.cpp Project: Althar93/renderdoc
VkResult WrappedVulkan::vkCreateSampler(
			VkDevice                                    device,
			const VkSamplerCreateInfo*                  pCreateInfo,
			const VkAllocationCallbacks*                pAllocator,
			VkSampler*                                  pSampler)
{
	VkResult ret = ObjDisp(device)->CreateSampler(Unwrap(device), pCreateInfo, pAllocator, pSampler);

	if(ret == VK_SUCCESS)
	{
		ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pSampler);
		
		if(m_State >= WRITING)
		{
			Chunk *chunk = NULL;

			{
				CACHE_THREAD_SERIALISER();

				SCOPED_SERIALISE_CONTEXT(CREATE_SAMPLER);
				Serialise_vkCreateSampler(localSerialiser, device, pCreateInfo, NULL, pSampler);

				chunk = scope.Get();
			}

			VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pSampler);
			record->AddChunk(chunk);
		}
		else
		{
			GetResourceManager()->AddLiveResource(id, *pSampler);
		
			m_CreationInfo.m_Sampler[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
		}
	}

	return ret;
}
Code example #17
VkResult WrappedVulkan::vkBindImageMemory(
    VkDevice                                    device,
    VkImage                                     image,
    VkDeviceMemory                              mem,
    VkDeviceSize                                memOffset)
{
	VkResourceRecord *record = GetRecord(image);

	if(m_State >= WRITING)
	{
		Chunk *chunk = NULL;

		{
			CACHE_THREAD_SERIALISER();
		
			SCOPED_SERIALISE_CONTEXT(BIND_IMAGE_MEM);
			Serialise_vkBindImageMemory(localSerialiser, device, image, mem, memOffset);

			chunk = scope.Get();
		}
		
		// memory object bindings are immutable and must happen before creation or use,
		// so this can always go into the record, even if a resource is created and bound
		// to memory mid-frame
		record->AddChunk(chunk);

		record->AddParent(GetRecord(mem));

		// images are a base resource but we want to track where their memory comes from.
		// Anything that looks up a baseResource for an image knows not to chase further
		// than the image.
		record->baseResource = GetResID(mem);
	}

	return ObjDisp(device)->BindImageMemory(Unwrap(device), Unwrap(image), Unwrap(mem), memOffset);
}
Code example #18
File: vk_misc_funcs.cpp Project: Althar93/renderdoc
VkResult WrappedVulkan::vkDbgSetObjectName(
		VkDevice device,
		VkDebugReportObjectTypeEXT objType,
		uint64_t object,
		size_t nameSize,
		const char* pName)
{
	if(ObjDisp(device)->DbgSetObjectName)
		ObjDisp(device)->DbgSetObjectName(device, objType, object, nameSize, pName);
	
	if(m_State >= WRITING)
	{
		Chunk *chunk = NULL;
		
		VkResourceRecord *record = GetObjRecord(objType, object);

		if(!record)
		{
			RDCERR("Unrecognised object %d %llu", objType, object);
			return VK_SUCCESS;
		}

		{
			CACHE_THREAD_SERIALISER();

			SCOPED_SERIALISE_CONTEXT(SET_NAME);
			Serialise_vkDbgSetObjectName(localSerialiser, device, objType, object, nameSize, pName);

			chunk = scope.Get();
		}

		record->AddChunk(chunk);
	}

	return VK_SUCCESS;
}
Code example #19
VkResult WrappedVulkan::vkCreateDescriptorPool(
    VkDevice                                    device,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorPool*                           pDescriptorPool)
{
    VkResult ret = ObjDisp(device)->CreateDescriptorPool(Unwrap(device), pCreateInfo, pAllocator, pDescriptorPool);

    if(ret == VK_SUCCESS)
    {
        ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pDescriptorPool);

        if(m_State >= WRITING)
        {
            Chunk *chunk = NULL;

            {
                CACHE_THREAD_SERIALISER();

                SCOPED_SERIALISE_CONTEXT(CREATE_DESCRIPTOR_POOL);
                Serialise_vkCreateDescriptorPool(localSerialiser, device, pCreateInfo, NULL, pDescriptorPool);

                chunk = scope.Get();
            }

            VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pDescriptorPool);
            record->AddChunk(chunk);
        }
        else
        {
            GetResourceManager()->AddLiveResource(id, *pDescriptorPool);
        }
    }

    return ret;
}
Code example #20
void WrappedVulkan::vkUnmapMemory(
    VkDevice                                    device,
    VkDeviceMemory                              mem)
{
	if(m_State >= WRITING)
	{
		ResourceId id = GetResID(mem);

		VkResourceRecord *memrecord = GetRecord(mem);

		RDCASSERT(memrecord->memMapState);
		MemMapState &state = *memrecord->memMapState;

		{
			// decide atomically whether this chunk should be in-frame or not,
			// so we can't end up in the else branch without having marked the
			// resource dirty before capframe starts, and then mark it dirty while in-frame

			bool capframe = false;
			{
				SCOPED_LOCK(m_CapTransitionLock);
				capframe = (m_State == WRITING_CAPFRAME);

				if(!capframe)
					GetResourceManager()->MarkDirtyResource(id);
			}

			if(capframe)
			{
				// coherent maps must always serialise all data on unmap, even if a flush was seen, because
				// unflushed data is *also* visible. This is a bit redundant since data is serialised here
				// and in any flushes, but that's the app's fault - the spec calls out flushing coherent maps
				// as inefficient.
				// If the memory is not coherent, we must have a flush for every region written while it is
				// mapped; there is no implicit flush on unmap, so we follow the spec strictly on this.
				if(state.mapCoherent)
				{
					CACHE_THREAD_SERIALISER();

					SCOPED_SERIALISE_CONTEXT(UNMAP_MEM);
					Serialise_vkUnmapMemory(localSerialiser, device, mem);

					VkResourceRecord *record = GetRecord(mem);

					if(m_State == WRITING_IDLE)
					{
						record->AddChunk(scope.Get());
					}
					else
					{
						m_FrameCaptureRecord->AddChunk(scope.Get());
						GetResourceManager()->MarkResourceFrameReferenced(id, eFrameRef_Write);
					}
				}
			}

			state.mappedPtr = NULL;
		}

		Serialiser::FreeAlignedBuffer(state.refData);

		if(state.mapCoherent)
		{
			SCOPED_LOCK(m_CoherentMapsLock);

			auto it = std::find(m_CoherentMaps.begin(), m_CoherentMaps.end(), memrecord);
			if(it == m_CoherentMaps.end())
				RDCERR("vkUnmapMemory for memory handle that's not currently mapped");

			m_CoherentMaps.erase(it);
		}
	}

	ObjDisp(device)->UnmapMemory(Unwrap(device), Unwrap(mem));
}
Code example #21
VkResult WrappedVulkan::vkAllocateMemory(
			VkDevice                                    device,
			const VkMemoryAllocateInfo*                 pAllocateInfo,
			const VkAllocationCallbacks*                pAllocator,
			VkDeviceMemory*                             pMemory)
{
	VkMemoryAllocateInfo info = *pAllocateInfo;
	if(m_State >= WRITING)
		info.memoryTypeIndex = GetRecord(device)->memIdxMap[info.memoryTypeIndex];
	VkResult ret = ObjDisp(device)->AllocateMemory(Unwrap(device), &info, pAllocator, pMemory);
	
	if(ret == VK_SUCCESS)
	{
		ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pMemory);

		if(m_State >= WRITING)
		{
			Chunk *chunk = NULL;

			{
				CACHE_THREAD_SERIALISER();
					
				SCOPED_SERIALISE_CONTEXT(ALLOC_MEM);
				Serialise_vkAllocateMemory(localSerialiser, device, pAllocateInfo, NULL, pMemory);

				chunk = scope.Get();
			}
			
			// create resource record for gpu memory
			VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pMemory);
			RDCASSERT(record);

			record->AddChunk(chunk);

			record->Length = pAllocateInfo->allocationSize;

			uint32_t memProps = m_PhysicalDeviceData.fakeMemProps->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;

			// if memory is not host visible (and so not mappable), don't create map state at all
			if((memProps & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
			{
				record->memMapState = new MemMapState();
				record->memMapState->mapCoherent = (memProps & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
				record->memMapState->refData = NULL;
			}
		}
		else
		{
			GetResourceManager()->AddLiveResource(id, *pMemory);

			m_CreationInfo.m_Memory[id].Init(GetResourceManager(), m_CreationInfo, pAllocateInfo);

			// create a buffer with the whole memory range bound, for copying to and from
			// conveniently (for initial state data)
			VkBuffer buf = VK_NULL_HANDLE;

			VkBufferCreateInfo bufInfo = {
				VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL, 0,
				info.allocationSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT,
			};

			ret = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
			RDCASSERTEQUAL(ret, VK_SUCCESS);

			ResourceId bufid = GetResourceManager()->WrapResource(Unwrap(device), buf);

			ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buf), Unwrap(*pMemory), 0);
			
			// register as a live-only resource, so it is cleaned up properly
			GetResourceManager()->AddLiveResource(bufid, buf);

			m_CreationInfo.m_Memory[id].wholeMemBuf = buf;
		}
	}

	return ret;
}
Code example #22
VkResult WrappedVulkan::vkCreateImage(
			VkDevice                                    device,
			const VkImageCreateInfo*                    pCreateInfo,
			const VkAllocationCallbacks*                pAllocator,
			VkImage*                                    pImage)
{
	VkImageCreateInfo createInfo_adjusted = *pCreateInfo;

	createInfo_adjusted.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

	VkResult ret = ObjDisp(device)->CreateImage(Unwrap(device), &createInfo_adjusted, pAllocator, pImage);
	
	// SHARING: pCreateInfo sharingMode, queueFamilyCount, pQueueFamilyIndices

	if(ret == VK_SUCCESS)
	{
		ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pImage);
		
		if(m_State >= WRITING)
		{
			Chunk *chunk = NULL;

			{
				CACHE_THREAD_SERIALISER();
		
				SCOPED_SERIALISE_CONTEXT(CREATE_IMAGE);
				Serialise_vkCreateImage(localSerialiser, device, pCreateInfo, NULL, pImage);

				chunk = scope.Get();
			}

			VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pImage);
			record->AddChunk(chunk);

			if(pCreateInfo->flags & (VK_IMAGE_CREATE_SPARSE_BINDING_BIT|VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT))
			{
				record->sparseInfo = new SparseMapping();
				
				{
					SCOPED_LOCK(m_CapTransitionLock);
					if(m_State != WRITING_CAPFRAME)
						GetResourceManager()->MarkDirtyResource(id);
					else
						GetResourceManager()->MarkPendingDirty(id);
				}

				if(pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)
				{
					// must record image and page dimension, and create page tables
					uint32_t numreqs = NUM_VK_IMAGE_ASPECTS;
					VkSparseImageMemoryRequirements reqs[NUM_VK_IMAGE_ASPECTS];
					ObjDisp(device)->GetImageSparseMemoryRequirements(Unwrap(device), Unwrap(*pImage), &numreqs, reqs);

					RDCASSERT(numreqs > 0);
					
					record->sparseInfo->pagedim = reqs[0].formatProperties.imageGranularity;
					record->sparseInfo->imgdim = pCreateInfo->extent;
					record->sparseInfo->imgdim.width /= record->sparseInfo->pagedim.width;
					record->sparseInfo->imgdim.height /= record->sparseInfo->pagedim.height;
					record->sparseInfo->imgdim.depth /= record->sparseInfo->pagedim.depth;
					
					uint32_t numpages = record->sparseInfo->imgdim.width*record->sparseInfo->imgdim.height*record->sparseInfo->imgdim.depth;

					for(uint32_t i=0; i < numreqs; i++)
					{
						// assume all page sizes are the same for all aspects
						RDCASSERT(record->sparseInfo->pagedim.width == reqs[i].formatProperties.imageGranularity.width &&
							record->sparseInfo->pagedim.height == reqs[i].formatProperties.imageGranularity.height &&
							record->sparseInfo->pagedim.depth == reqs[i].formatProperties.imageGranularity.depth);

						int a=0;
						for(; a < NUM_VK_IMAGE_ASPECTS; a++)
							if(reqs[i].formatProperties.aspectMask & (1<<a))
								break;

						record->sparseInfo->pages[a] = new pair<VkDeviceMemory, VkDeviceSize>[numpages];
					}
				}
				else
				{
					// don't have to do anything, image is opaque and must be fully bound, just need
					// to track the memory bindings.
				}
			}
		}
		else
		{
			GetResourceManager()->AddLiveResource(id, *pImage);
			
			m_CreationInfo.m_Image[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
		}

		VkImageSubresourceRange range;
		range.baseMipLevel = range.baseArrayLayer = 0;
		range.levelCount = pCreateInfo->mipLevels;
		range.layerCount = pCreateInfo->arrayLayers;
		if(pCreateInfo->imageType == VK_IMAGE_TYPE_3D)
			range.layerCount = pCreateInfo->extent.depth;

		ImageLayouts *layout = NULL;
		{
			SCOPED_LOCK(m_ImageLayoutsLock);
			layout = &m_ImageLayouts[id];
		}

		layout->layerCount = pCreateInfo->arrayLayers;
		layout->levelCount = pCreateInfo->mipLevels;
		layout->extent = pCreateInfo->extent;
		layout->format = pCreateInfo->format;

		layout->subresourceStates.clear();

		range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		if(IsDepthOnlyFormat(pCreateInfo->format))
			range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		else if(IsDepthStencilFormat(pCreateInfo->format))
			range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT|VK_IMAGE_ASPECT_STENCIL_BIT;

		layout->subresourceStates.push_back(ImageRegionState(range, UNKNOWN_PREV_IMG_LAYOUT, VK_IMAGE_LAYOUT_UNDEFINED));
	}

	return ret;
}
Code example #23
File: vk_device_funcs.cpp Project: qqdiguo/renderdoc
VkResult WrappedVulkan::vkCreateDevice(
		VkPhysicalDevice                            physicalDevice,
		const VkDeviceCreateInfo*                   pCreateInfo,
		const VkAllocationCallbacks*                pAllocator,
		VkDevice*                                   pDevice)
{
	VkDeviceCreateInfo createInfo = *pCreateInfo;

	uint32_t qCount = 0;
	VkResult vkr = VK_SUCCESS;
	
	ObjDisp(physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(Unwrap(physicalDevice), &qCount, NULL);

	VkQueueFamilyProperties *props = new VkQueueFamilyProperties[qCount];
	ObjDisp(physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(Unwrap(physicalDevice), &qCount, props);

	// find a queue that supports all capabilities, and if one doesn't exist, add it.
	bool found = false;
	uint32_t qFamilyIdx = 0;
	VkQueueFlags search = (VK_QUEUE_GRAPHICS_BIT);

	// for queue priorities, if we need it
	float one = 1.0f;

	// if we need to change the requested queues, it will point to this
	VkDeviceQueueCreateInfo *modQueues = NULL;

	for(uint32_t i=0; i < createInfo.queueCreateInfoCount; i++)
	{
		uint32_t idx = createInfo.pQueueCreateInfos[i].queueFamilyIndex;
		RDCASSERT(idx < qCount);

		// this requested queue is one we can use too
		if((props[idx].queueFlags & search) == search && createInfo.pQueueCreateInfos[i].queueCount > 0)
		{
			qFamilyIdx = idx;
			found = true;
			break;
		}
	}

	// if we didn't find it, search for which queue family we should add a request for
	if(!found)
	{
		RDCDEBUG("App didn't request a queue family we can use - adding our own");

		for(uint32_t i=0; i < qCount; i++)
		{
			if((props[i].queueFlags & search) == search)
			{
				qFamilyIdx = i;
				found = true;
				break;
			}
		}

		if(!found)
		{
			SAFE_DELETE_ARRAY(props);
			RDCERR("Can't add a queue with required properties for RenderDoc! Unsupported configuration");
			return VK_ERROR_INITIALIZATION_FAILED;
		}

		// we found the queue family, add it
		modQueues = new VkDeviceQueueCreateInfo[createInfo.queueCreateInfoCount + 1];
		for(uint32_t i=0; i < createInfo.queueCreateInfoCount; i++)
			modQueues[i] = createInfo.pQueueCreateInfos[i];

		modQueues[createInfo.queueCreateInfoCount].queueFamilyIndex = qFamilyIdx;
		modQueues[createInfo.queueCreateInfoCount].queueCount = 1;
		modQueues[createInfo.queueCreateInfoCount].pQueuePriorities = &one;

		createInfo.pQueueCreateInfos = modQueues;
		createInfo.queueCreateInfoCount++;
	}

	SAFE_DELETE_ARRAY(props);

	m_QueueFamilies.resize(createInfo.queueCreateInfoCount);
	for(size_t i=0; i < createInfo.queueCreateInfoCount; i++)
	{
		uint32_t family = createInfo.pQueueCreateInfos[i].queueFamilyIndex;
		uint32_t count = createInfo.pQueueCreateInfos[i].queueCount;
		m_QueueFamilies.resize(RDCMAX(m_QueueFamilies.size(), size_t(family+1)));

		m_QueueFamilies[family] = new VkQueue[count];
		for(uint32_t q=0; q < count; q++)
			m_QueueFamilies[family][q] = VK_NULL_HANDLE;
	}

	VkLayerDeviceCreateInfo *layerCreateInfo = (VkLayerDeviceCreateInfo *)pCreateInfo->pNext;

	// step through the chain of pNext until we get to the link info
	while(layerCreateInfo &&
				(layerCreateInfo->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO || 
				 layerCreateInfo->function != VK_LAYER_LINK_INFO)
			)
	{
		layerCreateInfo = (VkLayerDeviceCreateInfo *)layerCreateInfo->pNext;
	}
	RDCASSERT(layerCreateInfo);

	PFN_vkGetDeviceProcAddr gdpa = layerCreateInfo->u.pLayerInfo->pfnNextGetDeviceProcAddr;
	PFN_vkGetInstanceProcAddr gipa = layerCreateInfo->u.pLayerInfo->pfnNextGetInstanceProcAddr;
	// move chain on for next layer
	layerCreateInfo->u.pLayerInfo = layerCreateInfo->u.pLayerInfo->pNext;

	PFN_vkCreateDevice createFunc = (PFN_vkCreateDevice)gipa(VK_NULL_HANDLE, "vkCreateDevice");

	// now search again through for the loader data callback (if it exists)
	layerCreateInfo = (VkLayerDeviceCreateInfo *)pCreateInfo->pNext;

	// step through the chain of pNext
	while(layerCreateInfo &&
				(layerCreateInfo->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO || 
				 layerCreateInfo->function != VK_LOADER_DATA_CALLBACK)
			)
	{
		layerCreateInfo = (VkLayerDeviceCreateInfo *)layerCreateInfo->pNext;
	}

	// if we found one (we might not - on old loaders), then store the func ptr for
	// use instead of SetDispatchTableOverMagicNumber
	if(layerCreateInfo)
	{
		RDCASSERT(m_SetDeviceLoaderData == layerCreateInfo->u.pfnSetDeviceLoaderData || m_SetDeviceLoaderData == NULL,
		          m_SetDeviceLoaderData, layerCreateInfo->u.pfnSetDeviceLoaderData);
		m_SetDeviceLoaderData = layerCreateInfo->u.pfnSetDeviceLoaderData;
	}

	VkResult ret = createFunc(Unwrap(physicalDevice), &createInfo, pAllocator, pDevice);
	
	// don't serialise out any of the pNext stuff for layer initialisation
	// (note that we asserted above that there was nothing else in the chain)
	createInfo.pNext = NULL;

	if(ret == VK_SUCCESS)
	{
		InitDeviceTable(*pDevice, gdpa);

		ResourceId id = GetResourceManager()->WrapResource(*pDevice, *pDevice);
		
		if(m_State >= WRITING)
		{
			Chunk *chunk = NULL;

			{
				CACHE_THREAD_SERIALISER();

				SCOPED_SERIALISE_CONTEXT(CREATE_DEVICE);
				Serialise_vkCreateDevice(localSerialiser, physicalDevice, &createInfo, NULL, pDevice);

				chunk = scope.Get();
			}

			VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pDevice);
			RDCASSERT(record);

			record->AddChunk(chunk);

			record->memIdxMap = GetRecord(physicalDevice)->memIdxMap;

			record->instDevInfo = new InstanceDeviceInfo();
		
#undef CheckExt
#define CheckExt(name) record->instDevInfo->name = GetRecord(m_Instance)->instDevInfo->name;

			// inherit extension enablement from instance, that way GetDeviceProcAddress can check
			// for enabled extensions for instance functions
			CheckInstanceExts();

#undef CheckExt
#define CheckExt(name) if(!strcmp(createInfo.ppEnabledExtensionNames[i], STRINGIZE(name))) { record->instDevInfo->name = true; }

			for(uint32_t i=0; i < createInfo.enabledExtensionCount; i++)
			{
				CheckDeviceExts();
			}
		
			InitDeviceExtensionTables(*pDevice);

			GetRecord(m_Instance)->AddParent(record);
		}
		else
		{
			GetResourceManager()->AddLiveResource(id, *pDevice);
		}

		VkDevice device = *pDevice;

		RDCASSERT(m_Device == VK_NULL_HANDLE); // MULTIDEVICE

		m_PhysicalDevice = physicalDevice;
		m_Device = device;

		m_QueueFamilyIdx = qFamilyIdx;

		if(m_InternalCmds.cmdpool == VK_NULL_HANDLE)
		{
			VkCommandPoolCreateInfo poolInfo = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, NULL, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, qFamilyIdx };
			vkr = ObjDisp(device)->CreateCommandPool(Unwrap(device), &poolInfo, NULL, &m_InternalCmds.cmdpool);
			RDCASSERTEQUAL(vkr, VK_SUCCESS);

			GetResourceManager()->WrapResource(Unwrap(device), m_InternalCmds.cmdpool);
		}
		
		ObjDisp(physicalDevice)->GetPhysicalDeviceProperties(Unwrap(physicalDevice), &m_PhysicalDeviceData.props);
		
		ObjDisp(physicalDevice)->GetPhysicalDeviceMemoryProperties(Unwrap(physicalDevice), &m_PhysicalDeviceData.memProps);

		ObjDisp(physicalDevice)->GetPhysicalDeviceFeatures(Unwrap(physicalDevice), &m_PhysicalDeviceData.features);

		m_PhysicalDeviceData.readbackMemIndex = m_PhysicalDeviceData.GetMemoryIndex(~0U, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
		m_PhysicalDeviceData.uploadMemIndex = m_PhysicalDeviceData.GetMemoryIndex(~0U, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
		m_PhysicalDeviceData.GPULocalMemIndex = m_PhysicalDeviceData.GetMemoryIndex(~0U, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

		m_PhysicalDeviceData.fakeMemProps = GetRecord(physicalDevice)->memProps;

		m_DebugManager = new VulkanDebugManager(this, device);
	}

	SAFE_DELETE_ARRAY(modQueues);

	return ret;
}
Code example #24
File: vk_device_funcs.cpp Project: qqdiguo/renderdoc
VkResult WrappedVulkan::vkEnumeratePhysicalDevices(
		VkInstance                                  instance,
		uint32_t*                                   pPhysicalDeviceCount,
		VkPhysicalDevice*                           pPhysicalDevices)
{
	uint32_t count;

	VkResult vkr = ObjDisp(instance)->EnumeratePhysicalDevices(Unwrap(instance), &count, NULL);

	if(vkr != VK_SUCCESS)
		return vkr;

	VkPhysicalDevice *devices = new VkPhysicalDevice[count];

	vkr = ObjDisp(instance)->EnumeratePhysicalDevices(Unwrap(instance), &count, devices);
	RDCASSERTEQUAL(vkr, VK_SUCCESS);

	m_PhysicalDevices.resize(count);
	
	for(uint32_t i=0; i < count; i++)
	{
		// it's perfectly valid for enumerate type functions to return the same handle
		// each time. If that happens, we will already have a wrapper created so just
		// return the wrapped object to the user and do nothing else
		if(m_PhysicalDevices[i] != VK_NULL_HANDLE)
		{
			GetWrapped(m_PhysicalDevices[i])->RewrapObject(devices[i]);
			devices[i] = m_PhysicalDevices[i];
		}
		else
		{
			GetResourceManager()->WrapResource(instance, devices[i]);
			
			if(m_State >= WRITING)
			{
				// add the record first since it's used in the serialise function below to fetch
				// the memory indices
				VkResourceRecord *record = GetResourceManager()->AddResourceRecord(devices[i]);
				RDCASSERT(record);
				
				record->memProps = new VkPhysicalDeviceMemoryProperties();

				ObjDisp(devices[i])->GetPhysicalDeviceMemoryProperties(Unwrap(devices[i]), record->memProps);

				m_PhysicalDevices[i] = devices[i];

				// we remap memory indices to discourage coherent maps as much as possible
				RemapMemoryIndices(record->memProps, &record->memIdxMap);
				
				{
					CACHE_THREAD_SERIALISER();

					SCOPED_SERIALISE_CONTEXT(ENUM_PHYSICALS);
					Serialise_vkEnumeratePhysicalDevices(localSerialiser, instance, &i, &devices[i]);

					record->AddChunk(scope.Get());
				}

				VkResourceRecord *instrecord = GetRecord(instance);

				instrecord->AddParent(record);

				// treat physical devices as pool members of the instance (ie. freed when the instance dies)
				{
					instrecord->LockChunks();
					instrecord->pooledChildren.push_back(record);
					instrecord->UnlockChunks();
				}
			}
		}
	}

	if(pPhysicalDeviceCount) *pPhysicalDeviceCount = count;
	if(pPhysicalDevices) memcpy(pPhysicalDevices, devices, count*sizeof(VkPhysicalDevice));

	SAFE_DELETE_ARRAY(devices);

	return VK_SUCCESS;
}
Code example #25
File: vk_queue_funcs.cpp Project: Anteru/renderdoc
void WrappedVulkan::vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex,
                                     uint32_t queueIndex, VkQueue *pQueue)
{
  ObjDisp(device)->GetDeviceQueue(Unwrap(device), queueFamilyIndex, queueIndex, pQueue);

  if(m_SetDeviceLoaderData)
    m_SetDeviceLoaderData(m_Device, *pQueue);
  else
    SetDispatchTableOverMagicNumber(device, *pQueue);

  RDCASSERT(m_State >= WRITING);

  {
    // it's perfectly valid for enumerate type functions to return the same handle
    // each time. If that happens, we will already have a wrapper created so just
    // return the wrapped object to the user and do nothing else
    if(m_QueueFamilies[queueFamilyIndex][queueIndex] != VK_NULL_HANDLE)
    {
      *pQueue = m_QueueFamilies[queueFamilyIndex][queueIndex];
    }
    else
    {
      ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pQueue);

      {
        Chunk *chunk = NULL;

        {
          CACHE_THREAD_SERIALISER();

          SCOPED_SERIALISE_CONTEXT(GET_DEVICE_QUEUE);
          Serialise_vkGetDeviceQueue(localSerialiser, device, queueFamilyIndex, queueIndex, pQueue);

          chunk = scope.Get();
        }

        VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pQueue);
        RDCASSERT(record);

        VkResourceRecord *instrecord = GetRecord(m_Instance);

        // treat queues as pool members of the instance (ie. freed when the instance dies)
        {
          instrecord->LockChunks();
          instrecord->pooledChildren.push_back(record);
          instrecord->UnlockChunks();
        }

        record->AddChunk(chunk);
      }

      m_QueueFamilies[queueFamilyIndex][queueIndex] = *pQueue;

      if(queueFamilyIndex == m_QueueFamilyIdx)
      {
        m_Queue = *pQueue;

        // we can now submit any cmds that were queued (e.g. from creating debug
        // manager on vkCreateDevice)
        SubmitCmds();
      }
    }
  }
}
Code example #26
File: vk_queue_funcs.cpp Project: Anteru/renderdoc
VkResult WrappedVulkan::vkQueueSubmit(VkQueue queue, uint32_t submitCount,
                                      const VkSubmitInfo *pSubmits, VkFence fence)
{
  SCOPED_DBG_SINK();

  size_t tempmemSize = sizeof(VkSubmitInfo) * submitCount;

  // need to count how many semaphore and command buffer arrays to allocate for
  for(uint32_t i = 0; i < submitCount; i++)
  {
    tempmemSize += pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
    tempmemSize += pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
    tempmemSize += pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
  }

  byte *memory = GetTempMemory(tempmemSize);

  VkSubmitInfo *unwrappedSubmits = (VkSubmitInfo *)memory;
  VkSemaphore *unwrappedWaitSems = (VkSemaphore *)(unwrappedSubmits + submitCount);

  for(uint32_t i = 0; i < submitCount; i++)
  {
    RDCASSERT(pSubmits[i].sType == VK_STRUCTURE_TYPE_SUBMIT_INFO && pSubmits[i].pNext == NULL);
    unwrappedSubmits[i] = pSubmits[i];

    unwrappedSubmits[i].pWaitSemaphores =
        unwrappedSubmits[i].waitSemaphoreCount ? unwrappedWaitSems : NULL;
    for(uint32_t o = 0; o < unwrappedSubmits[i].waitSemaphoreCount; o++)
      unwrappedWaitSems[o] = Unwrap(pSubmits[i].pWaitSemaphores[o]);
    unwrappedWaitSems += unwrappedSubmits[i].waitSemaphoreCount;

    VkCommandBuffer *unwrappedCommandBuffers = (VkCommandBuffer *)unwrappedWaitSems;

    unwrappedSubmits[i].pCommandBuffers =
        unwrappedSubmits[i].commandBufferCount ? unwrappedCommandBuffers : NULL;
    for(uint32_t o = 0; o < unwrappedSubmits[i].commandBufferCount; o++)
      unwrappedCommandBuffers[o] = Unwrap(pSubmits[i].pCommandBuffers[o]);
    unwrappedCommandBuffers += unwrappedSubmits[i].commandBufferCount;

    VkSemaphore *unwrappedSignalSems = (VkSemaphore *)unwrappedCommandBuffers;

    unwrappedSubmits[i].pSignalSemaphores =
        unwrappedSubmits[i].signalSemaphoreCount ? unwrappedSignalSems : NULL;
    for(uint32_t o = 0; o < unwrappedSubmits[i].signalSemaphoreCount; o++)
      unwrappedSignalSems[o] = Unwrap(pSubmits[i].pSignalSemaphores[o]);
  }

  VkResult ret =
      ObjDisp(queue)->QueueSubmit(Unwrap(queue), submitCount, unwrappedSubmits, Unwrap(fence));

  bool capframe = false;
  set<ResourceId> refdIDs;

  for(uint32_t s = 0; s < submitCount; s++)
  {
    for(uint32_t i = 0; i < pSubmits[s].commandBufferCount; i++)
    {
      ResourceId cmd = GetResID(pSubmits[s].pCommandBuffers[i]);

      VkResourceRecord *record = GetRecord(pSubmits[s].pCommandBuffers[i]);

      {
        SCOPED_LOCK(m_ImageLayoutsLock);
        GetResourceManager()->ApplyBarriers(record->bakedCommands->cmdInfo->imgbarriers,
                                            m_ImageLayouts);
      }

      // need to lock the whole section of code, not just the check on
      // m_State, as we also need to make sure we don't check the state,
      // start marking dirty resources, and then have the state become
      // capframe while we're doing so.
      // The next sections, where we mark resources referenced and add
      // the submit chunk to the frame record, don't have to be protected.
      // Only the decision of whether we're in-frame or not, and the dirty
      // marking, do.
      {
        SCOPED_LOCK(m_CapTransitionLock);
        if(m_State == WRITING_CAPFRAME)
        {
          for(auto it = record->bakedCommands->cmdInfo->dirtied.begin();
              it != record->bakedCommands->cmdInfo->dirtied.end(); ++it)
            GetResourceManager()->MarkPendingDirty(*it);

          capframe = true;
        }
        else
        {
          for(auto it = record->bakedCommands->cmdInfo->dirtied.begin();
              it != record->bakedCommands->cmdInfo->dirtied.end(); ++it)
            GetResourceManager()->MarkDirtyResource(*it);
        }
      }

      if(capframe)
      {
        // for each bound descriptor set, mark it referenced as well as all resources currently
        // bound to it
        for(auto it = record->bakedCommands->cmdInfo->boundDescSets.begin();
            it != record->bakedCommands->cmdInfo->boundDescSets.end(); ++it)
        {
          GetResourceManager()->MarkResourceFrameReferenced(GetResID(*it), eFrameRef_Read);

          VkResourceRecord *setrecord = GetRecord(*it);

          for(auto refit = setrecord->descInfo->bindFrameRefs.begin();
              refit != setrecord->descInfo->bindFrameRefs.end(); ++refit)
          {
            refdIDs.insert(refit->first);
            GetResourceManager()->MarkResourceFrameReferenced(refit->first, refit->second.second);

            if(refit->second.first & DescriptorSetData::SPARSE_REF_BIT)
            {
              VkResourceRecord *sparserecord = GetResourceManager()->GetResourceRecord(refit->first);

              GetResourceManager()->MarkSparseMapReferenced(sparserecord->sparseInfo);
            }
          }
        }

        for(auto it = record->bakedCommands->cmdInfo->sparse.begin();
            it != record->bakedCommands->cmdInfo->sparse.end(); ++it)
          GetResourceManager()->MarkSparseMapReferenced(*it);

        // pull in frame refs from this baked command buffer
        record->bakedCommands->AddResourceReferences(GetResourceManager());
        record->bakedCommands->AddReferencedIDs(refdIDs);

        // ref the parent command buffer by itself, this will pull in the cmd buffer pool
        GetResourceManager()->MarkResourceFrameReferenced(record->GetResourceID(), eFrameRef_Read);

        for(size_t sub = 0; sub < record->bakedCommands->cmdInfo->subcmds.size(); sub++)
        {
          record->bakedCommands->cmdInfo->subcmds[sub]->bakedCommands->AddResourceReferences(
              GetResourceManager());
          record->bakedCommands->cmdInfo->subcmds[sub]->bakedCommands->AddReferencedIDs(refdIDs);
          GetResourceManager()->MarkResourceFrameReferenced(
              record->bakedCommands->cmdInfo->subcmds[sub]->GetResourceID(), eFrameRef_Read);

          record->bakedCommands->cmdInfo->subcmds[sub]->bakedCommands->AddRef();
        }

        GetResourceManager()->MarkResourceFrameReferenced(GetResID(queue), eFrameRef_Read);

        if(fence != VK_NULL_HANDLE)
          GetResourceManager()->MarkResourceFrameReferenced(GetResID(fence), eFrameRef_Read);

        {
          SCOPED_LOCK(m_CmdBufferRecordsLock);
          m_CmdBufferRecords.push_back(record->bakedCommands);
          for(size_t sub = 0; sub < record->bakedCommands->cmdInfo->subcmds.size(); sub++)
            m_CmdBufferRecords.push_back(record->bakedCommands->cmdInfo->subcmds[sub]->bakedCommands);
        }

        record->bakedCommands->AddRef();
      }

      record->cmdInfo->dirtied.clear();
    }
  }

  if(capframe)
  {
    vector<VkResourceRecord *> maps;
    {
      SCOPED_LOCK(m_CoherentMapsLock);
      maps = m_CoherentMaps;
    }

    for(auto it = maps.begin(); it != maps.end(); ++it)
    {
      VkResourceRecord *record = *it;
      MemMapState &state = *record->memMapState;

      // potential persistent map
      if(state.mapCoherent && state.mappedPtr && !state.mapFlushed)
      {
        // only need to flush memory that could affect this submitted batch of work
        if(refdIDs.find(record->GetResourceID()) == refdIDs.end())
        {
          RDCDEBUG("Map of memory %llu not referenced in this queue - not flushing",
                   record->GetResourceID());
          continue;
        }

        size_t diffStart = 0, diffEnd = 0;
        bool found = true;

// enabled as this is necessary for programs with very large coherent mappings
// (> 1GB) as otherwise more than a couple of vkQueueSubmit calls leads to vast
// memory allocation. There might still be bugs lurking in here though
#if 1
        // this causes vkFlushMappedMemoryRanges call to allocate and copy to refData
        // from serialised buffer. We want to copy *precisely* the serialised data,
        // otherwise there is a gap in time between serialising out a snapshot of
        // the buffer and whenever we then copy into the ref data, e.g. below.
        // during this time, data could be written to the buffer and it won't have
        // been caught in the serialised snapshot, and if it doesn't change then
        // it *also* won't be caught in any future FindDiffRange() calls.
        //
        // Likewise once refData is allocated, the call below will also update it
        // with the data serialised out for the same reason.
        //
        // Note: it's still possible that data is being written to by the
        // application while it's being serialised out in the snapshot below. That
        // is OK, since the application is responsible for ensuring it's not writing
        // data that would be needed by the GPU in this submit. As long as the
        // refdata we use for future use is identical to what was serialised, we
        // shouldn't miss anything
        state.needRefData = true;

        // if we have a previous set of data, compare.
        // otherwise just serialise it all
        if(state.refData)
          found = FindDiffRange((byte *)state.mappedPtr, state.refData, (size_t)state.mapSize,
                                diffStart, diffEnd);
        else
#endif
          diffEnd = (size_t)state.mapSize;

        if(found)
        {
          // MULTIDEVICE should find the device for this queue.
          // MULTIDEVICE only want to flush maps associated with this queue
          VkDevice dev = GetDev();

          {
            RDCLOG("Persistent map flush forced for %llu (%llu -> %llu)", record->GetResourceID(),
                   (uint64_t)diffStart, (uint64_t)diffEnd);
            VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, NULL,
                                         (VkDeviceMemory)(uint64_t)record->Resource,
                                         state.mapOffset + diffStart, diffEnd - diffStart};
            vkFlushMappedMemoryRanges(dev, 1, &range);
            state.mapFlushed = false;
          }

          GetResourceManager()->MarkPendingDirty(record->GetResourceID());
        }
        else
        {
          RDCDEBUG("Persistent map flush not needed for %llu", record->GetResourceID());
        }
      }
    }

    {
      CACHE_THREAD_SERIALISER();

      for(uint32_t s = 0; s < submitCount; s++)
      {
        SCOPED_SERIALISE_CONTEXT(QUEUE_SUBMIT);
        Serialise_vkQueueSubmit(localSerialiser, queue, 1, &pSubmits[s], fence);

        m_FrameCaptureRecord->AddChunk(scope.Get());

        for(uint32_t sem = 0; sem < pSubmits[s].waitSemaphoreCount; sem++)
          GetResourceManager()->MarkResourceFrameReferenced(
              GetResID(pSubmits[s].pWaitSemaphores[sem]), eFrameRef_Read);

        for(uint32_t sem = 0; sem < pSubmits[s].signalSemaphoreCount; sem++)
          GetResourceManager()->MarkResourceFrameReferenced(
              GetResID(pSubmits[s].pSignalSemaphores[sem]), eFrameRef_Read);
      }
    }
  }

  return ret;
}
Code example #27
VkResult WrappedVulkan::vkAllocateDescriptorSets(
    VkDevice                                    device,
    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
    VkDescriptorSet*                            pDescriptorSets)
{
    size_t tempmemSize = sizeof(VkDescriptorSetAllocateInfo) + sizeof(VkDescriptorSetLayout)*pAllocateInfo->descriptorSetCount;

    byte *memory = GetTempMemory(tempmemSize);

    VkDescriptorSetAllocateInfo *unwrapped = (VkDescriptorSetAllocateInfo *)memory;
    VkDescriptorSetLayout *layouts = (VkDescriptorSetLayout *)(unwrapped + 1);

    *unwrapped = *pAllocateInfo;
    unwrapped->pSetLayouts = layouts;
    unwrapped->descriptorPool = Unwrap(unwrapped->descriptorPool);
    for(uint32_t i=0; i < pAllocateInfo->descriptorSetCount; i++)
        layouts[i] = Unwrap(pAllocateInfo->pSetLayouts[i]);

    VkResult ret = ObjDisp(device)->AllocateDescriptorSets(Unwrap(device), unwrapped, pDescriptorSets);

    if(ret != VK_SUCCESS) return ret;

    for(uint32_t i=0; i < pAllocateInfo->descriptorSetCount; i++)
    {
        ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), pDescriptorSets[i]);

        if(m_State >= WRITING)
        {
            Chunk *chunk = NULL;

            {
                CACHE_THREAD_SERIALISER();

                VkDescriptorSetAllocateInfo info = *pAllocateInfo;
                info.descriptorSetCount = 1;
                info.pSetLayouts += i;

                SCOPED_SERIALISE_CONTEXT(ALLOC_DESC_SET);
                Serialise_vkAllocateDescriptorSets(localSerialiser, device, &info, &pDescriptorSets[i]);

                chunk = scope.Get();
            }

            VkResourceRecord *record = GetResourceManager()->AddResourceRecord(pDescriptorSets[i]);
            record->AddChunk(chunk);

            ResourceId layoutID = GetResID(pAllocateInfo->pSetLayouts[i]);
            VkResourceRecord *layoutRecord = GetRecord(pAllocateInfo->pSetLayouts[i]);

            VkResourceRecord *poolrecord = GetRecord(pAllocateInfo->descriptorPool);

            {
                poolrecord->LockChunks();
                poolrecord->pooledChildren.push_back(record);
                poolrecord->UnlockChunks();
            }

            record->pool = poolrecord;

            record->AddParent(poolrecord);
            record->AddParent(GetResourceManager()->GetResourceRecord(layoutID));

            // just always treat descriptor sets as dirty
            {
                SCOPED_LOCK(m_CapTransitionLock);
                if(m_State != WRITING_CAPFRAME)
                    GetResourceManager()->MarkDirtyResource(id);
                else
                    GetResourceManager()->MarkPendingDirty(id);
            }

            record->descInfo = new DescriptorSetData();
            record->descInfo->layout = layoutRecord->descInfo->layout;
            record->descInfo->layout->CreateBindingsArray(record->descInfo->descBindings);
        }
        else
        {
            GetResourceManager()->AddLiveResource(id, pDescriptorSets[i]);
        }
    }

    return ret;
}
Code example #28
void WrappedVulkan::vkCmdWaitEvents(
			VkCommandBuffer                                 cmdBuffer,
			uint32_t                                    eventCount,
			const VkEvent*                              pEvents,
			VkPipelineStageFlags                        srcStageMask,
			VkPipelineStageFlags                        dstStageMask,
			uint32_t                                    memoryBarrierCount,
			const VkMemoryBarrier*                      pMemoryBarriers,
			uint32_t                                    bufferMemoryBarrierCount,
			const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
			uint32_t                                    imageMemoryBarrierCount,
			const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
	{
		byte *memory = GetTempMemory( sizeof(VkEvent)*eventCount +
			sizeof(VkBufferMemoryBarrier)*bufferMemoryBarrierCount + 
			sizeof(VkImageMemoryBarrier)*imageMemoryBarrierCount);

		VkEvent *ev = (VkEvent *)memory;
		VkImageMemoryBarrier *im = (VkImageMemoryBarrier *)(ev + eventCount);
		VkBufferMemoryBarrier *buf = (VkBufferMemoryBarrier *)(im + imageMemoryBarrierCount);

		for(uint32_t i=0; i < eventCount; i++)
			ev[i] = Unwrap(pEvents[i]);

		for(uint32_t i=0; i < bufferMemoryBarrierCount; i++)
		{
			buf[i] = pBufferMemoryBarriers[i];
			buf[i].buffer = Unwrap(buf[i].buffer);
		}

		for(uint32_t i=0; i < imageMemoryBarrierCount; i++)
		{
			im[i] = pImageMemoryBarriers[i];
			im[i].image = Unwrap(im[i].image);
		}
		
		ObjDisp(cmdBuffer)->CmdWaitEvents(Unwrap(cmdBuffer), eventCount, ev, srcStageMask, dstStageMask,
			memoryBarrierCount, pMemoryBarriers,
			bufferMemoryBarrierCount, buf,
			imageMemoryBarrierCount, im);
	}

	if(m_State >= WRITING)
	{
		VkResourceRecord *record = GetRecord(cmdBuffer);

		CACHE_THREAD_SERIALISER();

		SCOPED_SERIALISE_CONTEXT(CMD_WAIT_EVENTS);
		Serialise_vkCmdWaitEvents(localSerialiser, cmdBuffer, eventCount, pEvents, srcStageMask, dstStageMask,
			memoryBarrierCount, pMemoryBarriers,
			bufferMemoryBarrierCount, pBufferMemoryBarriers,
			imageMemoryBarrierCount, pImageMemoryBarriers);
		
		if(imageMemoryBarrierCount > 0)
		{
			SCOPED_LOCK(m_ImageLayoutsLock);
			GetResourceManager()->RecordBarriers(GetRecord(cmdBuffer)->cmdInfo->imgbarriers, m_ImageLayouts, imageMemoryBarrierCount, pImageMemoryBarriers);
		}

		record->AddChunk(scope.Get());
		for(uint32_t i=0; i < eventCount; i++)
			record->MarkResourceFrameReferenced(GetResID(pEvents[i]), eFrameRef_Read);
	}
}
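The scratch-memory carve-up used in vkCmdWaitEvents can be shown in isolation. The following is a hypothetical standalone sketch (the helper name and the use of std::vector are not part of the wrapper); it demonstrates splitting one allocation into typed sub-arrays so the unwrapped copies don't need three separate heap allocations per call.

#include <vulkan/vulkan.h>
#include <vector>

// Hypothetical sketch of the carve-up pattern, not part of the wrapper.
static void CarveScratch(uint32_t eventCount, uint32_t bufCount, uint32_t imgCount)
{
	std::vector<unsigned char> scratch(sizeof(VkEvent) * eventCount +
	                                   sizeof(VkBufferMemoryBarrier) * bufCount +
	                                   sizeof(VkImageMemoryBarrier) * imgCount);

	// carve the single block into typed sub-arrays; the order is arbitrary as long
	// as the total size matches the sum above
	VkEvent *ev = (VkEvent *)scratch.data();
	VkImageMemoryBarrier *im = (VkImageMemoryBarrier *)(ev + eventCount);
	VkBufferMemoryBarrier *buf = (VkBufferMemoryBarrier *)(im + imgCount);

	// in the real code each array is then filled with unwrapped copies of the
	// application's handles and passed on to the driver
	(void)buf;
}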
Code Example #29
VkResult WrappedVulkan::vkAllocateMemory(
			VkDevice                                    device,
			const VkMemoryAllocateInfo*                 pAllocateInfo,
			const VkAllocationCallbacks*                pAllocator,
			VkDeviceMemory*                             pMemory)
{
	VkMemoryAllocateInfo info = *pAllocateInfo;
	if(m_State >= WRITING)
	{
		info.memoryTypeIndex = GetRecord(device)->memIdxMap[info.memoryTypeIndex];

		// we need to be able to allocate a buffer that covers the whole memory range. However
		// if the memory is e.g. 100 bytes (arbitrary example) and buffers have memory requirements
		// such that it must be bound to a multiple of 128 bytes, then we can't create a buffer
		// that entirely covers a 100 byte allocation.
		// To get around this, we create a buffer of the allocation's size with the properties we
		// want, check its required size, then bump up the allocation size to that as if the application
		// had requested more. We're assuming here no system will require something like "buffer of
		// size N must be bound to memory of size N+O for some value of O overhead bytes".
		//
		// this could be optimised, since we may end up creating buffers of many different sizes, but allocation
		// in Vulkan is already expensive and making it a little more expensive isn't a big deal.
	
		VkBufferCreateInfo bufInfo = {
			VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL, 0,
			info.allocationSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT,
		};

		// since this is very short lived, it's not wrapped
		VkBuffer buf = VK_NULL_HANDLE;

		VkResult vkr = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
		RDCASSERTEQUAL(vkr, VK_SUCCESS);

		if(vkr == VK_SUCCESS && buf != VK_NULL_HANDLE)
		{
			VkMemoryRequirements mrq = { 0 };
			ObjDisp(device)->GetBufferMemoryRequirements(Unwrap(device), buf, &mrq);

			RDCASSERTMSG("memory requirements less than desired size", mrq.size >= bufInfo.size, mrq.size, bufInfo.size);

			// round up allocation size to allow creation of buffers
			if(mrq.size >= bufInfo.size)
				info.allocationSize = mrq.size;
		}

		ObjDisp(device)->DestroyBuffer(Unwrap(device), buf, NULL);
	}

	VkResult ret = ObjDisp(device)->AllocateMemory(Unwrap(device), &info, pAllocator, pMemory);

	// restore the memoryTypeIndex to the original, as that's what we want to serialise,
	// but maintain any potential modifications we made to info.allocationSize
	info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;
	
	if(ret == VK_SUCCESS)
	{
		ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pMemory);

		if(m_State >= WRITING)
		{
			Chunk *chunk = NULL;

			{
				CACHE_THREAD_SERIALISER();
					
				SCOPED_SERIALISE_CONTEXT(ALLOC_MEM);
				Serialise_vkAllocateMemory(localSerialiser, device, &info, NULL, pMemory);

				chunk = scope.Get();
			}
			
			// create resource record for gpu memory
			VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pMemory);
			RDCASSERT(record);

			record->AddChunk(chunk);

			record->Length = info.allocationSize;

			uint32_t memProps = m_PhysicalDeviceData.fakeMemProps->memoryTypes[info.memoryTypeIndex].propertyFlags;

			// if the memory is not host visible (and therefore not mappable), don't create any map state at all
			if((memProps & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
			{
				record->memMapState = new MemMapState();
				record->memMapState->mapCoherent = (memProps & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
				record->memMapState->refData = NULL;
			}
		}
		else
		{
			GetResourceManager()->AddLiveResource(id, *pMemory);

			m_CreationInfo.m_Memory[id].Init(GetResourceManager(), m_CreationInfo, &info);

			// create a buffer with the whole memory range bound, for copying to and from
			// conveniently (for initial state data)
			VkBuffer buf = VK_NULL_HANDLE;

			VkBufferCreateInfo bufInfo = {
				VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL, 0,
				info.allocationSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT,
			};

			ret = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
			RDCASSERTEQUAL(ret, VK_SUCCESS);

			ResourceId bufid = GetResourceManager()->WrapResource(Unwrap(device), buf);

			ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buf), Unwrap(*pMemory), 0);
			
			// register as a live-only resource, so it is cleaned up properly
			GetResourceManager()->AddLiveResource(bufid, buf);

			m_CreationInfo.m_Memory[id].wholeMemBuf = buf;
		}
	}

	return ret;
}
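The size-rounding trick described in the comments of vkAllocateMemory can be distilled into a small helper. The sketch below is hypothetical (the function name is not from the codebase) and calls the loader's entry points directly rather than the layer's dispatch table:

#include <vulkan/vulkan.h>
#include <algorithm>

// Hypothetical helper (not part of the wrapper) illustrating the technique above:
// create a short-lived buffer of the requested size, query how much memory such a
// buffer actually requires, and round the allocation size up to that.
static VkDeviceSize RoundUpToBufferRequirements(VkDevice device, VkDeviceSize allocationSize)
{
	VkBufferCreateInfo bufInfo = {};
	bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	bufInfo.size = allocationSize;
	bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

	VkBuffer buf = VK_NULL_HANDLE;
	if(vkCreateBuffer(device, &bufInfo, NULL, &buf) != VK_SUCCESS)
		return allocationSize; // fall back to the requested size on failure

	VkMemoryRequirements mrq = {};
	vkGetBufferMemoryRequirements(device, buf, &mrq);
	vkDestroyBuffer(device, buf, NULL);

	// the driver may require more memory than the buffer's nominal size
	return std::max(allocationSize, mrq.size);
}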
Code Example #30
void WrappedVulkan::vkUpdateDescriptorSets(
    VkDevice                                    device,
    uint32_t                                    writeCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites,
    uint32_t                                    copyCount,
    const VkCopyDescriptorSet*                  pDescriptorCopies)
{
    {
        // need to count up number of descriptor infos, to be able to alloc enough space
        uint32_t numInfos = 0;
        for(uint32_t i=0; i < writeCount; i++) numInfos += pDescriptorWrites[i].descriptorCount;

        byte *memory = GetTempMemory(sizeof(VkDescriptorBufferInfo)*numInfos +
                                     sizeof(VkWriteDescriptorSet)*writeCount + sizeof(VkCopyDescriptorSet)*copyCount);

        RDCCOMPILE_ASSERT(sizeof(VkDescriptorBufferInfo) >= sizeof(VkDescriptorImageInfo), "Descriptor structs sizes are unexpected, ensure largest size is used");

        VkWriteDescriptorSet *unwrappedWrites = (VkWriteDescriptorSet *)memory;
        VkCopyDescriptorSet *unwrappedCopies = (VkCopyDescriptorSet *)(unwrappedWrites + writeCount);
        VkDescriptorBufferInfo *nextDescriptors = (VkDescriptorBufferInfo *)(unwrappedCopies + copyCount);

        for(uint32_t i=0; i < writeCount; i++)
        {
            unwrappedWrites[i] = pDescriptorWrites[i];
            unwrappedWrites[i].dstSet = Unwrap(unwrappedWrites[i].dstSet);

            VkDescriptorBufferInfo *bufInfos = nextDescriptors;
            VkDescriptorImageInfo *imInfos = (VkDescriptorImageInfo *)bufInfos;
            VkBufferView *bufViews = (VkBufferView *)bufInfos;
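            // these three pointers all alias the same scratch slot, sized by VkDescriptorBufferInfo
            // (the largest variant, per the compile-time assert above); only the one matching the
            // descriptor type is written below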
            nextDescriptors += pDescriptorWrites[i].descriptorCount;

            // unwrap and assign the appropriate array
            if(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ||
                    pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
            {
                unwrappedWrites[i].pTexelBufferView = (VkBufferView *)bufInfos;
                for(uint32_t j=0; j < pDescriptorWrites[i].descriptorCount; j++)
                    bufViews[j] = Unwrap(pDescriptorWrites[i].pTexelBufferView[j]);
            }
            else if(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
                    pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
                    pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
                    pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
                    pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
            {
                unwrappedWrites[i].pImageInfo = (VkDescriptorImageInfo *)bufInfos;
                for(uint32_t j=0; j < pDescriptorWrites[i].descriptorCount; j++)
                {
                    imInfos[j].imageView = Unwrap(pDescriptorWrites[i].pImageInfo[j].imageView);
                    imInfos[j].sampler = Unwrap(pDescriptorWrites[i].pImageInfo[j].sampler);
                    imInfos[j].imageLayout = pDescriptorWrites[i].pImageInfo[j].imageLayout;
                }
            }
            else
            {
                unwrappedWrites[i].pBufferInfo = bufInfos;
                for(uint32_t j=0; j < pDescriptorWrites[i].descriptorCount; j++)
                {
                    bufInfos[j].buffer = Unwrap(pDescriptorWrites[i].pBufferInfo[j].buffer);
                    bufInfos[j].offset = pDescriptorWrites[i].pBufferInfo[j].offset;
                    bufInfos[j].range = pDescriptorWrites[i].pBufferInfo[j].range;
                }
            }
        }

        for(uint32_t i=0; i < copyCount; i++)
        {
            unwrappedCopies[i] = pDescriptorCopies[i];
            unwrappedCopies[i].dstSet = Unwrap(unwrappedCopies[i].dstSet);
            unwrappedCopies[i].srcSet = Unwrap(unwrappedCopies[i].srcSet);
        }

        ObjDisp(device)->UpdateDescriptorSets(Unwrap(device), writeCount, unwrappedWrites, copyCount, unwrappedCopies);
    }

    bool capframe = false;
    {
        SCOPED_LOCK(m_CapTransitionLock);
        capframe = (m_State == WRITING_CAPFRAME);
    }

    if(capframe)
    {
        // we don't have to mark as referenced any of the resources pointed to by the descriptor set - that's
        // handled at queue submission time, by marking as ref'd all the current bindings of the sets referenced
        // by the command buffer

        for(uint32_t i=0; i < writeCount; i++)
        {
            {
                CACHE_THREAD_SERIALISER();

                SCOPED_SERIALISE_CONTEXT(UPDATE_DESC_SET);
                Serialise_vkUpdateDescriptorSets(localSerialiser, device, 1, &pDescriptorWrites[i], 0, NULL);

                m_FrameCaptureRecord->AddChunk(scope.Get());
            }

            // as long as descriptor sets are forced to have initial states, we don't have to mark them ref'd for
            // write here. Since we only mark them as ref'd when they're actually bound, we can safely skip the
            // ref here, which means updates to descriptor sets that are never used in the frame can be ignored.
            //GetResourceManager()->MarkResourceFrameReferenced(GetResID(pDescriptorWrites[i].destSet), eFrameRef_Write);
        }

        for(uint32_t i=0; i < copyCount; i++)
        {
            {
                CACHE_THREAD_SERIALISER();

                SCOPED_SERIALISE_CONTEXT(UPDATE_DESC_SET);
                Serialise_vkUpdateDescriptorSets(localSerialiser, device, 0, NULL, 1, &pDescriptorCopies[i]);

                m_FrameCaptureRecord->AddChunk(scope.Get());
            }

            // As with writes, we don't have to mark the written descriptor set as used, because unless it's bound
            // somewhere we don't need it anyway. However we DO have to mark the source set as used, because it
            // doesn't have to be bound to still be needed (think about what happens if the dest set is bound
            // somewhere after this copy - what refs the source set?).
            // At the same time as ref'ing the source set, we must ref all of its resources (via the bindFrameRefs).
            // We just ref everything rather than looking at only the copied bindings, to keep things simple.
            // This does mean slightly conservative ref'ing if the dest set doesn't end up getting bound, but we
            // only do this during frame capture so it's not too bad.
            //GetResourceManager()->MarkResourceFrameReferenced(GetResID(pDescriptorCopies[i].destSet), eFrameRef_Write);

            {
                GetResourceManager()->MarkResourceFrameReferenced(GetResID(pDescriptorCopies[i].srcSet), eFrameRef_Read);

                VkResourceRecord *setrecord = GetRecord(pDescriptorCopies[i].srcSet);

                for(auto refit = setrecord->descInfo->bindFrameRefs.begin(); refit != setrecord->descInfo->bindFrameRefs.end(); ++refit)
                {
                    GetResourceManager()->MarkResourceFrameReferenced(refit->first, refit->second.second);

                    if(refit->second.first & DescriptorSetData::SPARSE_REF_BIT)
                    {
                        VkResourceRecord *record = GetResourceManager()->GetResourceRecord(refit->first);

                        GetResourceManager()->MarkSparseMapReferenced(record->sparseInfo);
                    }
                }
            }
        }
    }

    // need to track descriptor set contents whether capframing or idle
    if(m_State >= WRITING)
    {
        for(uint32_t i=0; i < writeCount; i++)
        {
            VkResourceRecord *record = GetRecord(pDescriptorWrites[i].dstSet);
            RDCASSERT(record->descInfo && record->descInfo->layout);
            const DescSetLayout &layout = *record->descInfo->layout;

            RDCASSERT(pDescriptorWrites[i].dstBinding < record->descInfo->descBindings.size());

            DescriptorSetSlot *binding = record->descInfo->descBindings[pDescriptorWrites[i].dstBinding];

            FrameRefType ref = eFrameRef_Write;

            switch(layout.bindings[pDescriptorWrites[i].dstBinding].descriptorType)
            {
            case VK_DESCRIPTOR_TYPE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                ref = eFrameRef_Read;
                break;
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                ref = eFrameRef_Write;
                break;
            default:
                RDCERR("Unexpected descriptor type");
            }

            // We need to handle the cases where these bindings are stale:
            // i.e. image handle 0xf00baa is allocated,
            // bound into a descriptor set,
            // the image is released,
            // then the descriptor set is bound, but this image is never used by a shader etc.
            //
            // Worst case, a new image or similar has been created with this handle -
            // in that case we end up ref'ing an image that isn't actually used.
            // Worst worst case, we ref an image as write when actually it's not, but
            // this is likewise not a serious problem, and rather difficult to solve
            // (we would need to version handles somehow, but we don't have enough bits
            // to do that reliably).
            //
            // This is handled by RemoveBindFrameRef silently dropping id == ResourceId().

            for(uint32_t d=0; d < pDescriptorWrites[i].descriptorCount; d++)
            {
                DescriptorSetSlot &bind = binding[pDescriptorWrites[i].dstArrayElement + d];

                if(bind.texelBufferView != VK_NULL_HANDLE)
                {
                    record->RemoveBindFrameRef(GetResID(bind.texelBufferView));
                    if(GetRecord(bind.texelBufferView)->baseResource != ResourceId())
                        record->RemoveBindFrameRef(GetRecord(bind.texelBufferView)->baseResource);
                }
                if(bind.imageInfo.imageView != VK_NULL_HANDLE)
                {
                    record->RemoveBindFrameRef(GetResID(bind.imageInfo.imageView));
                    record->RemoveBindFrameRef(GetRecord(bind.imageInfo.imageView)->baseResource);
                    if(GetRecord(bind.imageInfo.imageView)->baseResourceMem != ResourceId())
                        record->RemoveBindFrameRef(GetRecord(bind.imageInfo.imageView)->baseResourceMem);
                }
                if(bind.imageInfo.sampler != VK_NULL_HANDLE)
                {
                    record->RemoveBindFrameRef(GetResID(bind.imageInfo.sampler));
                }
                if(bind.bufferInfo.buffer != VK_NULL_HANDLE)
                {
                    record->RemoveBindFrameRef(GetResID(bind.bufferInfo.buffer));
                    if(GetRecord(bind.bufferInfo.buffer)->baseResource != ResourceId())
                        record->RemoveBindFrameRef(GetRecord(bind.bufferInfo.buffer)->baseResource);
                }

                // NULL everything out now so that we don't accidentally reference an object
                // that was removed already
                bind.texelBufferView = VK_NULL_HANDLE;
                bind.bufferInfo.buffer = VK_NULL_HANDLE;
                bind.imageInfo.imageView = VK_NULL_HANDLE;
                bind.imageInfo.sampler = VK_NULL_HANDLE;

                if(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER ||
                        pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
                {
                    bind.texelBufferView = pDescriptorWrites[i].pTexelBufferView[d];
                }
                else if(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
                        pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
                        pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
                        pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
                        pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
                {
                    bind.imageInfo = pDescriptorWrites[i].pImageInfo[d];

                    // ignore descriptors not part of the write, by NULL'ing out those members
                    // as they might not even point to a valid object
                    if(pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
                        bind.imageInfo.imageView = VK_NULL_HANDLE;
                    else if(pDescriptorWrites[i].descriptorType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
                        bind.imageInfo.sampler = VK_NULL_HANDLE;
                }
                else
                {
                    bind.bufferInfo = pDescriptorWrites[i].pBufferInfo[d];
                }

                if(bind.texelBufferView != VK_NULL_HANDLE)
                {
                    record->AddBindFrameRef(GetResID(bind.texelBufferView), eFrameRef_Read, GetRecord(bind.texelBufferView)->sparseInfo != NULL);
                    if(GetRecord(bind.texelBufferView)->baseResource != ResourceId())
                        record->AddBindFrameRef(GetRecord(bind.texelBufferView)->baseResource, ref);
                }
                if(bind.imageInfo.imageView != VK_NULL_HANDLE)
                {
                    record->AddBindFrameRef(GetResID(bind.imageInfo.imageView), eFrameRef_Read, GetRecord(bind.imageInfo.imageView)->sparseInfo != NULL);
                    record->AddBindFrameRef(GetRecord(bind.imageInfo.imageView)->baseResource, ref);
                    if(GetRecord(bind.imageInfo.imageView)->baseResourceMem != ResourceId())
                        record->AddBindFrameRef(GetRecord(bind.imageInfo.imageView)->baseResourceMem, eFrameRef_Read);
                }
                if(bind.imageInfo.sampler != VK_NULL_HANDLE)
                {
                    record->AddBindFrameRef(GetResID(bind.imageInfo.sampler), eFrameRef_Read);
                }
                if(bind.bufferInfo.buffer != VK_NULL_HANDLE)
                {
                    record->AddBindFrameRef(GetResID(bind.bufferInfo.buffer), eFrameRef_Read, GetRecord(bind.bufferInfo.buffer)->sparseInfo != NULL);
                    if(GetRecord(bind.bufferInfo.buffer)->baseResource != ResourceId())
                        record->AddBindFrameRef(GetRecord(bind.bufferInfo.buffer)->baseResource, ref);
                }
            }
        }

        // this is almost identical to the loop above, except that instead of sourcing the descriptors
        // from the VkWriteDescriptorSet structs, we source them from our stored bindings on the source
        // descriptor set

        for(uint32_t i=0; i < copyCount; i++)
        {
            VkResourceRecord *dstrecord = GetRecord(pDescriptorCopies[i].dstSet);
            RDCASSERT(dstrecord->descInfo && dstrecord->descInfo->layout);
            const DescSetLayout &layout = *dstrecord->descInfo->layout;

            VkResourceRecord *srcrecord = GetRecord(pDescriptorCopies[i].srcSet);

            RDCASSERT(pDescriptorCopies[i].dstBinding < dstrecord->descInfo->descBindings.size());
            RDCASSERT(pDescriptorCopies[i].srcBinding < srcrecord->descInfo->descBindings.size());

            DescriptorSetSlot *dstbinding = dstrecord->descInfo->descBindings[pDescriptorCopies[i].dstBinding];
            DescriptorSetSlot *srcbinding = srcrecord->descInfo->descBindings[pDescriptorCopies[i].srcBinding];

            FrameRefType ref = eFrameRef_Write;

            switch(layout.bindings[pDescriptorCopies[i].dstBinding].descriptorType)
            {
            case VK_DESCRIPTOR_TYPE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                ref = eFrameRef_Read;
                break;
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                ref = eFrameRef_Write;
                break;
            default:
                RDCERR("Unexpected descriptor type");
            }

            for(uint32_t d=0; d < pDescriptorCopies[i].descriptorCount; d++)
            {
                DescriptorSetSlot &bind = dstbinding[pDescriptorCopies[i].dstArrayElement + d];

                if(bind.texelBufferView != VK_NULL_HANDLE)
                {
                    dstrecord->RemoveBindFrameRef(GetResID(bind.texelBufferView));
                    if(GetRecord(bind.texelBufferView)->baseResource != ResourceId())
                        dstrecord->RemoveBindFrameRef(GetRecord(bind.texelBufferView)->baseResource);
                }
                if(bind.imageInfo.imageView != VK_NULL_HANDLE)
                {
                    dstrecord->RemoveBindFrameRef(GetResID(bind.imageInfo.imageView));
                    dstrecord->RemoveBindFrameRef(GetRecord(bind.imageInfo.imageView)->baseResource);
                    if(GetRecord(bind.imageInfo.imageView)->baseResourceMem != ResourceId())
                        dstrecord->RemoveBindFrameRef(GetRecord(bind.imageInfo.imageView)->baseResourceMem);
                }
                if(bind.imageInfo.sampler != VK_NULL_HANDLE)
                {
                    dstrecord->RemoveBindFrameRef(GetResID(bind.imageInfo.sampler));
                }
                if(bind.bufferInfo.buffer != VK_NULL_HANDLE)
                {
                    dstrecord->RemoveBindFrameRef(GetResID(bind.bufferInfo.buffer));
                    if(GetRecord(bind.bufferInfo.buffer)->baseResource != ResourceId())
                        dstrecord->RemoveBindFrameRef(GetRecord(bind.bufferInfo.buffer)->baseResource);
                }

                bind = srcbinding[pDescriptorCopies[i].srcArrayElement + d];

                if(bind.texelBufferView != VK_NULL_HANDLE)
                {
                    dstrecord->AddBindFrameRef(GetResID(bind.texelBufferView), eFrameRef_Read, GetRecord(bind.texelBufferView)->sparseInfo != NULL);
                    if(GetRecord(bind.texelBufferView)->baseResource != ResourceId())
                        dstrecord->AddBindFrameRef(GetRecord(bind.texelBufferView)->baseResource, ref);
                }
                if(bind.imageInfo.imageView != VK_NULL_HANDLE)
                {
                    dstrecord->AddBindFrameRef(GetResID(bind.imageInfo.imageView), eFrameRef_Read, GetRecord(bind.imageInfo.imageView)->sparseInfo != NULL);
                    dstrecord->AddBindFrameRef(GetRecord(bind.imageInfo.imageView)->baseResource, ref);
                    if(GetRecord(bind.imageInfo.imageView)->baseResourceMem != ResourceId())
                        dstrecord->AddBindFrameRef(GetRecord(bind.imageInfo.imageView)->baseResourceMem, eFrameRef_Read);
                }
                if(bind.imageInfo.sampler != VK_NULL_HANDLE)
                {
                    dstrecord->AddBindFrameRef(GetResID(bind.imageInfo.sampler), eFrameRef_Read);
                }
                if(bind.bufferInfo.buffer != VK_NULL_HANDLE)
                {
                    dstrecord->AddBindFrameRef(GetResID(bind.bufferInfo.buffer), eFrameRef_Read, GetRecord(bind.bufferInfo.buffer)->sparseInfo != NULL);
                    if(GetRecord(bind.bufferInfo.buffer)->baseResource != ResourceId())
                        dstrecord->AddBindFrameRef(GetRecord(bind.bufferInfo.buffer)->baseResource, ref);
                }
            }
        }

    }
}
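For context, a minimal application-side update that the wrapper above intercepts might look like the following. This is illustrative only; 'device', 'uniformBuf', 'srcSet' and 'dstSet' are assumed to exist.

// Illustrative usage (not part of the wrapper): one write and one copy.
VkDescriptorBufferInfo bufDesc = {};
bufDesc.buffer = uniformBuf;
bufDesc.offset = 0;
bufDesc.range = VK_WHOLE_SIZE;

VkWriteDescriptorSet write = {};
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.dstSet = dstSet;
write.dstBinding = 0;
write.descriptorCount = 1;
write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
write.pBufferInfo = &bufDesc;

VkCopyDescriptorSet copy = {};
copy.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
copy.srcSet = srcSet;
copy.srcBinding = 1;
copy.dstSet = dstSet;
copy.dstBinding = 1;
copy.descriptorCount = 1;

// The wrapper unwraps the set and buffer handles before forwarding the call, serialises
// the update if a frame is being captured, and then updates its shadow copy of the
// bindings plus the per-set frame references as shown above.
vkUpdateDescriptorSets(device, 1, &write, 1, &copy);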