Example #1
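This example is RenderDoc's serialisation of vkCreateSwapchainKHR. On the capture side (m_State >= WRITING) it records the create info and the number of swapchain images; on replay (READING) no real swapchain is created, so the function instead creates, allocates and binds one plain VkImage per presentable image, and registers creation info and layout-tracking state for each of them.
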
bool WrappedVulkan::Serialise_vkCreateSwapchainKHR(
		Serialiser*                             localSerialiser,
		VkDevice                                device,
		const VkSwapchainCreateInfoKHR*         pCreateInfo,
		const VkAllocationCallbacks*            pAllocator,
		VkSwapchainKHR*                         pSwapChain)
{
	SERIALISE_ELEMENT(ResourceId, devId, GetResID(device));
	SERIALISE_ELEMENT(VkSwapchainCreateInfoKHR, info, *pCreateInfo);
	SERIALISE_ELEMENT(ResourceId, id, GetResID(*pSwapChain));

	uint32_t numIms = 0;

	if(m_State >= WRITING)
	{
		VkResult vkr = VK_SUCCESS;

		vkr = ObjDisp(device)->GetSwapchainImagesKHR(Unwrap(device), Unwrap(*pSwapChain), &numIms, NULL);
		RDCASSERTEQUAL(vkr, VK_SUCCESS);
	}

	SERIALISE_ELEMENT(uint32_t, numSwapImages, numIms);
	SERIALISE_ELEMENT(VkSharingMode, sharingMode, pCreateInfo->imageSharingMode);

	if(m_State == READING)
	{
		// use original ID because we don't create a live version of the swapchain
		SwapchainInfo &swapinfo = m_CreationInfo.m_SwapChain[id];

		swapinfo.format = info.imageFormat;
		swapinfo.extent = info.imageExtent;
		swapinfo.arraySize = info.imageArrayLayers;

		swapinfo.images.resize(numSwapImages);

		device = GetResourceManager()->GetLiveHandle<VkDevice>(devId);

		const VkImageCreateInfo imInfo = {
			VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, NULL, 0,
			VK_IMAGE_TYPE_2D, info.imageFormat,
			{ info.imageExtent.width, info.imageExtent.height, 1 },
			1, info.imageArrayLayers, VK_SAMPLE_COUNT_1_BIT,
			VK_IMAGE_TILING_OPTIMAL,
			VK_IMAGE_USAGE_TRANSFER_SRC_BIT|
			VK_IMAGE_USAGE_TRANSFER_DST_BIT|
			VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|
			VK_IMAGE_USAGE_SAMPLED_BIT,
			sharingMode, 0, NULL,
			VK_IMAGE_LAYOUT_UNDEFINED,
		};

		for(uint32_t i=0; i < numSwapImages; i++)
		{
			VkDeviceMemory mem = VK_NULL_HANDLE;
			VkImage im = VK_NULL_HANDLE;

			VkResult vkr = ObjDisp(device)->CreateImage(Unwrap(device), &imInfo, NULL, &im);
			RDCASSERTEQUAL(vkr, VK_SUCCESS);

			ResourceId liveId = GetResourceManager()->WrapResource(Unwrap(device), im);
			
			VkMemoryRequirements mrq = {0};

			ObjDisp(device)->GetImageMemoryRequirements(Unwrap(device), Unwrap(im), &mrq);
			
			VkMemoryAllocateInfo allocInfo = {
				VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, NULL,
				mrq.size, GetGPULocalMemoryIndex(mrq.memoryTypeBits),
			};

			vkr = ObjDisp(device)->AllocateMemory(Unwrap(device), &allocInfo, NULL, &mem);
			RDCASSERTEQUAL(vkr, VK_SUCCESS);
			
			ResourceId memid = GetResourceManager()->WrapResource(Unwrap(device), mem);
			// register as a live-only resource, so it is cleaned up properly
			GetResourceManager()->AddLiveResource(memid, mem);

			vkr = ObjDisp(device)->BindImageMemory(Unwrap(device), Unwrap(im), Unwrap(mem), 0);
			RDCASSERTEQUAL(vkr, VK_SUCCESS);

			// image live ID will be assigned separately in Serialise_vkGetSwapchainImagesKHR
			// memory doesn't have a live ID

			swapinfo.images[i].im = im;

			// fill out image info so we track resource state barriers
			// sneaky-cheeky use of the swapchain's ID here (it's not a live ID because
			// we don't create a live swapchain). This will be picked up in
			// Serialise_vkGetSwapchainImagesKHR to set the data for the live IDs on the
			// swapchain images.
			VulkanCreationInfo::Image &iminfo = m_CreationInfo.m_Image[id];
			iminfo.type = VK_IMAGE_TYPE_2D;
			iminfo.format = info.imageFormat;
			iminfo.extent.width = info.imageExtent.width;
			iminfo.extent.height = info.imageExtent.height;
			iminfo.extent.depth = 1;
			iminfo.mipLevels = 1;
			iminfo.arrayLayers = info.imageArrayLayers;
			iminfo.creationFlags = eTextureCreate_SRV|eTextureCreate_RTV|eTextureCreate_SwapBuffer;
			iminfo.cube = false;
			iminfo.samples = VK_SAMPLE_COUNT_1_BIT;

			m_CreationInfo.m_Names[liveId] = StringFormat::Fmt("Presentable Image %u", i);

			VkImageSubresourceRange range;
			range.baseMipLevel = range.baseArrayLayer = 0;
			range.levelCount = 1;
			range.layerCount = info.imageArrayLayers;
			range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

			m_ImageLayouts[liveId].subresourceStates.clear();
			m_ImageLayouts[liveId].subresourceStates.push_back(ImageRegionState(range, UNKNOWN_PREV_IMG_LAYOUT, VK_IMAGE_LAYOUT_UNDEFINED));
		}
	}

	return true;
}
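
Both examples choose a Vulkan memory type index through helpers such as GetGPULocalMemoryIndex, GetUploadMemoryIndex and GetReadbackMemoryIndex, whose implementations are not shown here. As a rough sketch of what such a helper does, built on the public Vulkan API rather than on the wrapper's cached device data, the selection usually looks like the following (a sketch under those assumptions, not RenderDoc's implementation):

#include <vulkan/vulkan.h>

// Sketch only: pick the first memory type that is allowed by the resource's
// memoryTypeBits and has all of the requested property flags, e.g.
// VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT for GPU-local memory, or
// VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
// for upload/readback memory. The real helpers cache the memory properties on
// the wrapper; this version queries them on every call for simplicity.
static uint32_t ChooseMemoryTypeIndex(VkPhysicalDevice phys, uint32_t memoryTypeBits,
                                      VkMemoryPropertyFlags wantedFlags)
{
  VkPhysicalDeviceMemoryProperties memProps = {};
  vkGetPhysicalDeviceMemoryProperties(phys, &memProps);

  // first pass: require every wanted property flag
  for(uint32_t i = 0; i < memProps.memoryTypeCount; i++)
  {
    if((memoryTypeBits & (1u << i)) &&
       (memProps.memoryTypes[i].propertyFlags & wantedFlags) == wantedFlags)
      return i;
  }

  // second pass: no type has the preferred flags, fall back to any allowed type
  for(uint32_t i = 0; i < memProps.memoryTypeCount; i++)
    if(memoryTypeBits & (1u << i))
      return i;

  return 0;
}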
Example #2
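This example is RenderDoc's internal memory sub-allocator. It first tries to place the request inside an existing VkDeviceMemory block of the matching MemoryType, respecting bufferImageGranularity when buffer and image sub-allocations share a block as well as the resource's own alignment. If nothing fits, it allocates a new block, growing the block size from 32MB up to a 256MB cap, or falls back to a dedicated allocation for over-sized requests.
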
MemoryAllocation WrappedVulkan::AllocateMemoryForResource(bool buffer, VkMemoryRequirements mrq,
                                                          MemoryScope scope, MemoryType type)
{
  MemoryAllocation ret;
  ret.scope = scope;
  ret.type = type;
  ret.buffer = buffer;
  ret.size = AlignUp(mrq.size, mrq.alignment);

  RDCDEBUG("Allocating 0x%llx with alignment 0x%llx in 0x%x for a %s (%s in %s)", ret.size,
           mrq.alignment, mrq.memoryTypeBits, buffer ? "buffer" : "image", ToStr(type).c_str(),
           ToStr(scope).c_str());

  std::vector<MemoryAllocation> &blockList = m_MemoryBlocks[(size_t)scope];

  // first try to find a match
  int i = 0;
  for(MemoryAllocation &block : blockList)
  {
    RDCDEBUG(
        "Considering block %d: memory type %u and type %s. Total size 0x%llx, current offset "
        "0x%llx, last alloc was %s",
        i, block.memoryTypeIndex, ToStr(block.type).c_str(), block.size, block.offs,
        block.buffer ? "buffer" : "image");
    i++;

    // skip this block if it's not the memory type we want
    if(ret.type != block.type || (mrq.memoryTypeBits & (1 << block.memoryTypeIndex)) == 0)
    {
      RDCDEBUG("block type %d or memory type %d is incompatible", block.type, block.memoryTypeIndex);
      continue;
    }

    // offs is where we can put our next sub-allocation
    VkDeviceSize offs = block.offs;

    // if we are on a buffer/image, account for any alignment we might have to do
    if(ret.buffer != block.buffer)
      offs = AlignUp(offs, m_PhysicalDeviceData.props.limits.bufferImageGranularity);

    // align as required by the resource
    offs = AlignUp(offs, mrq.alignment);

    if(offs > block.size)
    {
      RDCDEBUG("Next offset 0x%llx would be off the end of the memory (size 0x%llx).", offs,
               block.size);
      continue;
    }

    VkDeviceSize avail = block.size - offs;

    RDCDEBUG("At next offset 0x%llx, there's 0x%llx bytes available for 0x%llx bytes requested",
             offs, avail, ret.size);

    // if the allocation will fit, we've found our candidate.
    if(ret.size <= avail)
    {
      // update the block offset and buffer/image bit
      block.offs = offs + ret.size;
      block.buffer = ret.buffer;

      // update our return value
      ret.offs = offs;
      ret.mem = block.mem;

      RDCDEBUG("Allocating using this block: 0x%llx -> 0x%llx", ret.offs, block.offs);

      // stop searching
      break;
    }
  }

  if(ret.mem == VK_NULL_HANDLE)
  {
    RDCDEBUG("No available block found - allocating new block");

    VkDeviceSize &allocSize = m_MemoryBlockSize[(size_t)scope];

    // we start with 32MB blocks and double the size each time we need a new block, capping at 256MB.
    switch(allocSize)
    {
      case 0: allocSize = 32; break;
      case 32: allocSize = 64; break;
      case 64: allocSize = 128; break;
      case 128:
      case 256: allocSize = 256; break;
      default:
        RDCDEBUG("Unexpected previous allocation size 0x%llx bytes, allocating 256MB", allocSize);
        allocSize = 256;
        break;
    }

    uint32_t memoryTypeIndex = 0;

    switch(ret.type)
    {
      case MemoryType::Upload: memoryTypeIndex = GetUploadMemoryIndex(mrq.memoryTypeBits); break;
      case MemoryType::GPULocal:
        memoryTypeIndex = GetGPULocalMemoryIndex(mrq.memoryTypeBits);
        break;
      case MemoryType::Readback:
        memoryTypeIndex = GetReadbackMemoryIndex(mrq.memoryTypeBits);
        break;
    }

    VkMemoryAllocateInfo info = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, NULL, allocSize * 1024 * 1024, memoryTypeIndex,
    };

    if(ret.size > info.allocationSize)
    {
      // if we get an over-sized allocation, first try to immediately jump to the largest block
      // size.
      allocSize = 256;
      info.allocationSize = allocSize * 1024 * 1024;

      // if it's still over-sized, just allocate precisely enough and give it a dedicated allocation
      if(ret.size > info.allocationSize)
      {
        RDCDEBUG("Over-sized allocation for 0x%llx bytes", ret.size);
        info.allocationSize = ret.size;
      }
    }

    RDCDEBUG("Creating new allocation of 0x%llx bytes", info.allocationSize);

    MemoryAllocation chunk;
    chunk.buffer = ret.buffer;
    chunk.memoryTypeIndex = memoryTypeIndex;
    chunk.scope = scope;
    chunk.type = type;
    chunk.size = info.allocationSize;

    // the offset starts immediately after this allocation
    chunk.offs = ret.size;

    VkDevice d = GetDev();

    // do the actual allocation
    VkResult vkr = ObjDisp(d)->AllocateMemory(Unwrap(d), &info, NULL, &chunk.mem);
    RDCASSERTEQUAL(vkr, VK_SUCCESS);

    GetResourceManager()->WrapResource(Unwrap(d), chunk.mem);

    // push the new chunk
    blockList.push_back(chunk);

    // return the first bytes in the new chunk
    ret.offs = 0;
    ret.mem = chunk.mem;
  }

  return ret;
}
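
For context, a caller of AllocateMemoryForResource queries the resource's memory requirements and then binds the resource at the returned offset inside the (possibly shared) VkDeviceMemory block. The exact call sites are not shown above, so the following is a hypothetical sketch that follows the same ObjDisp/Unwrap pattern; the buf handle and the scope/type arguments are placeholders:

// Hypothetical call site (sketch only) - 'buf' is a wrapped VkBuffer created elsewhere.
VkDevice d = GetDev();

VkMemoryRequirements mrq = {};
ObjDisp(d)->GetBufferMemoryRequirements(Unwrap(d), Unwrap(buf), &mrq);

// sub-allocate GPU-local memory for the buffer
MemoryAllocation alloc =
    AllocateMemoryForResource(true, mrq, MemoryScope::InitialContents, MemoryType::GPULocal);

// bind the buffer at the sub-allocation's offset within the shared block
VkResult vkr =
    ObjDisp(d)->BindBufferMemory(Unwrap(d), Unwrap(buf), Unwrap(alloc.mem), alloc.offs);
RDCASSERTEQUAL(vkr, VK_SUCCESS);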