VkResult WrappedVulkan::vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
                                           VkDeviceSize memOffset)
{
  VkResourceRecord *bufRecord = GetRecord(buffer);

  if(m_State >= WRITING)
  {
    Chunk *bindChunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(BIND_BUFFER_MEM);
      Serialise_vkBindBufferMemory(localSerialiser, device, buffer, mem, memOffset);

      bindChunk = scope.Get();
    }

    // Memory object bindings are immutable and must precede creation or use of the
    // buffer, so this chunk can always live on the buffer's record - even when the
    // resource is created and bound to memory mid-frame.
    bufRecord->AddChunk(bindChunk);
    bufRecord->AddParent(GetRecord(mem));
    bufRecord->baseResource = GetResID(mem);
  }

  return ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buffer), Unwrap(mem), memOffset);
}
VkResult WrappedVulkan::vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                       const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer)
{
  VkResult ret = ObjDisp(device)->CreateBuffer(Unwrap(device), pCreateInfo, pAllocator, pBuffer);

  // SHARING: pCreateInfo sharingMode, queueFamilyCount, pQueueFamilyIndices

  if(ret != VK_SUCCESS)
    return ret;

  ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pBuffer);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(CREATE_BUFFER);
      Serialise_vkCreateBuffer(localSerialiser, device, pCreateInfo, NULL, pBuffer);

      chunk = scope.Get();
    }

    VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pBuffer);
    record->AddChunk(chunk);

    if(pCreateInfo->flags &
       (VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT))
    {
      record->sparseInfo = new SparseMapping();

      // buffers are always bound opaquely and in arbitrary divisions, sparse residency
      // only means not all the buffer needs to be bound, which is not that interesting for
      // our purposes

      {
        SCOPED_LOCK(m_CapTransitionLock);
        if(m_State == WRITING_CAPFRAME)
          GetResourceManager()->MarkPendingDirty(id);
        else
          GetResourceManager()->MarkDirtyResource(id);
      }
    }
  }
  else
  {
    GetResourceManager()->AddLiveResource(id, *pBuffer);

    m_CreationInfo.m_Buffer[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
  }

  return ret;
}
VkResult WrappedVulkan::vkCreatePipelineLayout(VkDevice device,
                                               const VkPipelineLayoutCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator,
                                               VkPipelineLayout *pPipelineLayout)
{
  // unwrap every referenced descriptor set layout before passing down to the driver
  VkDescriptorSetLayout *unwrapped = GetTempArray<VkDescriptorSetLayout>(pCreateInfo->setLayoutCount);
  for(uint32_t l = 0; l < pCreateInfo->setLayoutCount; l++)
    unwrapped[l] = Unwrap(pCreateInfo->pSetLayouts[l]);

  VkPipelineLayoutCreateInfo unwrappedInfo = *pCreateInfo;
  unwrappedInfo.pSetLayouts = unwrapped;

  VkResult ret =
      ObjDisp(device)->CreatePipelineLayout(Unwrap(device), &unwrappedInfo, pAllocator, pPipelineLayout);

  if(ret != VK_SUCCESS)
    return ret;

  ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pPipelineLayout);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(CREATE_PIPE_LAYOUT);
      // note: serialise the original (wrapped) create info, not the unwrapped copy
      Serialise_vkCreatePipelineLayout(localSerialiser, device, pCreateInfo, NULL, pPipelineLayout);

      chunk = scope.Get();
    }

    VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pPipelineLayout);
    record->AddChunk(chunk);

    // the layout depends on each set layout it references
    for(uint32_t l = 0; l < pCreateInfo->setLayoutCount; l++)
      record->AddParent(GetRecord(pCreateInfo->pSetLayouts[l]));
  }
  else
  {
    GetResourceManager()->AddLiveResource(id, *pPipelineLayout);

    m_CreationInfo.m_PipelineLayout[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
  }

  return ret;
}
VkResult WrappedVulkan::vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                          const VkAllocationCallbacks *pAllocator, VkImageView *pView)
{
  // only the image handle needs unwrapping in the create info
  VkImageViewCreateInfo unwrappedInfo = *pCreateInfo;
  unwrappedInfo.image = Unwrap(unwrappedInfo.image);

  VkResult ret = ObjDisp(device)->CreateImageView(Unwrap(device), &unwrappedInfo, pAllocator, pView);

  if(ret != VK_SUCCESS)
    return ret;

  ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pView);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(CREATE_IMAGE_VIEW);
      Serialise_vkCreateImageView(localSerialiser, device, pCreateInfo, NULL, pView);

      chunk = scope.Get();
    }

    VkResourceRecord *imageRecord = GetRecord(pCreateInfo->image);

    VkResourceRecord *viewRecord = GetResourceManager()->AddResourceRecord(*pView);
    viewRecord->AddChunk(chunk);
    viewRecord->AddParent(imageRecord);

    // store the base resource. Note images have a baseResource pointing
    // to their memory, which we will also need so we store that separately
    viewRecord->baseResource = imageRecord->GetResourceID();
    viewRecord->baseResourceMem = imageRecord->baseResource;
    viewRecord->sparseInfo = imageRecord->sparseInfo;
  }
  else
  {
    GetResourceManager()->AddLiveResource(id, *pView);

    m_CreationInfo.m_ImageView[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
  }

  return ret;
}
VkResult WrappedVulkan::vkCreatePipelineCache(VkDevice device,
                                              const VkPipelineCacheCreateInfo *pCreateInfo,
                                              const VkAllocationCallbacks *pAllocator,
                                              VkPipelineCache *pPipelineCache)
{
  if(pCreateInfo->initialDataSize > 0)
  {
    RDCWARN(
        "Application provided pipeline cache data! This is invalid, as RenderDoc reports "
        "incompatibility with previous caches");
  }

  // pretend the user didn't provide any cache data
  VkPipelineCacheCreateInfo createInfo = *pCreateInfo;
  createInfo.initialDataSize = 0;
  createInfo.pInitialData = NULL;

  VkResult ret =
      ObjDisp(device)->CreatePipelineCache(Unwrap(device), &createInfo, pAllocator, pPipelineCache);

  if(ret != VK_SUCCESS)
    return ret;

  ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pPipelineCache);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(CREATE_PIPE_CACHE);
      // serialise the sanitised create info so replay never sees initial data
      Serialise_vkCreatePipelineCache(localSerialiser, device, &createInfo, NULL, pPipelineCache);

      chunk = scope.Get();
    }

    VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pPipelineCache);
    record->AddChunk(chunk);
  }
  else
  {
    GetResourceManager()->AddLiveResource(id, *pPipelineCache);
  }

  return ret;
}
VkResult WrappedVulkan::vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkBufferView *pView)
{
  // only the buffer handle needs unwrapping in the create info
  VkBufferViewCreateInfo unwrappedInfo = *pCreateInfo;
  unwrappedInfo.buffer = Unwrap(unwrappedInfo.buffer);

  VkResult ret = ObjDisp(device)->CreateBufferView(Unwrap(device), &unwrappedInfo, pAllocator, pView);

  if(ret != VK_SUCCESS)
    return ret;

  ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pView);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(CREATE_BUFFER_VIEW);
      Serialise_vkCreateBufferView(localSerialiser, device, pCreateInfo, NULL, pView);

      chunk = scope.Get();
    }

    VkResourceRecord *bufferRecord = GetRecord(pCreateInfo->buffer);

    VkResourceRecord *viewRecord = GetResourceManager()->AddResourceRecord(*pView);
    viewRecord->AddChunk(chunk);
    viewRecord->AddParent(bufferRecord);

    // store the base resource (the buffer's underlying memory)
    viewRecord->baseResource = bufferRecord->baseResource;
    viewRecord->sparseInfo = bufferRecord->sparseInfo;
  }
  else
  {
    GetResourceManager()->AddLiveResource(id, *pView);

    m_CreationInfo.m_BufferView[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
  }

  return ret;
}
void WrappedVulkan::vkCmdSetBlendConstants(VkCommandBuffer cmdBuffer, const float *blendConst)
{
  SCOPED_DBG_SINK();

  // forward to the driver first, then record the call if we're capturing
  ObjDisp(cmdBuffer)->CmdSetBlendConstants(Unwrap(cmdBuffer), blendConst);

  if(m_State < WRITING)
    return;

  CACHE_THREAD_SERIALISER();

  SCOPED_SERIALISE_CONTEXT(SET_BLEND_CONST);
  Serialise_vkCmdSetBlendConstants(localSerialiser, cmdBuffer, blendConst);

  GetRecord(cmdBuffer)->AddChunk(scope.Get());
}
void WrappedVulkan::vkCmdSetLineWidth(VkCommandBuffer cmdBuffer, float lineWidth)
{
  SCOPED_DBG_SINK();

  // forward to the driver first, then record the call if we're capturing
  ObjDisp(cmdBuffer)->CmdSetLineWidth(Unwrap(cmdBuffer), lineWidth);

  if(m_State < WRITING)
    return;

  CACHE_THREAD_SERIALISER();

  SCOPED_SERIALISE_CONTEXT(SET_LINE_WIDTH);
  Serialise_vkCmdSetLineWidth(localSerialiser, cmdBuffer, lineWidth);

  GetRecord(cmdBuffer)->AddChunk(scope.Get());
}
void WrappedVulkan::vkCmdSetViewport(VkCommandBuffer cmdBuffer, uint32_t firstViewport,
                                     uint32_t viewportCount, const VkViewport *pViewports)
{
  SCOPED_DBG_SINK();

  // forward to the driver first, then record the call if we're capturing
  ObjDisp(cmdBuffer)->CmdSetViewport(Unwrap(cmdBuffer), firstViewport, viewportCount, pViewports);

  if(m_State < WRITING)
    return;

  CACHE_THREAD_SERIALISER();

  SCOPED_SERIALISE_CONTEXT(SET_VP);
  Serialise_vkCmdSetViewport(localSerialiser, cmdBuffer, firstViewport, viewportCount, pViewports);

  GetRecord(cmdBuffer)->AddChunk(scope.Get());
}
void WrappedVulkan::vkCmdSetStencilReference(VkCommandBuffer cmdBuffer, VkStencilFaceFlags faceMask,
                                             uint32_t reference)
{
  SCOPED_DBG_SINK();

  // forward to the driver first, then record the call if we're capturing
  ObjDisp(cmdBuffer)->CmdSetStencilReference(Unwrap(cmdBuffer), faceMask, reference);

  if(m_State < WRITING)
    return;

  CACHE_THREAD_SERIALISER();

  SCOPED_SERIALISE_CONTEXT(SET_STENCIL_REF);
  Serialise_vkCmdSetStencilReference(localSerialiser, cmdBuffer, faceMask, reference);

  GetRecord(cmdBuffer)->AddChunk(scope.Get());
}
void WrappedVulkan::vkCmdSetDepthBounds(VkCommandBuffer cmdBuffer, float minDepthBounds,
                                        float maxDepthBounds)
{
  SCOPED_DBG_SINK();

  // forward to the driver first, then record the call if we're capturing
  ObjDisp(cmdBuffer)->CmdSetDepthBounds(Unwrap(cmdBuffer), minDepthBounds, maxDepthBounds);

  if(m_State < WRITING)
    return;

  CACHE_THREAD_SERIALISER();

  SCOPED_SERIALISE_CONTEXT(SET_DEPTH_BOUNDS);
  Serialise_vkCmdSetDepthBounds(localSerialiser, cmdBuffer, minDepthBounds, maxDepthBounds);

  GetRecord(cmdBuffer)->AddChunk(scope.Get());
}
void WrappedVulkan::vkCmdSetScissor(VkCommandBuffer cmdBuffer, uint32_t firstScissor,
                                    uint32_t scissorCount, const VkRect2D *pScissors)
{
  SCOPED_DBG_SINK();

  // forward to the driver first, then record the call if we're capturing
  ObjDisp(cmdBuffer)->CmdSetScissor(Unwrap(cmdBuffer), firstScissor, scissorCount, pScissors);

  if(m_State < WRITING)
    return;

  CACHE_THREAD_SERIALISER();

  SCOPED_SERIALISE_CONTEXT(SET_SCISSOR);
  Serialise_vkCmdSetScissor(localSerialiser, cmdBuffer, firstScissor, scissorCount, pScissors);

  GetRecord(cmdBuffer)->AddChunk(scope.Get());
}
VkResult WrappedVulkan::vkRegisterDeviceEventEXT(VkDevice device,
                                                 const VkDeviceEventInfoEXT *pDeviceEventInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkFence *pFence)
{
  // for now we emulate this on replay as just a regular fence create, since we don't faithfully
  // replay sync events anyway.
  VkResult ret =
      ObjDisp(device)->RegisterDeviceEventEXT(Unwrap(device), pDeviceEventInfo, pAllocator, pFence);

  if(ret != VK_SUCCESS)
    return ret;

  ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pFence);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      // serialise a pre-signalled fence creation in place of the device event registration
      VkFenceCreateInfo createInfo = {
          VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, NULL, VK_FENCE_CREATE_SIGNALED_BIT,
      };

      SCOPED_SERIALISE_CONTEXT(CREATE_FENCE);
      Serialise_vkCreateFence(localSerialiser, device, &createInfo, NULL, pFence);

      chunk = scope.Get();
    }

    VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pFence);
    record->AddChunk(chunk);
  }
  else
  {
    GetResourceManager()->AddLiveResource(id, *pFence);
  }

  return ret;
}
void WrappedVulkan::vkCmdSetDepthBias(VkCommandBuffer cmdBuffer, float depthBias,
                                      float depthBiasClamp, float slopeScaledDepthBias)
{
  SCOPED_DBG_SINK();

  // forward to the driver first, then record the call if we're capturing
  ObjDisp(cmdBuffer)->CmdSetDepthBias(Unwrap(cmdBuffer), depthBias, depthBiasClamp,
                                      slopeScaledDepthBias);

  if(m_State < WRITING)
    return;

  CACHE_THREAD_SERIALISER();

  SCOPED_SERIALISE_CONTEXT(SET_DEPTH_BIAS);
  Serialise_vkCmdSetDepthBias(localSerialiser, cmdBuffer, depthBias, depthBiasClamp,
                              slopeScaledDepthBias);

  GetRecord(cmdBuffer)->AddChunk(scope.Get());
}
void WrappedVulkan::vkCmdResetEvent(VkCommandBuffer cmdBuffer, VkEvent event,
                                    VkPipelineStageFlags stageMask)
{
  SCOPED_DBG_SINK();

  // forward to the driver first, then record the call if we're capturing
  ObjDisp(cmdBuffer)->CmdResetEvent(Unwrap(cmdBuffer), Unwrap(event), stageMask);

  if(m_State < WRITING)
    return;

  VkResourceRecord *cmdRecord = GetRecord(cmdBuffer);

  CACHE_THREAD_SERIALISER();

  SCOPED_SERIALISE_CONTEXT(CMD_RESET_EVENT);
  Serialise_vkCmdResetEvent(localSerialiser, cmdBuffer, event, stageMask);

  cmdRecord->AddChunk(scope.Get());
  // the event is read (waited on / reset) by this command buffer
  cmdRecord->MarkResourceFrameReferenced(GetResID(event), eFrameRef_Read);
}
VkResult WrappedVulkan::vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                        const VkAllocationCallbacks *pAllocator, VkSampler *pSampler)
{
  // nothing in the create info needs unwrapping
  VkResult ret = ObjDisp(device)->CreateSampler(Unwrap(device), pCreateInfo, pAllocator, pSampler);

  if(ret != VK_SUCCESS)
    return ret;

  ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pSampler);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(CREATE_SAMPLER);
      Serialise_vkCreateSampler(localSerialiser, device, pCreateInfo, NULL, pSampler);

      chunk = scope.Get();
    }

    VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pSampler);
    record->AddChunk(chunk);
  }
  else
  {
    GetResourceManager()->AddLiveResource(id, *pSampler);

    m_CreationInfo.m_Sampler[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
  }

  return ret;
}
VkResult WrappedVulkan::vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
                                          VkDeviceSize memOffset)
{
  VkResourceRecord *imgRecord = GetRecord(image);

  if(m_State >= WRITING)
  {
    Chunk *bindChunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(BIND_IMAGE_MEM);
      Serialise_vkBindImageMemory(localSerialiser, device, image, mem, memOffset);

      bindChunk = scope.Get();
    }

    // Memory object bindings are immutable and must precede creation or use of the
    // image, so this chunk can always live on the image's record - even when the
    // resource is created and bound to memory mid-frame.
    imgRecord->AddChunk(bindChunk);
    imgRecord->AddParent(GetRecord(mem));

    // images are a base resource but we want to track where their memory comes from.
    // Anything that looks up a baseResource for an image knows not to chase further
    // than the image.
    imgRecord->baseResource = GetResID(mem);
  }

  return ObjDisp(device)->BindImageMemory(Unwrap(device), Unwrap(image), Unwrap(mem), memOffset);
}
VkResult WrappedVulkan::vkDbgSetObjectName(VkDevice device, VkDebugReportObjectTypeEXT objType,
                                           uint64_t object, size_t nameSize, const char *pName)
{
  // the extension entry point may be absent if the driver doesn't implement it
  if(ObjDisp(device)->DbgSetObjectName)
    ObjDisp(device)->DbgSetObjectName(device, objType, object, nameSize, pName);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    VkResourceRecord *record = GetObjRecord(objType, object);

    if(!record)
    {
      RDCERR("Unrecognised object %d %llu", objType, object);
      return VK_SUCCESS;
    }

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(SET_NAME);
      Serialise_vkDbgSetObjectName(localSerialiser, device, objType, object, nameSize, pName);

      chunk = scope.Get();
    }

    record->AddChunk(chunk);
  }

  return VK_SUCCESS;
}
VkResult WrappedVulkan::vkCreateDescriptorPool(VkDevice device,
                                               const VkDescriptorPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator,
                                               VkDescriptorPool *pDescriptorPool)
{
  // nothing in the create info needs unwrapping
  VkResult ret =
      ObjDisp(device)->CreateDescriptorPool(Unwrap(device), pCreateInfo, pAllocator, pDescriptorPool);

  if(ret != VK_SUCCESS)
    return ret;

  ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pDescriptorPool);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(CREATE_DESCRIPTOR_POOL);
      Serialise_vkCreateDescriptorPool(localSerialiser, device, pCreateInfo, NULL, pDescriptorPool);

      chunk = scope.Get();
    }

    VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pDescriptorPool);
    record->AddChunk(chunk);
  }
  else
  {
    GetResourceManager()->AddLiveResource(id, *pDescriptorPool);
  }

  return ret;
}
// Unmaps a memory object. While capturing, this is where coherent-mapped memory
// contents get serialised (non-coherent memory must be explicitly flushed by the
// app, so nothing is serialised for it here), and the map-tracking state is torn
// down before the call is forwarded to the driver.
void WrappedVulkan::vkUnmapMemory(VkDevice device, VkDeviceMemory mem)
{
  if(m_State >= WRITING)
  {
    ResourceId id = GetResID(mem);

    VkResourceRecord *memrecord = GetRecord(mem);

    RDCASSERT(memrecord->memMapState);
    MemMapState &state = *memrecord->memMapState;

    {
      // decide atomically if this chunk should be in-frame or not
      // so that we're not in the else branch but haven't marked
      // dirty when capframe starts, then we mark dirty while in-frame
      bool capframe = false;
      {
        SCOPED_LOCK(m_CapTransitionLock);
        capframe = (m_State == WRITING_CAPFRAME);

        if(!capframe)
          GetResourceManager()->MarkDirtyResource(id);
      }

      if(capframe)
      {
        // coherent maps must always serialise all data on unmap, even if a flush was seen, because
        // unflushed data is *also* visible. This is a bit redundant since data is serialised here
        // and in any flushes, but that's the app's fault - the spec calls out flushing coherent maps
        // as inefficient

        // if the memory is not coherent, we must have a flush for every region written while it is
        // mapped, there is no implicit flush on unmap, so we follow the spec strictly on this.
        if(state.mapCoherent)
        {
          CACHE_THREAD_SERIALISER();

          SCOPED_SERIALISE_CONTEXT(UNMAP_MEM);
          Serialise_vkUnmapMemory(localSerialiser, device, mem);

          VkResourceRecord *record = GetRecord(mem);

          if(m_State == WRITING_IDLE)
          {
            record->AddChunk(scope.Get());
          }
          else
          {
            m_FrameCaptureRecord->AddChunk(scope.Get());
            GetResourceManager()->MarkResourceFrameReferenced(id, eFrameRef_Write);
          }
        }
      }

      state.mappedPtr = NULL;
    }

    Serialiser::FreeAlignedBuffer(state.refData);
    // NULL the pointer after freeing so that a subsequent map/flush of this memory
    // can't read or re-free a dangling buffer
    state.refData = NULL;

    if(state.mapCoherent)
    {
      SCOPED_LOCK(m_CoherentMapsLock);

      auto it = std::find(m_CoherentMaps.begin(), m_CoherentMaps.end(), memrecord);
      if(it == m_CoherentMaps.end())
        RDCERR("vkUnmapMemory for memory handle that's not currently mapped");
      else
        m_CoherentMaps.erase(it);    // erasing end() is undefined behaviour - only erase if found
    }
  }

  ObjDisp(device)->UnmapMemory(Unwrap(device), Unwrap(mem));
}
// Allocates device memory. While capturing, the requested memory type index is
// remapped through the physical device's memIdxMap (used to discourage coherent
// memory types - see RemapMemoryIndices at physical device enumeration). On
// replay, a buffer covering the whole allocation is created for convenient
// copies of initial-state data.
VkResult WrappedVulkan::vkAllocateMemory(
    VkDevice device,
    const VkMemoryAllocateInfo* pAllocateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDeviceMemory* pMemory)
{
  VkMemoryAllocateInfo info = *pAllocateInfo;
  // remap to our preferred memory type; the original (unremapped) info is what
  // gets serialised below, so replay can apply its own remapping
  if(m_State >= WRITING)
    info.memoryTypeIndex = GetRecord(device)->memIdxMap[info.memoryTypeIndex];

  VkResult ret = ObjDisp(device)->AllocateMemory(Unwrap(device), &info, pAllocator, pMemory);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pMemory);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(ALLOC_MEM);
        // note: serialises pAllocateInfo, not the remapped copy
        Serialise_vkAllocateMemory(localSerialiser, device, pAllocateInfo, NULL, pMemory);

        chunk = scope.Get();
      }

      // create resource record for gpu memory
      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pMemory);
      RDCASSERT(record);

      record->AddChunk(chunk);
      record->Length = pAllocateInfo->allocationSize;

      // look up properties via the fake (reported-to-app) memory properties
      uint32_t memProps =
          m_PhysicalDeviceData.fakeMemProps->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;

      // if memory is not host visible, so not mappable, don't create map state at all
      if((memProps & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
      {
        record->memMapState = new MemMapState();
        record->memMapState->mapCoherent = (memProps & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
        record->memMapState->refData = NULL;
      }
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pMemory);

      m_CreationInfo.m_Memory[id].Init(GetResourceManager(), m_CreationInfo, pAllocateInfo);

      // create a buffer with the whole memory range bound, for copying to and from
      // conveniently (for initial state data)
      VkBuffer buf = VK_NULL_HANDLE;

      // remaining members zero-initialised by aggregate init (no sharing/queue families)
      VkBufferCreateInfo bufInfo = {
          VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL, 0,
          info.allocationSize,
          VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT,
      };

      // note: re-uses 'ret' - assumed to succeed (asserted below) so the function
      // still returns a success code to the caller
      ret = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
      RDCASSERTEQUAL(ret, VK_SUCCESS);

      ResourceId bufid = GetResourceManager()->WrapResource(Unwrap(device), buf);

      // bind the whole-range buffer at offset 0 of the new allocation
      ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buf), Unwrap(*pMemory), 0);

      // register as a live-only resource, so it is cleaned up properly
      GetResourceManager()->AddLiveResource(bufid, buf);

      m_CreationInfo.m_Memory[id].wholeMemBuf = buf;
    }
  }

  return ret;
}
// Creates an image. TRANSFER_SRC usage is forced on so RenderDoc can read back
// image contents for capture/inspection. While capturing, sparse images are
// marked dirty and (for sparse residency) per-aspect page tables are created;
// initial image layout state is registered for layout tracking in all cases.
VkResult WrappedVulkan::vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                      const VkAllocationCallbacks *pAllocator, VkImage *pImage)
{
  VkImageCreateInfo createInfo_adjusted = *pCreateInfo;

  // we need to be able to copy out of any image for readback
  createInfo_adjusted.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

  VkResult ret = ObjDisp(device)->CreateImage(Unwrap(device), &createInfo_adjusted, pAllocator, pImage);

  // SHARING: pCreateInfo sharingMode, queueFamilyCount, pQueueFamilyIndices

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pImage);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_IMAGE);
        // serialise the original create info - the usage adjustment is re-applied on replay
        Serialise_vkCreateImage(localSerialiser, device, pCreateInfo, NULL, pImage);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pImage);
      record->AddChunk(chunk);

      if(pCreateInfo->flags &
         (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT))
      {
        record->sparseInfo = new SparseMapping();

        {
          // sparse contents can't be tracked via binds alone, treat as dirty
          SCOPED_LOCK(m_CapTransitionLock);
          if(m_State != WRITING_CAPFRAME)
            GetResourceManager()->MarkDirtyResource(id);
          else
            GetResourceManager()->MarkPendingDirty(id);
        }

        if(pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)
        {
          // must record image and page dimension, and create page tables
          uint32_t numreqs = NUM_VK_IMAGE_ASPECTS;
          VkSparseImageMemoryRequirements reqs[NUM_VK_IMAGE_ASPECTS];
          ObjDisp(device)->GetImageSparseMemoryRequirements(Unwrap(device), Unwrap(*pImage),
                                                            &numreqs, reqs);

          RDCASSERT(numreqs > 0);

          record->sparseInfo->pagedim = reqs[0].formatProperties.imageGranularity;
          record->sparseInfo->imgdim = pCreateInfo->extent;
          record->sparseInfo->imgdim.width /= record->sparseInfo->pagedim.width;
          record->sparseInfo->imgdim.height /= record->sparseInfo->pagedim.height;
          record->sparseInfo->imgdim.depth /= record->sparseInfo->pagedim.depth;

          uint32_t numpages = record->sparseInfo->imgdim.width * record->sparseInfo->imgdim.height *
                              record->sparseInfo->imgdim.depth;

          for(uint32_t i = 0; i < numreqs; i++)
          {
            // assume all page sizes are the same for all aspects
            RDCASSERT(record->sparseInfo->pagedim.width ==
                          reqs[i].formatProperties.imageGranularity.width &&
                      record->sparseInfo->pagedim.height ==
                          reqs[i].formatProperties.imageGranularity.height &&
                      record->sparseInfo->pagedim.depth ==
                          reqs[i].formatProperties.imageGranularity.depth);

            int a = 0;
            for(; a < NUM_VK_IMAGE_ASPECTS; a++)
              if(reqs[i].formatProperties.aspectMask & (1 << a))
                break;

            // guard against an aspect mask with no recognised bits - previously this
            // would have written out of bounds past the end of the pages array
            if(a < NUM_VK_IMAGE_ASPECTS)
              record->sparseInfo->pages[a] = new pair<VkDeviceMemory, VkDeviceSize>[numpages];
            else
              RDCERR("Unexpected aspect mask %x in sparse image requirements",
                     reqs[i].formatProperties.aspectMask);
          }
        }
        else
        {
          // don't have to do anything, image is opaque and must be fully bound, just need
          // to track the memory bindings.
        }
      }
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pImage);

      m_CreationInfo.m_Image[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
    }

    // register the initial (UNDEFINED) layout for every subresource of the image
    VkImageSubresourceRange range;
    range.baseMipLevel = range.baseArrayLayer = 0;
    range.levelCount = pCreateInfo->mipLevels;
    range.layerCount = pCreateInfo->arrayLayers;
    if(pCreateInfo->imageType == VK_IMAGE_TYPE_3D)
      range.layerCount = pCreateInfo->extent.depth;

    ImageLayouts *layout = NULL;
    {
      SCOPED_LOCK(m_ImageLayoutsLock);
      layout = &m_ImageLayouts[id];
    }

    layout->layerCount = pCreateInfo->arrayLayers;
    layout->levelCount = pCreateInfo->mipLevels;
    layout->extent = pCreateInfo->extent;
    layout->format = pCreateInfo->format;

    layout->subresourceStates.clear();

    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    if(IsDepthOnlyFormat(pCreateInfo->format))
      range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    else if(IsDepthStencilFormat(pCreateInfo->format))
      range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;

    layout->subresourceStates.push_back(
        ImageRegionState(range, UNKNOWN_PREV_IMG_LAYOUT, VK_IMAGE_LAYOUT_UNDEFINED));
  }

  return ret;
}
// Allocates descriptor sets from a pool. Each returned set gets its own record,
// is registered as a pooled child of the descriptor pool's record (so it's freed
// with the pool), and is always treated as dirty since descriptor contents can
// change at any time without an API-visible resource transition.
VkResult WrappedVulkan::vkAllocateDescriptorSets(
    VkDevice device,
    const VkDescriptorSetAllocateInfo* pAllocateInfo,
    VkDescriptorSet* pDescriptorSets)
{
  // single temp allocation holding the unwrapped allocate info plus its layout array
  size_t tempmemSize = sizeof(VkDescriptorSetAllocateInfo) +
                       sizeof(VkDescriptorSetLayout)*pAllocateInfo->descriptorSetCount;

  byte *memory = GetTempMemory(tempmemSize);

  VkDescriptorSetAllocateInfo *unwrapped = (VkDescriptorSetAllocateInfo *)memory;
  VkDescriptorSetLayout *layouts = (VkDescriptorSetLayout *)(unwrapped + 1);

  *unwrapped = *pAllocateInfo;
  unwrapped->pSetLayouts = layouts;
  unwrapped->descriptorPool = Unwrap(unwrapped->descriptorPool);
  for(uint32_t i=0; i < pAllocateInfo->descriptorSetCount; i++)
    layouts[i] = Unwrap(pAllocateInfo->pSetLayouts[i]);

  VkResult ret = ObjDisp(device)->AllocateDescriptorSets(Unwrap(device), unwrapped, pDescriptorSets);

  if(ret != VK_SUCCESS)
    return ret;

  for(uint32_t i=0; i < pAllocateInfo->descriptorSetCount; i++)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), pDescriptorSets[i]);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        // serialise each set as its own single-set allocation so they replay independently
        VkDescriptorSetAllocateInfo info = *pAllocateInfo;
        info.descriptorSetCount = 1;
        info.pSetLayouts += i;

        SCOPED_SERIALISE_CONTEXT(ALLOC_DESC_SET);
        Serialise_vkAllocateDescriptorSets(localSerialiser, device, &info, &pDescriptorSets[i]);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(pDescriptorSets[i]);
      record->AddChunk(chunk);

      ResourceId layoutID = GetResID(pAllocateInfo->pSetLayouts[i]);
      VkResourceRecord *layoutRecord = GetRecord(pAllocateInfo->pSetLayouts[i]);

      VkResourceRecord *poolrecord = GetRecord(pAllocateInfo->descriptorPool);

      // freeing the pool frees all sets allocated from it, so track as pooled children
      {
        poolrecord->LockChunks();
        poolrecord->pooledChildren.push_back(record);
        poolrecord->UnlockChunks();
      }

      record->pool = poolrecord;

      record->AddParent(poolrecord);
      record->AddParent(GetResourceManager()->GetResourceRecord(layoutID));

      // just always treat descriptor sets as dirty
      {
        SCOPED_LOCK(m_CapTransitionLock);
        if(m_State != WRITING_CAPFRAME)
          GetResourceManager()->MarkDirtyResource(id);
        else
          GetResourceManager()->MarkPendingDirty(id);
      }

      // mirror the layout's binding structure so writes/copies can be tracked per-binding
      record->descInfo = new DescriptorSetData();
      record->descInfo->layout = layoutRecord->descInfo->layout;
      record->descInfo->layout->CreateBindingsArray(record->descInfo->descBindings);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, pDescriptorSets[i]);
    }
  }

  return ret;
}
// Creates the logical device. This is heavily instrumented: RenderDoc needs a
// graphics-capable queue for its own internal work, so if the app didn't request
// one the create info is amended to add it. The Vulkan layer pNext chain is then
// walked to find the next layer's entry points before the call is forwarded down
// the chain. After creation, per-device state (internal command pool, physical
// device data, debug manager) is set up.
VkResult WrappedVulkan::vkCreateDevice(
    VkPhysicalDevice physicalDevice,
    const VkDeviceCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDevice* pDevice)
{
  VkDeviceCreateInfo createInfo = *pCreateInfo;

  uint32_t qCount = 0;
  VkResult vkr = VK_SUCCESS;

  ObjDisp(physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(Unwrap(physicalDevice), &qCount, NULL);

  VkQueueFamilyProperties *props = new VkQueueFamilyProperties[qCount];
  ObjDisp(physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(Unwrap(physicalDevice), &qCount, props);

  // find a queue that supports all capabilities, and if one doesn't exist, add it.
  bool found = false;
  uint32_t qFamilyIdx = 0;
  VkQueueFlags search = (VK_QUEUE_GRAPHICS_BIT);

  // for queue priorities, if we need it
  float one = 1.0f;

  // if we need to change the requested queues, it will point to this
  VkDeviceQueueCreateInfo *modQueues = NULL;

  for(uint32_t i=0; i < createInfo.queueCreateInfoCount; i++)
  {
    uint32_t idx = createInfo.pQueueCreateInfos[i].queueFamilyIndex;
    RDCASSERT(idx < qCount);

    // this requested queue is one we can use too
    if((props[idx].queueFlags & search) == search && createInfo.pQueueCreateInfos[i].queueCount > 0)
    {
      qFamilyIdx = idx;
      found = true;
      break;
    }
  }

  // if we didn't find it, search for which queue family we should add a request for
  if(!found)
  {
    RDCDEBUG("App didn't request a queue family we can use - adding our own");

    for(uint32_t i=0; i < qCount; i++)
    {
      if((props[i].queueFlags & search) == search)
      {
        qFamilyIdx = i;
        found = true;
        break;
      }
    }

    if(!found)
    {
      SAFE_DELETE_ARRAY(props);
      RDCERR("Can't add a queue with required properties for RenderDoc! Unsupported configuration");
      return VK_ERROR_INITIALIZATION_FAILED;
    }

    // we found the queue family, add it to the requested queues in a modified array
    modQueues = new VkDeviceQueueCreateInfo[createInfo.queueCreateInfoCount + 1];
    for(uint32_t i=0; i < createInfo.queueCreateInfoCount; i++)
      modQueues[i] = createInfo.pQueueCreateInfos[i];

    modQueues[createInfo.queueCreateInfoCount].queueFamilyIndex = qFamilyIdx;
    modQueues[createInfo.queueCreateInfoCount].queueCount = 1;
    modQueues[createInfo.queueCreateInfoCount].pQueuePriorities = &one;

    createInfo.pQueueCreateInfos = modQueues;
    createInfo.queueCreateInfoCount++;
  }

  SAFE_DELETE_ARRAY(props);

  // reserve per-family VkQueue arrays for every requested family
  m_QueueFamilies.resize(createInfo.queueCreateInfoCount);
  for(size_t i=0; i < createInfo.queueCreateInfoCount; i++)
  {
    uint32_t family = createInfo.pQueueCreateInfos[i].queueFamilyIndex;
    uint32_t count = createInfo.pQueueCreateInfos[i].queueCount;
    m_QueueFamilies.resize(RDCMAX(m_QueueFamilies.size(), size_t(family+1)));

    m_QueueFamilies[family] = new VkQueue[count];
    for(uint32_t q=0; q < count; q++)
      m_QueueFamilies[family][q] = VK_NULL_HANDLE;
  }

  VkLayerDeviceCreateInfo *layerCreateInfo = (VkLayerDeviceCreateInfo *)pCreateInfo->pNext;

  // step through the chain of pNext until we get to the link info
  while(layerCreateInfo &&
        (layerCreateInfo->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO ||
         layerCreateInfo->function != VK_LAYER_LINK_INFO)
       )
  {
    layerCreateInfo = (VkLayerDeviceCreateInfo *)layerCreateInfo->pNext;
  }
  RDCASSERT(layerCreateInfo);

  PFN_vkGetDeviceProcAddr gdpa = layerCreateInfo->u.pLayerInfo->pfnNextGetDeviceProcAddr;
  PFN_vkGetInstanceProcAddr gipa = layerCreateInfo->u.pLayerInfo->pfnNextGetInstanceProcAddr;
  // move chain on for next layer
  layerCreateInfo->u.pLayerInfo = layerCreateInfo->u.pLayerInfo->pNext;

  PFN_vkCreateDevice createFunc = (PFN_vkCreateDevice)gipa(VK_NULL_HANDLE, "vkCreateDevice");

  // now search again through for the loader data callback (if it exists)
  layerCreateInfo = (VkLayerDeviceCreateInfo *)pCreateInfo->pNext;

  // step through the chain of pNext
  while(layerCreateInfo &&
        (layerCreateInfo->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO ||
         layerCreateInfo->function != VK_LOADER_DATA_CALLBACK)
       )
  {
    layerCreateInfo = (VkLayerDeviceCreateInfo *)layerCreateInfo->pNext;
  }

  // if we found one (we might not - on old loaders), then store the func ptr for
  // use instead of SetDispatchTableOverMagicNumber
  if(layerCreateInfo)
  {
    RDCASSERT(m_SetDeviceLoaderData == layerCreateInfo->u.pfnSetDeviceLoaderData ||
              m_SetDeviceLoaderData == NULL,
              m_SetDeviceLoaderData, layerCreateInfo->u.pfnSetDeviceLoaderData);
    m_SetDeviceLoaderData = layerCreateInfo->u.pfnSetDeviceLoaderData;
  }

  VkResult ret = createFunc(Unwrap(physicalDevice), &createInfo, pAllocator, pDevice);

  // don't serialise out any of the pNext stuff for layer initialisation
  // (note that we asserted above that there was nothing else in the chain)
  createInfo.pNext = NULL;

  if(ret == VK_SUCCESS)
  {
    InitDeviceTable(*pDevice, gdpa);

    ResourceId id = GetResourceManager()->WrapResource(*pDevice, *pDevice);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_DEVICE);
        Serialise_vkCreateDevice(localSerialiser, physicalDevice, &createInfo, NULL, pDevice);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pDevice);
      RDCASSERT(record);
      record->AddChunk(chunk);

      record->memIdxMap = GetRecord(physicalDevice)->memIdxMap;

      record->instDevInfo = new InstanceDeviceInfo();

#undef CheckExt
#define CheckExt(name) record->instDevInfo->name = GetRecord(m_Instance)->instDevInfo->name;

      // inherit extension enablement from instance, that way GetDeviceProcAddress can check
      // for enabled extensions for instance functions
      CheckInstanceExts();

#undef CheckExt
#define CheckExt(name) if(!strcmp(createInfo.ppEnabledExtensionNames[i], STRINGIZE(name))) { record->instDevInfo->name = true; }

      // mark each requested device extension as enabled (loop variable 'i' is
      // referenced by the CheckExt macro expansion above)
      for(uint32_t i=0; i < createInfo.enabledExtensionCount; i++)
      {
        CheckDeviceExts();
      }

      InitDeviceExtensionTables(*pDevice);

      GetRecord(m_Instance)->AddParent(record);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pDevice);
    }

    VkDevice device = *pDevice;

    RDCASSERT(m_Device == VK_NULL_HANDLE); // MULTIDEVICE

    m_PhysicalDevice = physicalDevice;
    m_Device = device;

    m_QueueFamilyIdx = qFamilyIdx;

    // create the internal command pool used for RenderDoc's own command buffers
    if(m_InternalCmds.cmdpool == VK_NULL_HANDLE)
    {
      VkCommandPoolCreateInfo poolInfo = {
          VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, NULL,
          VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, qFamilyIdx };
      vkr = ObjDisp(device)->CreateCommandPool(Unwrap(device), &poolInfo, NULL, &m_InternalCmds.cmdpool);
      RDCASSERTEQUAL(vkr, VK_SUCCESS);

      GetResourceManager()->WrapResource(Unwrap(device), m_InternalCmds.cmdpool);
    }

    // cache physical device data for later use
    ObjDisp(physicalDevice)->GetPhysicalDeviceProperties(Unwrap(physicalDevice), &m_PhysicalDeviceData.props);

    ObjDisp(physicalDevice)->GetPhysicalDeviceMemoryProperties(Unwrap(physicalDevice), &m_PhysicalDeviceData.memProps);

    ObjDisp(physicalDevice)->GetPhysicalDeviceFeatures(Unwrap(physicalDevice), &m_PhysicalDeviceData.features);

    // pre-select memory indices for common allocation patterns
    m_PhysicalDeviceData.readbackMemIndex = m_PhysicalDeviceData.GetMemoryIndex(~0U, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
    m_PhysicalDeviceData.uploadMemIndex = m_PhysicalDeviceData.GetMemoryIndex(~0U, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
    m_PhysicalDeviceData.GPULocalMemIndex = m_PhysicalDeviceData.GetMemoryIndex(~0U, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

    m_PhysicalDeviceData.fakeMemProps = GetRecord(physicalDevice)->memProps;

    m_DebugManager = new VulkanDebugManager(this, device);
  }

  SAFE_DELETE_ARRAY(modQueues);

  return ret;
}
// Enumerates and wraps physical devices. Each physical device record stores the
// real memory properties plus a remapped memory index table (used to steer
// allocations away from coherent memory types), and is pooled under the
// instance's record so it's cleaned up with the instance.
// NOTE(review): the full 'count' devices are copied into pPhysicalDevices
// without clamping to the caller's *pPhysicalDeviceCount; presumably callers
// here always pass a large-enough array - confirm against the Vulkan spec's
// two-call enumeration contract (VK_INCOMPLETE handling).
VkResult WrappedVulkan::vkEnumeratePhysicalDevices(
    VkInstance instance,
    uint32_t* pPhysicalDeviceCount,
    VkPhysicalDevice* pPhysicalDevices)
{
  uint32_t count;

  VkResult vkr = ObjDisp(instance)->EnumeratePhysicalDevices(Unwrap(instance), &count, NULL);

  if(vkr != VK_SUCCESS)
    return vkr;

  VkPhysicalDevice *devices = new VkPhysicalDevice[count];

  vkr = ObjDisp(instance)->EnumeratePhysicalDevices(Unwrap(instance), &count, devices);
  RDCASSERTEQUAL(vkr, VK_SUCCESS);

  m_PhysicalDevices.resize(count);

  for(uint32_t i=0; i < count; i++)
  {
    // it's perfectly valid for enumerate type functions to return the same handle
    // each time. If that happens, we will already have a wrapper created so just
    // return the wrapped object to the user and do nothing else
    if(m_PhysicalDevices[i] != VK_NULL_HANDLE)
    {
      GetWrapped(m_PhysicalDevices[i])->RewrapObject(devices[i]);
      devices[i] = m_PhysicalDevices[i];
    }
    else
    {
      GetResourceManager()->WrapResource(instance, devices[i]);

      if(m_State >= WRITING)
      {
        // add the record first since it's used in the serialise function below to fetch
        // the memory indices
        VkResourceRecord *record = GetResourceManager()->AddResourceRecord(devices[i]);
        RDCASSERT(record);

        record->memProps = new VkPhysicalDeviceMemoryProperties();

        ObjDisp(devices[i])->GetPhysicalDeviceMemoryProperties(Unwrap(devices[i]), record->memProps);

        m_PhysicalDevices[i] = devices[i];

        // we remap memory indices to discourage coherent maps as much as possible
        RemapMemoryIndices(record->memProps, &record->memIdxMap);

        {
          CACHE_THREAD_SERIALISER();

          SCOPED_SERIALISE_CONTEXT(ENUM_PHYSICALS);
          Serialise_vkEnumeratePhysicalDevices(localSerialiser, instance, &i, &devices[i]);

          record->AddChunk(scope.Get());
        }

        VkResourceRecord *instrecord = GetRecord(instance);

        instrecord->AddParent(record);

        // treat physical devices as pool members of the instance (ie. freed when the instance dies)
        {
          instrecord->LockChunks();
          instrecord->pooledChildren.push_back(record);
          instrecord->UnlockChunks();
        }
      }
    }
  }

  if(pPhysicalDeviceCount) *pPhysicalDeviceCount = count;
  if(pPhysicalDevices) memcpy(pPhysicalDevices, devices, count*sizeof(VkPhysicalDevice));

  SAFE_DELETE_ARRAY(devices);

  return VK_SUCCESS;
}
// Wraps vkGetDeviceQueue: fetches the real queue from the driver, fixes up its
// dispatch table (queues are dispatchable objects), then wraps and records it.
// Queues are cached per family/index so repeated calls return the same wrapped
// handle without re-recording.
void WrappedVulkan::vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue)
{
  ObjDisp(device)->GetDeviceQueue(Unwrap(device), queueFamilyIndex, queueIndex, pQueue);

  // set up loader data / dispatch table on the new queue handle, so that
  // unwrapped calls made on it dispatch into the correct driver
  if(m_SetDeviceLoaderData)
    m_SetDeviceLoaderData(m_Device, *pQueue);
  else
    SetDispatchTableOverMagicNumber(device, *pQueue);

  // this path is only expected during capture
  RDCASSERT(m_State >= WRITING);

  {
    // it's perfectly valid for enumerate type functions to return the same handle
    // each time. If that happens, we will already have a wrapper created so just
    // return the wrapped object to the user and do nothing else
    if(m_QueueFamilies[queueFamilyIndex][queueIndex] != VK_NULL_HANDLE)
    {
      *pQueue = m_QueueFamilies[queueFamilyIndex][queueIndex];
    }
    else
    {
      ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pQueue);

      {
        Chunk *chunk = NULL;

        {
          CACHE_THREAD_SERIALISER();

          SCOPED_SERIALISE_CONTEXT(GET_DEVICE_QUEUE);
          Serialise_vkGetDeviceQueue(localSerialiser, device, queueFamilyIndex, queueIndex, pQueue);

          chunk = scope.Get();
        }

        VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pQueue);
        RDCASSERT(record);

        VkResourceRecord *instrecord = GetRecord(m_Instance);

        // treat queues as pool members of the instance (ie. freed when the instance dies)
        {
          instrecord->LockChunks();
          instrecord->pooledChildren.push_back(record);
          instrecord->UnlockChunks();
        }

        record->AddChunk(chunk);
      }

      m_QueueFamilies[queueFamilyIndex][queueIndex] = *pQueue;

      // remember the queue on the family we use for internal work
      if(queueFamilyIndex == m_QueueFamilyIdx)
      {
        m_Queue = *pQueue;

        // we can now submit any cmds that were queued (e.g. from creating debug
        // manager on vkCreateDevice)
        SubmitCmds();
      }
    }
  }
}
// Wraps vkAllocateMemory. During capture: remaps the memory type index (see
// RemapMemoryIndices), rounds the allocation size up so a whole-memory buffer
// can later be bound to it, then records the allocation chunk and creates a
// MemMapState for host-visible memory. On replay: registers the live memory and
// creates a whole-range buffer used for initial-state copies.
//
// FIX: the probe VkBuffer below was previously left uninitialised; if
// CreateBuffer failed, the unconditional DestroyBuffer call passed a garbage
// handle to the driver (undefined behaviour). It is now initialised to
// VK_NULL_HANDLE and the destroy is guarded.
VkResult WrappedVulkan::vkAllocateMemory(
    VkDevice device,
    const VkMemoryAllocateInfo* pAllocateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDeviceMemory* pMemory)
{
  // local copy so memoryTypeIndex/allocationSize can be patched before the
  // driver call; memoryTypeIndex is restored before serialising
  VkMemoryAllocateInfo info = *pAllocateInfo;

  if(m_State >= WRITING)
  {
    info.memoryTypeIndex = GetRecord(device)->memIdxMap[info.memoryTypeIndex];

    // we need to be able to allocate a buffer that covers the whole memory range. However
    // if the memory is e.g. 100 bytes (arbitrary example) and buffers have memory requirements
    // such that it must be bound to a multiple of 128 bytes, then we can't create a buffer
    // that entirely covers a 100 byte allocation.
    // To get around this, we create a buffer of the allocation's size with the properties we
    // want, check its required size, then bump up the allocation size to that as if the application
    // had requested more. We're assuming here no system will require something like "buffer of
    // size N must be bound to memory of size N+O for some value of O overhead bytes".
    //
    // this could be optimised as maybe we'll be creating buffers of multiple sizes, but allocation
    // in vulkan is already expensive and making it a little more expensive isn't a big deal.

    VkBufferCreateInfo bufInfo = {
      VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL, 0, info.allocationSize,
      VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT,
    };

    // since this is very short lived, it's not wrapped.
    // initialised to VK_NULL_HANDLE so the destroy below is safe if CreateBuffer
    // fails and leaves the handle untouched
    VkBuffer buf = VK_NULL_HANDLE;

    VkResult vkr = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
    RDCASSERTEQUAL(vkr, VK_SUCCESS);

    if(vkr == VK_SUCCESS && buf != VK_NULL_HANDLE)
    {
      VkMemoryRequirements mrq = { 0 };
      ObjDisp(device)->GetBufferMemoryRequirements(Unwrap(device), buf, &mrq);

      RDCASSERTMSG("memory requirements less than desired size", mrq.size >= bufInfo.size, mrq.size, bufInfo.size);

      // round up allocation size to allow creation of buffers
      if(mrq.size >= bufInfo.size)
        info.allocationSize = mrq.size;
    }

    // only destroy the probe buffer if it was actually created
    if(buf != VK_NULL_HANDLE)
      ObjDisp(device)->DestroyBuffer(Unwrap(device), buf, NULL);
  }

  VkResult ret = ObjDisp(device)->AllocateMemory(Unwrap(device), &info, pAllocator, pMemory);

  // restore the memoryTypeIndex to the original, as that's what we want to serialise,
  // but maintain any potential modifications we made to info.allocationSize
  info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pMemory);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(ALLOC_MEM);
        Serialise_vkAllocateMemory(localSerialiser, device, &info, NULL, pMemory);

        chunk = scope.Get();
      }

      // create resource record for gpu memory
      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pMemory);
      RDCASSERT(record);

      record->AddChunk(chunk);

      record->Length = info.allocationSize;

      uint32_t memProps = m_PhysicalDeviceData.fakeMemProps->memoryTypes[info.memoryTypeIndex].propertyFlags;

      // if memory is not host visible, so not mappable, don't create map state at all
      if((memProps & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
      {
        record->memMapState = new MemMapState();
        record->memMapState->mapCoherent = (memProps & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
        record->memMapState->refData = NULL;
      }
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pMemory);

      m_CreationInfo.m_Memory[id].Init(GetResourceManager(), m_CreationInfo, &info);

      // create a buffer with the whole memory range bound, for copying to and from
      // conveniently (for initial state data)
      VkBuffer buf = VK_NULL_HANDLE;

      VkBufferCreateInfo bufInfo = {
        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL, 0, info.allocationSize,
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT,
      };

      ret = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
      RDCASSERTEQUAL(ret, VK_SUCCESS);

      ResourceId bufid = GetResourceManager()->WrapResource(Unwrap(device), buf);

      ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buf), Unwrap(*pMemory), 0);

      // register as a live-only resource, so it is cleaned up properly
      GetResourceManager()->AddLiveResource(bufid, buf);

      m_CreationInfo.m_Memory[id].wholeMemBuf = buf;
    }
  }

  return ret;
}
// Wraps vkGetSwapchainImagesKHR: returns the driver's swapchain images, but
// during capture substitutes/creates wrapped handles. Images already wrapped at
// swapchain-create time are returned directly; any others are wrapped here and
// their creation chunk is attached to the swapchain's record.
VkResult WrappedVulkan::vkGetSwapchainImagesKHR(
    VkDevice device,
    VkSwapchainKHR swapchain,
    uint32_t* pCount,
    VkImage* pSwapchainImages)
{
  // make sure we always get the size
  uint32_t dummySize = 0;
  if(pCount == NULL)
    pCount = &dummySize;

  VkResult ret = ObjDisp(device)->GetSwapchainImagesKHR(Unwrap(device), Unwrap(swapchain), pCount, pSwapchainImages);

  if(pSwapchainImages && m_State >= WRITING)
  {
    uint32_t numImages = *pCount;

    VkResourceRecord *swapRecord = GetRecord(swapchain);

    for(uint32_t i=0; i < numImages; i++)
    {
      // these were all wrapped and serialised on swapchain create - we just have to
      // return the wrapped image in that case
      if(swapRecord->swapInfo->images[i].im != VK_NULL_HANDLE)
      {
        pSwapchainImages[i] = swapRecord->swapInfo->images[i].im;
      }
      else
      {
        ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), pSwapchainImages[i]);

        // NOTE(review): always true here - the enclosing if already requires
        // m_State >= WRITING, so the else branch below is unreachable
        if(m_State >= WRITING)
        {
          Chunk *chunk = NULL;

          {
            CACHE_THREAD_SERIALISER();

            SCOPED_SERIALISE_CONTEXT(GET_SWAPCHAIN_IMAGE);
            Serialise_vkGetSwapchainImagesKHR(localSerialiser, device, swapchain, &i, &pSwapchainImages[i]);

            chunk = scope.Get();
          }

          VkResourceRecord *record = GetResourceManager()->AddResourceRecord(pSwapchainImages[i]);
          VkResourceRecord *swaprecord = GetRecord(swapchain);

          record->SpecialResource = true;

          record->AddParent(swaprecord);

          // note we add the chunk to the swap record, that way when the swapchain is created it will
          // always create all of its images on replay. The image's record is kept around for reference
          // tracking and any other chunks. Because it has a parent relationship on the swapchain, if
          // the image is referenced the swapchain (and thus all the getimages) will be included.
          swaprecord->AddChunk(chunk);
        }
        else
        {
          GetResourceManager()->AddLiveResource(id, pSwapchainImages[i]);
        }
      }
    }
  }

  return ret;
}
// Wraps vkCmdWaitEvents: unwraps the event and barrier arrays into thread-local
// scratch memory before calling the driver, then (during capture) serialises
// the call with the original wrapped handles and records any image layout
// transitions implied by the image barriers.
void WrappedVulkan::vkCmdWaitEvents(
    VkCommandBuffer cmdBuffer,
    uint32_t eventCount, const VkEvent* pEvents,
    VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
    uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers)
{
  {
    // single scratch allocation, laid out as: [events][image barriers][buffer barriers].
    // memory barriers carry no handles so they are passed through unmodified
    byte *memory = GetTempMemory( sizeof(VkEvent)*eventCount + sizeof(VkBufferMemoryBarrier)*bufferMemoryBarrierCount + sizeof(VkImageMemoryBarrier)*imageMemoryBarrierCount);

    VkEvent *ev = (VkEvent *)memory;
    VkImageMemoryBarrier *im = (VkImageMemoryBarrier *)(ev + eventCount);
    VkBufferMemoryBarrier *buf = (VkBufferMemoryBarrier *)(im + imageMemoryBarrierCount);

    for(uint32_t i=0; i < eventCount; i++)
      ev[i] = Unwrap(pEvents[i]);

    // copy the barrier structs and unwrap the handles they contain
    for(uint32_t i=0; i < bufferMemoryBarrierCount; i++)
    {
      buf[i] = pBufferMemoryBarriers[i];
      buf[i].buffer = Unwrap(buf[i].buffer);
    }

    for(uint32_t i=0; i < imageMemoryBarrierCount; i++)
    {
      im[i] = pImageMemoryBarriers[i];
      im[i].image = Unwrap(im[i].image);
    }

    ObjDisp(cmdBuffer)->CmdWaitEvents(Unwrap(cmdBuffer), eventCount, ev, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, buf, imageMemoryBarrierCount, im);
  }

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    // serialise with the original (wrapped) parameters, not the unwrapped copies
    SCOPED_SERIALISE_CONTEXT(CMD_WAIT_EVENTS);
    Serialise_vkCmdWaitEvents(localSerialiser, cmdBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);

    // track image layout transitions caused by these barriers, under the layouts lock
    if(imageMemoryBarrierCount > 0)
    {
      SCOPED_LOCK(m_ImageLayoutsLock);
      GetResourceManager()->RecordBarriers(GetRecord(cmdBuffer)->cmdInfo->imgbarriers, m_ImageLayouts, imageMemoryBarrierCount, pImageMemoryBarriers);
    }

    record->AddChunk(scope.Get());

    // waited-on events are read by this command buffer during the frame
    for(uint32_t i=0; i < eventCount; i++)
      record->MarkResourceFrameReferenced(GetResID(pEvents[i]), eFrameRef_Read);
  }
}
// Wraps vkCreateDescriptorSetLayout: unwraps the binding array (including any
// immutable sampler arrays) into thread-local scratch memory for the driver
// call, then wraps the result and either records it (capture) or registers it
// live and initialises the layout's creation info (replay).
VkResult WrappedVulkan::vkCreateDescriptorSetLayout(
    VkDevice device,
    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorSetLayout* pSetLayout)
{
  size_t tempmemSize = sizeof(VkDescriptorSetLayoutBinding)*pCreateInfo->bindingCount;

  // need to count how many VkSampler arrays to allocate for
  for(uint32_t i=0; i < pCreateInfo->bindingCount; i++)
    if(pCreateInfo->pBindings[i].pImmutableSamplers)
      tempmemSize += pCreateInfo->pBindings[i].descriptorCount*sizeof(VkSampler);

  byte *memory = GetTempMemory(tempmemSize);

  // layout of the scratch memory: all bindings first, then the packed sampler arrays
  VkDescriptorSetLayoutBinding *unwrapped = (VkDescriptorSetLayoutBinding *)memory;
  VkSampler *nextSampler = (VkSampler *)(unwrapped + pCreateInfo->bindingCount);

  for(uint32_t i=0; i < pCreateInfo->bindingCount; i++)
  {
    unwrapped[i] = pCreateInfo->pBindings[i];

    if(unwrapped[i].pImmutableSamplers)
    {
      // carve this binding's sampler array out of the scratch space and
      // repoint the binding at the unwrapped copies
      VkSampler *unwrappedSamplers = nextSampler; nextSampler += unwrapped[i].descriptorCount;
      for(uint32_t j=0; j < unwrapped[i].descriptorCount; j++)
        unwrappedSamplers[j] = Unwrap(unwrapped[i].pImmutableSamplers[j]);
      unwrapped[i].pImmutableSamplers = unwrappedSamplers;
    }
  }

  VkDescriptorSetLayoutCreateInfo unwrappedInfo = *pCreateInfo;
  unwrappedInfo.pBindings = unwrapped;
  VkResult ret = ObjDisp(device)->CreateDescriptorSetLayout(Unwrap(device), &unwrappedInfo, pAllocator, pSetLayout);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pSetLayout);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        // serialise the original create info with wrapped handles
        SCOPED_SERIALISE_CONTEXT(CREATE_DESCRIPTOR_SET_LAYOUT);
        Serialise_vkCreateDescriptorSetLayout(localSerialiser, device, pCreateInfo, NULL, pSetLayout);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pSetLayout);
      record->AddChunk(chunk);

      record->descInfo = new DescriptorSetData();
      record->descInfo->layout = new DescSetLayout();
      record->descInfo->layout->Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pSetLayout);

      m_CreationInfo.m_DescSetLayout[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
    }
  }

  return ret;
}