VkResult WrappedVulkan::vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
                                           VkDeviceSize memOffset)
{
  VkResourceRecord *record = GetRecord(buffer);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(BIND_BUFFER_MEM);
      Serialise_vkBindBufferMemory(localSerialiser, device, buffer, mem, memOffset);

      chunk = scope.Get();
    }

    // memory object bindings are immutable and must happen before creation or use,
    // so this can always go into the record, even if a resource is created and bound
    // to memory mid-frame
    record->AddChunk(chunk);

    record->AddParent(GetRecord(mem));
    record->baseResource = GetResID(mem);
  }

  return ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buffer), Unwrap(mem), memOffset);
}
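// NOTE: a minimal sketch of the capture-side call sequence this wrapper participates in,
// assuming a typical app (illustrative only, not code from this file):
//
//   vkCreateBuffer(device, &info, NULL, &buf);   // CREATE_BUFFER chunk on the buffer's record
//   vkBindBufferMemory(device, buf, mem, 0);     // BIND_BUFFER_MEM chunk + parent link to mem
//
// Because the bind is immutable and precedes all use of the buffer, replaying the buffer's
// own record is enough to recreate it fully bound - no frame-capture chunk is needed.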
VkResult WrappedVulkan::vkWaitForFences(VkDevice device, uint32_t fenceCount,
                                        const VkFence *pFences, VkBool32 waitAll, uint64_t timeout)
{
  SCOPED_DBG_SINK();

  VkFence *unwrapped = GetTempArray<VkFence>(fenceCount);
  for(uint32_t i = 0; i < fenceCount; i++)
    unwrapped[i] = Unwrap(pFences[i]);

  VkResult ret =
      ObjDisp(device)->WaitForFences(Unwrap(device), fenceCount, unwrapped, waitAll, timeout);

  if(m_State >= WRITING_CAPFRAME)
  {
    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(WAIT_FENCES);
    Serialise_vkWaitForFences(localSerialiser, device, fenceCount, pFences, waitAll, timeout);

    m_FrameCaptureRecord->AddChunk(scope.Get());
  }

  return ret;
}
VkResult WrappedVulkan::vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                       const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer)
{
  VkResult ret = ObjDisp(device)->CreateBuffer(Unwrap(device), pCreateInfo, pAllocator, pBuffer);

  // SHARING: pCreateInfo sharingMode, queueFamilyCount, pQueueFamilyIndices

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pBuffer);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_BUFFER);
        Serialise_vkCreateBuffer(localSerialiser, device, pCreateInfo, NULL, pBuffer);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pBuffer);
      record->AddChunk(chunk);

      if(pCreateInfo->flags &
         (VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT))
      {
        record->sparseInfo = new SparseMapping();

        // buffers are always bound opaquely and in arbitrary divisions, sparse residency
        // only means not all the buffer needs to be bound, which is not that interesting for
        // our purposes

        {
          SCOPED_LOCK(m_CapTransitionLock);
          if(m_State != WRITING_CAPFRAME)
            GetResourceManager()->MarkDirtyResource(id);
          else
            GetResourceManager()->MarkPendingDirty(id);
        }
      }
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pBuffer);

      m_CreationInfo.m_Buffer[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
    }
  }

  return ret;
}
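// NOTE: the dirty-marking above is decided under m_CapTransitionLock so the choice can't
// race with a capture starting on another thread. In rough terms, under that lock:
//
//   background (WRITING_IDLE)      -> MarkDirtyResource: contents re-serialised as initial state
//   mid-capture (WRITING_CAPFRAME) -> MarkPendingDirty:  dirty flag deferred until capture ends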
VkResult WrappedVulkan::vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                  const VkMappedMemoryRange *pMemRanges)
{
  if(m_State >= WRITING)
  {
    bool capframe = false;
    {
      SCOPED_LOCK(m_CapTransitionLock);
      capframe = (m_State == WRITING_CAPFRAME);
    }

    for(uint32_t i = 0; i < memRangeCount; i++)
    {
      ResourceId memid = GetResID(pMemRanges[i].memory);

      MemMapState *state = GetRecord(pMemRanges[i].memory)->memMapState;
      state->mapFlushed = true;

      if(state->mappedPtr == NULL)
      {
        RDCERR("Flushing memory that isn't currently mapped");
        continue;
      }

      if(capframe)
      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(FLUSH_MEM);
        Serialise_vkFlushMappedMemoryRanges(localSerialiser, device, 1, pMemRanges + i);

        m_FrameCaptureRecord->AddChunk(scope.Get());
        GetResourceManager()->MarkResourceFrameReferenced(GetResID(pMemRanges[i].memory),
                                                          eFrameRef_Write);
      }
      else
      {
        GetResourceManager()->MarkDirtyResource(memid);
      }
    }
  }

  VkMappedMemoryRange *unwrapped = GetTempArray<VkMappedMemoryRange>(memRangeCount);
  for(uint32_t i = 0; i < memRangeCount; i++)
  {
    unwrapped[i] = pMemRanges[i];
    unwrapped[i].memory = Unwrap(unwrapped[i].memory);
  }

  VkResult ret = ObjDisp(device)->FlushMappedMemoryRanges(Unwrap(device), memRangeCount, unwrapped);

  return ret;
}
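// NOTE: the loop above deliberately serialises one VkMappedMemoryRange per FLUSH_MEM chunk
// (hence the "1, pMemRanges + i" arguments), so each flushed region is replayed and
// frame-referenced independently. For example, a single app call while capturing:
//
//   VkMappedMemoryRange ranges[2] = {...};          // two regions, possibly of two mappings
//   vkFlushMappedMemoryRanges(device, 2, ranges);   // -> two FLUSH_MEM chunks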
VkResult WrappedVulkan::vkCreatePipelineLayout(VkDevice device,
                                               const VkPipelineLayoutCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator,
                                               VkPipelineLayout *pPipelineLayout)
{
  VkDescriptorSetLayout *unwrapped = GetTempArray<VkDescriptorSetLayout>(pCreateInfo->setLayoutCount);
  for(uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++)
    unwrapped[i] = Unwrap(pCreateInfo->pSetLayouts[i]);

  VkPipelineLayoutCreateInfo unwrappedInfo = *pCreateInfo;
  unwrappedInfo.pSetLayouts = unwrapped;

  VkResult ret = ObjDisp(device)->CreatePipelineLayout(Unwrap(device), &unwrappedInfo, pAllocator,
                                                       pPipelineLayout);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pPipelineLayout);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_PIPE_LAYOUT);
        Serialise_vkCreatePipelineLayout(localSerialiser, device, pCreateInfo, NULL, pPipelineLayout);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pPipelineLayout);
      record->AddChunk(chunk);

      for(uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++)
      {
        VkResourceRecord *layoutrecord = GetRecord(pCreateInfo->pSetLayouts[i]);
        record->AddParent(layoutrecord);
      }
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pPipelineLayout);

      m_CreationInfo.m_PipelineLayout[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
    }
  }

  return ret;
}
VkResult WrappedVulkan::vkCreatePipelineCache(VkDevice device,
                                              const VkPipelineCacheCreateInfo *pCreateInfo,
                                              const VkAllocationCallbacks *pAllocator,
                                              VkPipelineCache *pPipelineCache)
{
  // pretend the user didn't provide any cache data
  VkPipelineCacheCreateInfo createInfo = *pCreateInfo;
  createInfo.initialDataSize = 0;
  createInfo.pInitialData = NULL;

  if(pCreateInfo->initialDataSize > 0)
  {
    RDCWARN(
        "Application provided pipeline cache data! This is invalid, as RenderDoc reports "
        "incompatibility with previous caches");
  }

  VkResult ret =
      ObjDisp(device)->CreatePipelineCache(Unwrap(device), &createInfo, pAllocator, pPipelineCache);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pPipelineCache);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_PIPE_CACHE);
        Serialise_vkCreatePipelineCache(localSerialiser, device, &createInfo, NULL, pPipelineCache);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pPipelineCache);
      record->AddChunk(chunk);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pPipelineCache);
    }
  }

  return ret;
}
VkResult WrappedVulkan::vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                          const VkAllocationCallbacks *pAllocator, VkImageView *pView)
{
  VkImageViewCreateInfo unwrappedInfo = *pCreateInfo;
  unwrappedInfo.image = Unwrap(unwrappedInfo.image);
  VkResult ret = ObjDisp(device)->CreateImageView(Unwrap(device), &unwrappedInfo, pAllocator, pView);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pView);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_IMAGE_VIEW);
        Serialise_vkCreateImageView(localSerialiser, device, pCreateInfo, NULL, pView);

        chunk = scope.Get();
      }

      VkResourceRecord *imageRecord = GetRecord(pCreateInfo->image);

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pView);
      record->AddChunk(chunk);
      record->AddParent(imageRecord);

      // store the base resource. Note images have a baseResource pointing
      // to their memory, which we will also need so we store that separately
      record->baseResource = imageRecord->GetResourceID();
      record->baseResourceMem = imageRecord->baseResource;
      record->sparseInfo = imageRecord->sparseInfo;
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pView);

      m_CreationInfo.m_ImageView[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
    }
  }

  return ret;
}
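// NOTE: the resulting parent chain for an image view looks like this:
//
//   view->baseResource    == the image's ResourceId   (stop point for most lookups)
//   view->baseResourceMem == the image's baseResource (the VkDeviceMemory, set at bind time)
//
// Contrast with buffer views below, where baseResource points straight at the buffer's
// memory since buffers don't need the intermediate stop.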
VkResult WrappedVulkan::vkDeviceWaitIdle(VkDevice device)
{
  VkResult ret = ObjDisp(device)->DeviceWaitIdle(Unwrap(device));

  if(m_State >= WRITING_CAPFRAME)
  {
    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(DEVICE_WAIT_IDLE);
    Serialise_vkDeviceWaitIdle(localSerialiser, device);

    m_FrameCaptureRecord->AddChunk(scope.Get());
  }

  return ret;
}
VkResult WrappedVulkan::vkCreateBufferView(VkDevice device,
                                           const VkBufferViewCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator,
                                           VkBufferView *pView)
{
  VkBufferViewCreateInfo unwrappedInfo = *pCreateInfo;
  unwrappedInfo.buffer = Unwrap(unwrappedInfo.buffer);
  VkResult ret = ObjDisp(device)->CreateBufferView(Unwrap(device), &unwrappedInfo, pAllocator, pView);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pView);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_BUFFER_VIEW);
        Serialise_vkCreateBufferView(localSerialiser, device, pCreateInfo, NULL, pView);

        chunk = scope.Get();
      }

      VkResourceRecord *bufferRecord = GetRecord(pCreateInfo->buffer);

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pView);
      record->AddChunk(chunk);
      record->AddParent(bufferRecord);

      // store the base resource
      record->baseResource = bufferRecord->baseResource;
      record->sparseInfo = bufferRecord->sparseInfo;
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pView);

      m_CreationInfo.m_BufferView[id].Init(GetResourceManager(), m_CreationInfo, &unwrappedInfo);
    }
  }

  return ret;
}
VkResult WrappedVulkan::vkQueueWaitIdle(VkQueue queue)
{
  VkResult ret = ObjDisp(queue)->QueueWaitIdle(Unwrap(queue));

  if(m_State >= WRITING_CAPFRAME)
  {
    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(QUEUE_WAIT_IDLE);
    Serialise_vkQueueWaitIdle(localSerialiser, queue);

    m_FrameCaptureRecord->AddChunk(scope.Get());
    GetResourceManager()->MarkResourceFrameReferenced(GetResID(queue), eFrameRef_Read);
  }

  return ret;
}
void WrappedVulkan::vkCmdSetBlendConstants(VkCommandBuffer cmdBuffer, const float *blendConst)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetBlendConstants(Unwrap(cmdBuffer), blendConst);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_BLEND_CONST);
    Serialise_vkCmdSetBlendConstants(localSerialiser, cmdBuffer, blendConst);

    record->AddChunk(scope.Get());
  }
}
void WrappedVulkan::vkCmdSetLineWidth(VkCommandBuffer cmdBuffer, float lineWidth)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetLineWidth(Unwrap(cmdBuffer), lineWidth);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_LINE_WIDTH);
    Serialise_vkCmdSetLineWidth(localSerialiser, cmdBuffer, lineWidth);

    record->AddChunk(scope.Get());
  }
}
void WrappedVulkan::vkCmdSetViewport(VkCommandBuffer cmdBuffer, uint32_t firstViewport,
                                     uint32_t viewportCount, const VkViewport *pViewports)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetViewport(Unwrap(cmdBuffer), firstViewport, viewportCount, pViewports);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_VP);
    Serialise_vkCmdSetViewport(localSerialiser, cmdBuffer, firstViewport, viewportCount, pViewports);

    record->AddChunk(scope.Get());
  }
}
void WrappedVulkan::vkCmdSetStencilReference(VkCommandBuffer cmdBuffer, VkStencilFaceFlags faceMask,
                                             uint32_t reference)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetStencilReference(Unwrap(cmdBuffer), faceMask, reference);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_STENCIL_REF);
    Serialise_vkCmdSetStencilReference(localSerialiser, cmdBuffer, faceMask, reference);

    record->AddChunk(scope.Get());
  }
}
void WrappedVulkan::vkCmdSetDepthBounds(VkCommandBuffer cmdBuffer, float minDepthBounds,
                                        float maxDepthBounds)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetDepthBounds(Unwrap(cmdBuffer), minDepthBounds, maxDepthBounds);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_DEPTH_BOUNDS);
    Serialise_vkCmdSetDepthBounds(localSerialiser, cmdBuffer, minDepthBounds, maxDepthBounds);

    record->AddChunk(scope.Get());
  }
}
void WrappedVulkan::vkCmdSetScissor(VkCommandBuffer cmdBuffer, uint32_t firstScissor,
                                    uint32_t scissorCount, const VkRect2D *pScissors)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetScissor(Unwrap(cmdBuffer), firstScissor, scissorCount, pScissors);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_SCISSOR);
    Serialise_vkCmdSetScissor(localSerialiser, cmdBuffer, firstScissor, scissorCount, pScissors);

    record->AddChunk(scope.Get());
  }
}
VkResult WrappedVulkan::vkRegisterDeviceEventEXT(VkDevice device,
                                                 const VkDeviceEventInfoEXT *pDeviceEventInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkFence *pFence)
{
  // for now we emulate this on replay as just a regular fence create, since we don't faithfully
  // replay sync events anyway.
  VkResult ret =
      ObjDisp(device)->RegisterDeviceEventEXT(Unwrap(device), pDeviceEventInfo, pAllocator, pFence);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pFence);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        VkFenceCreateInfo createInfo = {
            VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, NULL, VK_FENCE_CREATE_SIGNALED_BIT,
        };

        SCOPED_SERIALISE_CONTEXT(CREATE_FENCE);
        Serialise_vkCreateFence(localSerialiser, device, &createInfo, NULL, pFence);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pFence);
      record->AddChunk(chunk);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pFence);
    }
  }

  return ret;
}
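// NOTE: since the chunk recorded above is an ordinary CREATE_FENCE, a capture containing
// vkRegisterDeviceEventEXT replays as roughly the following (a sketch of the replay
// effect, not literal replay code):
//
//   VkFenceCreateInfo ci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, NULL, VK_FENCE_CREATE_SIGNALED_BIT};
//   vkCreateFence(device, &ci, NULL, pFence);
//
// The pre-signalled bit means any replayed wait on this fence completes immediately,
// standing in for the device event that will never fire during replay.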
VkResult WrappedVulkan::vkGetEventStatus(VkDevice device, VkEvent event)
{
  SCOPED_DBG_SINK();

  VkResult ret = ObjDisp(device)->GetEventStatus(Unwrap(device), Unwrap(event));

  if(m_State >= WRITING_CAPFRAME)
  {
    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(GET_EVENT_STATUS);
    Serialise_vkGetEventStatus(localSerialiser, device, event);

    m_FrameCaptureRecord->AddChunk(scope.Get());
  }

  return ret;
}
void WrappedVulkan::vkCmdSetDepthBias(VkCommandBuffer cmdBuffer, float depthBias,
                                      float depthBiasClamp, float slopeScaledDepthBias)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdSetDepthBias(Unwrap(cmdBuffer), depthBias, depthBiasClamp,
                                      slopeScaledDepthBias);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(SET_DEPTH_BIAS);
    Serialise_vkCmdSetDepthBias(localSerialiser, cmdBuffer, depthBias, depthBiasClamp,
                                slopeScaledDepthBias);

    record->AddChunk(scope.Get());
  }
}
void WrappedVulkan::vkCmdResetEvent(VkCommandBuffer cmdBuffer, VkEvent event,
                                    VkPipelineStageFlags stageMask)
{
  SCOPED_DBG_SINK();

  ObjDisp(cmdBuffer)->CmdResetEvent(Unwrap(cmdBuffer), Unwrap(event), stageMask);

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(CMD_RESET_EVENT);
    Serialise_vkCmdResetEvent(localSerialiser, cmdBuffer, event, stageMask);

    record->AddChunk(scope.Get());
    record->MarkResourceFrameReferenced(GetResID(event), eFrameRef_Read);
  }
}
VkResult WrappedVulkan::vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                        const VkAllocationCallbacks *pAllocator, VkSampler *pSampler)
{
  VkResult ret = ObjDisp(device)->CreateSampler(Unwrap(device), pCreateInfo, pAllocator, pSampler);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pSampler);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_SAMPLER);
        Serialise_vkCreateSampler(localSerialiser, device, pCreateInfo, NULL, pSampler);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pSampler);
      record->AddChunk(chunk);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pSampler);

      m_CreationInfo.m_Sampler[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
    }
  }

  return ret;
}
VkResult WrappedVulkan::vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
                                          VkDeviceSize memOffset)
{
  VkResourceRecord *record = GetRecord(image);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(BIND_IMAGE_MEM);
      Serialise_vkBindImageMemory(localSerialiser, device, image, mem, memOffset);

      chunk = scope.Get();
    }

    // memory object bindings are immutable and must happen before creation or use,
    // so this can always go into the record, even if a resource is created and bound
    // to memory mid-frame
    record->AddChunk(chunk);

    record->AddParent(GetRecord(mem));

    // images are a base resource but we want to track where their memory comes from.
    // Anything that looks up a baseResource for an image knows not to chase further
    // than the image.
    record->baseResource = GetResID(mem);
  }

  return ObjDisp(device)->BindImageMemory(Unwrap(device), Unwrap(image), Unwrap(mem), memOffset);
}
VkResult WrappedVulkan::vkDbgSetObjectName(VkDevice device, VkDebugReportObjectTypeEXT objType,
                                           uint64_t object, size_t nameSize, const char *pName)
{
  if(ObjDisp(device)->DbgSetObjectName)
    ObjDisp(device)->DbgSetObjectName(Unwrap(device), objType, object, nameSize, pName);

  if(m_State >= WRITING)
  {
    Chunk *chunk = NULL;

    VkResourceRecord *record = GetObjRecord(objType, object);

    if(!record)
    {
      RDCERR("Unrecognised object %d %llu", objType, object);
      return VK_SUCCESS;
    }

    {
      CACHE_THREAD_SERIALISER();

      SCOPED_SERIALISE_CONTEXT(SET_NAME);
      Serialise_vkDbgSetObjectName(localSerialiser, device, objType, object, nameSize, pName);

      chunk = scope.Get();
    }

    record->AddChunk(chunk);
  }

  return VK_SUCCESS;
}
VkResult WrappedVulkan::vkCreateDescriptorPool(VkDevice device,
                                               const VkDescriptorPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator,
                                               VkDescriptorPool *pDescriptorPool)
{
  VkResult ret =
      ObjDisp(device)->CreateDescriptorPool(Unwrap(device), pCreateInfo, pAllocator, pDescriptorPool);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pDescriptorPool);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_DESCRIPTOR_POOL);
        Serialise_vkCreateDescriptorPool(localSerialiser, device, pCreateInfo, NULL, pDescriptorPool);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pDescriptorPool);
      record->AddChunk(chunk);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pDescriptorPool);
    }
  }

  return ret;
}
VkResult WrappedVulkan::vkEnumeratePhysicalDevices(VkInstance instance,
                                                   uint32_t *pPhysicalDeviceCount,
                                                   VkPhysicalDevice *pPhysicalDevices)
{
  uint32_t count;

  VkResult vkr = ObjDisp(instance)->EnumeratePhysicalDevices(Unwrap(instance), &count, NULL);

  if(vkr != VK_SUCCESS)
    return vkr;

  VkPhysicalDevice *devices = new VkPhysicalDevice[count];

  vkr = ObjDisp(instance)->EnumeratePhysicalDevices(Unwrap(instance), &count, devices);
  RDCASSERTEQUAL(vkr, VK_SUCCESS);

  m_PhysicalDevices.resize(count);

  for(uint32_t i = 0; i < count; i++)
  {
    // it's perfectly valid for enumerate type functions to return the same handle
    // each time. If that happens, we will already have a wrapper created so just
    // return the wrapped object to the user and do nothing else
    if(m_PhysicalDevices[i] != VK_NULL_HANDLE)
    {
      GetWrapped(m_PhysicalDevices[i])->RewrapObject(devices[i]);
      devices[i] = m_PhysicalDevices[i];
    }
    else
    {
      GetResourceManager()->WrapResource(instance, devices[i]);

      if(m_State >= WRITING)
      {
        // add the record first since it's used in the serialise function below to fetch
        // the memory indices
        VkResourceRecord *record = GetResourceManager()->AddResourceRecord(devices[i]);
        RDCASSERT(record);

        record->memProps = new VkPhysicalDeviceMemoryProperties();

        ObjDisp(devices[i])->GetPhysicalDeviceMemoryProperties(Unwrap(devices[i]), record->memProps);

        m_PhysicalDevices[i] = devices[i];

        // we remap memory indices to discourage coherent maps as much as possible
        RemapMemoryIndices(record->memProps, &record->memIdxMap);

        {
          CACHE_THREAD_SERIALISER();

          SCOPED_SERIALISE_CONTEXT(ENUM_PHYSICALS);
          Serialise_vkEnumeratePhysicalDevices(localSerialiser, instance, &i, &devices[i]);

          record->AddChunk(scope.Get());
        }

        VkResourceRecord *instrecord = GetRecord(instance);

        instrecord->AddParent(record);

        // treat physical devices as pool members of the instance (ie. freed when the
        // instance dies)
        {
          instrecord->LockChunks();
          instrecord->pooledChildren.push_back(record);
          instrecord->UnlockChunks();
        }
      }
    }
  }

  if(pPhysicalDeviceCount)
    *pPhysicalDeviceCount = count;
  if(pPhysicalDevices)
    memcpy(pPhysicalDevices, devices, count * sizeof(VkPhysicalDevice));

  SAFE_DELETE_ARRAY(devices);

  return VK_SUCCESS;
}
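// NOTE: the two branches above handle the loader legitimately returning identical
// VkPhysicalDevice handles on repeated enumeration:
//
//   first enumeration : wrap the handle, create a record, fetch+remap memory properties,
//                       serialise an ENUM_PHYSICALS chunk parented to the instance
//   later enumerations: RewrapObject() re-points the existing wrapper at the raw handle
//                       and the cached wrapped handle is returned, with no new record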
void WrappedVulkan::vkUnmapMemory(VkDevice device, VkDeviceMemory mem)
{
  if(m_State >= WRITING)
  {
    ResourceId id = GetResID(mem);

    VkResourceRecord *memrecord = GetRecord(mem);

    RDCASSERT(memrecord->memMapState);
    MemMapState &state = *memrecord->memMapState;

    {
      // decide atomically if this chunk should be in-frame or not
      // so that we're not in the else branch but haven't marked
      // dirty when capframe starts, then we mark dirty while in-frame
      bool capframe = false;
      {
        SCOPED_LOCK(m_CapTransitionLock);
        capframe = (m_State == WRITING_CAPFRAME);

        if(!capframe)
          GetResourceManager()->MarkDirtyResource(id);
      }

      if(capframe)
      {
        // coherent maps must always serialise all data on unmap, even if a flush was seen,
        // because unflushed data is *also* visible. This is a bit redundant since data is
        // serialised here and in any flushes, but that's the app's fault - the spec calls
        // out flushing coherent maps as inefficient

        // if the memory is not coherent, we must have a flush for every region written while
        // it is mapped, there is no implicit flush on unmap, so we follow the spec strictly
        // on this.
        if(state.mapCoherent)
        {
          CACHE_THREAD_SERIALISER();

          SCOPED_SERIALISE_CONTEXT(UNMAP_MEM);
          Serialise_vkUnmapMemory(localSerialiser, device, mem);

          VkResourceRecord *record = GetRecord(mem);

          if(m_State == WRITING_IDLE)
          {
            record->AddChunk(scope.Get());
          }
          else
          {
            m_FrameCaptureRecord->AddChunk(scope.Get());
            GetResourceManager()->MarkResourceFrameReferenced(id, eFrameRef_Write);
          }
        }
      }

      state.mappedPtr = NULL;
    }

    Serialiser::FreeAlignedBuffer(state.refData);
    // clear the freed shadow-data pointer so a later map doesn't see a dangling buffer
    state.refData = NULL;

    if(state.mapCoherent)
    {
      SCOPED_LOCK(m_CoherentMapsLock);

      auto it = std::find(m_CoherentMaps.begin(), m_CoherentMaps.end(), memrecord);
      if(it == m_CoherentMaps.end())
        RDCERR("vkUnmapMemory for memory handle that's not currently mapped");
      else
        m_CoherentMaps.erase(it);
    }
  }

  ObjDisp(device)->UnmapMemory(Unwrap(device), Unwrap(mem));
}
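// NOTE: together with vkFlushMappedMemoryRanges above, the capture rules for mapped
// memory are roughly:
//
//   non-coherent map: the app must flush every written region, so FLUSH_MEM chunks carry
//                     all the data and unmap serialises nothing
//   coherent map    : no flush is required by the spec, so UNMAP_MEM serialises the whole
//                     mapped range, and the record sits in m_CoherentMaps while mapped so
//                     unflushed writes can also be picked up mid-capture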
VkResult WrappedVulkan::vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                         const VkAllocationCallbacks *pAllocator,
                                         VkDeviceMemory *pMemory)
{
  VkMemoryAllocateInfo info = *pAllocateInfo;
  if(m_State >= WRITING)
    info.memoryTypeIndex = GetRecord(device)->memIdxMap[info.memoryTypeIndex];

  VkResult ret = ObjDisp(device)->AllocateMemory(Unwrap(device), &info, pAllocator, pMemory);

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pMemory);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(ALLOC_MEM);
        Serialise_vkAllocateMemory(localSerialiser, device, pAllocateInfo, NULL, pMemory);

        chunk = scope.Get();
      }

      // create resource record for gpu memory
      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pMemory);
      RDCASSERT(record);

      record->AddChunk(chunk);

      record->Length = pAllocateInfo->allocationSize;

      uint32_t memProps =
          m_PhysicalDeviceData.fakeMemProps->memoryTypes[pAllocateInfo->memoryTypeIndex].propertyFlags;

      // if memory is not host visible, so not mappable, don't create map state at all
      if((memProps & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
      {
        record->memMapState = new MemMapState();
        record->memMapState->mapCoherent = (memProps & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
        record->memMapState->refData = NULL;
      }
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pMemory);

      m_CreationInfo.m_Memory[id].Init(GetResourceManager(), m_CreationInfo, pAllocateInfo);

      // create a buffer with the whole memory range bound, for copying to and from
      // conveniently (for initial state data)
      VkBuffer buf = VK_NULL_HANDLE;

      VkBufferCreateInfo bufInfo = {
          VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
          NULL,
          0,
          info.allocationSize,
          VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
      };

      ret = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
      RDCASSERTEQUAL(ret, VK_SUCCESS);

      ResourceId bufid = GetResourceManager()->WrapResource(Unwrap(device), buf);

      ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buf), Unwrap(*pMemory), 0);

      // register as a live-only resource, so it is cleaned up properly
      GetResourceManager()->AddLiveResource(bufid, buf);

      m_CreationInfo.m_Memory[id].wholeMemBuf = buf;
    }
  }

  return ret;
}
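// NOTE: the memoryTypeIndex remap at the top exists because the app chose its index from
// the 'fake' memory properties reported to it (see RemapMemoryIndices in
// vkEnumeratePhysicalDevices). A hypothetical illustration, with made-up indices:
//
//   app sees fake type 2 and requests it
//   memIdxMap[2] == 5           // the driver's real matching memory type
//   info.memoryTypeIndex = 5    // what actually reaches the driver
//
// Correspondingly, fakeMemProps (not the real properties) is consulted above when deciding
// whether the allocation is host-visible/coherent, keeping both views consistent.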
VkResult WrappedVulkan::vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                      const VkAllocationCallbacks *pAllocator, VkImage *pImage)
{
  VkImageCreateInfo createInfo_adjusted = *pCreateInfo;

  createInfo_adjusted.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

  VkResult ret =
      ObjDisp(device)->CreateImage(Unwrap(device), &createInfo_adjusted, pAllocator, pImage);

  // SHARING: pCreateInfo sharingMode, queueFamilyCount, pQueueFamilyIndices

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pImage);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_IMAGE);
        Serialise_vkCreateImage(localSerialiser, device, pCreateInfo, NULL, pImage);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pImage);
      record->AddChunk(chunk);

      if(pCreateInfo->flags &
         (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT))
      {
        record->sparseInfo = new SparseMapping();

        {
          SCOPED_LOCK(m_CapTransitionLock);
          if(m_State != WRITING_CAPFRAME)
            GetResourceManager()->MarkDirtyResource(id);
          else
            GetResourceManager()->MarkPendingDirty(id);
        }

        if(pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)
        {
          // must record image and page dimension, and create page tables
          uint32_t numreqs = NUM_VK_IMAGE_ASPECTS;
          VkSparseImageMemoryRequirements reqs[NUM_VK_IMAGE_ASPECTS];
          ObjDisp(device)->GetImageSparseMemoryRequirements(Unwrap(device), Unwrap(*pImage),
                                                            &numreqs, reqs);

          RDCASSERT(numreqs > 0);

          record->sparseInfo->pagedim = reqs[0].formatProperties.imageGranularity;
          record->sparseInfo->imgdim = pCreateInfo->extent;
          record->sparseInfo->imgdim.width /= record->sparseInfo->pagedim.width;
          record->sparseInfo->imgdim.height /= record->sparseInfo->pagedim.height;
          record->sparseInfo->imgdim.depth /= record->sparseInfo->pagedim.depth;

          uint32_t numpages = record->sparseInfo->imgdim.width *
                              record->sparseInfo->imgdim.height * record->sparseInfo->imgdim.depth;

          for(uint32_t i = 0; i < numreqs; i++)
          {
            // assume all page sizes are the same for all aspects
            RDCASSERT(record->sparseInfo->pagedim.width ==
                          reqs[i].formatProperties.imageGranularity.width &&
                      record->sparseInfo->pagedim.height ==
                          reqs[i].formatProperties.imageGranularity.height &&
                      record->sparseInfo->pagedim.depth ==
                          reqs[i].formatProperties.imageGranularity.depth);

            int a = 0;
            for(; a < NUM_VK_IMAGE_ASPECTS; a++)
              if(reqs[i].formatProperties.aspectMask & (1 << a))
                break;

            record->sparseInfo->pages[a] = new pair<VkDeviceMemory, VkDeviceSize>[numpages];
          }
        }
        else
        {
          // don't have to do anything, image is opaque and must be fully bound, just need
          // to track the memory bindings.
        }
      }
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pImage);

      m_CreationInfo.m_Image[id].Init(GetResourceManager(), m_CreationInfo, pCreateInfo);
    }

    VkImageSubresourceRange range;
    range.baseMipLevel = range.baseArrayLayer = 0;
    range.levelCount = pCreateInfo->mipLevels;
    range.layerCount = pCreateInfo->arrayLayers;
    if(pCreateInfo->imageType == VK_IMAGE_TYPE_3D)
      range.layerCount = pCreateInfo->extent.depth;

    ImageLayouts *layout = NULL;
    {
      SCOPED_LOCK(m_ImageLayoutsLock);
      layout = &m_ImageLayouts[id];
    }

    layout->layerCount = pCreateInfo->arrayLayers;
    layout->levelCount = pCreateInfo->mipLevels;
    layout->extent = pCreateInfo->extent;
    layout->format = pCreateInfo->format;

    layout->subresourceStates.clear();

    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    if(IsDepthOnlyFormat(pCreateInfo->format))
      range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    else if(IsDepthStencilFormat(pCreateInfo->format))
      range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;

    layout->subresourceStates.push_back(
        ImageRegionState(range, UNKNOWN_PREV_IMG_LAYOUT, VK_IMAGE_LAYOUT_UNDEFINED));
  }

  return ret;
}
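// NOTE: every image starts with a single subresource state covering its full mip/layer
// range in VK_IMAGE_LAYOUT_UNDEFINED. Barrier-recording calls (see RecordBarriers in
// vkCmdWaitEvents below) split and update these ranges, which is how the capture knows
// each subresource's layout at any point in the frame.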
void WrappedVulkan::vkCmdWaitEvents(VkCommandBuffer cmdBuffer, uint32_t eventCount,
                                    const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount,
                                    const VkMemoryBarrier *pMemoryBarriers,
                                    uint32_t bufferMemoryBarrierCount,
                                    const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                    uint32_t imageMemoryBarrierCount,
                                    const VkImageMemoryBarrier *pImageMemoryBarriers)
{
  {
    byte *memory = GetTempMemory(sizeof(VkEvent) * eventCount +
                                 sizeof(VkBufferMemoryBarrier) * bufferMemoryBarrierCount +
                                 sizeof(VkImageMemoryBarrier) * imageMemoryBarrierCount);

    VkEvent *ev = (VkEvent *)memory;
    VkImageMemoryBarrier *im = (VkImageMemoryBarrier *)(ev + eventCount);
    VkBufferMemoryBarrier *buf = (VkBufferMemoryBarrier *)(im + imageMemoryBarrierCount);

    for(uint32_t i = 0; i < eventCount; i++)
      ev[i] = Unwrap(pEvents[i]);

    for(uint32_t i = 0; i < bufferMemoryBarrierCount; i++)
    {
      buf[i] = pBufferMemoryBarriers[i];
      buf[i].buffer = Unwrap(buf[i].buffer);
    }

    for(uint32_t i = 0; i < imageMemoryBarrierCount; i++)
    {
      im[i] = pImageMemoryBarriers[i];
      im[i].image = Unwrap(im[i].image);
    }

    ObjDisp(cmdBuffer)->CmdWaitEvents(Unwrap(cmdBuffer), eventCount, ev, srcStageMask, dstStageMask,
                                      memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                      buf, imageMemoryBarrierCount, im);
  }

  if(m_State >= WRITING)
  {
    VkResourceRecord *record = GetRecord(cmdBuffer);

    CACHE_THREAD_SERIALISER();

    SCOPED_SERIALISE_CONTEXT(CMD_WAIT_EVENTS);
    Serialise_vkCmdWaitEvents(localSerialiser, cmdBuffer, eventCount, pEvents, srcStageMask,
                              dstStageMask, memoryBarrierCount, pMemoryBarriers,
                              bufferMemoryBarrierCount, pBufferMemoryBarriers,
                              imageMemoryBarrierCount, pImageMemoryBarriers);

    if(imageMemoryBarrierCount > 0)
    {
      SCOPED_LOCK(m_ImageLayoutsLock);
      GetResourceManager()->RecordBarriers(GetRecord(cmdBuffer)->cmdInfo->imgbarriers,
                                           m_ImageLayouts, imageMemoryBarrierCount,
                                           pImageMemoryBarriers);
    }

    record->AddChunk(scope.Get());
    for(uint32_t i = 0; i < eventCount; i++)
      record->MarkResourceFrameReferenced(GetResID(pEvents[i]), eFrameRef_Read);
  }
}
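// NOTE: GetTempMemory above returns one allocation that the three unwrapped arrays are
// carved out of, back to back:
//
//   [ VkEvent x eventCount ][ VkImageMemoryBarrier x imageCount ][ VkBufferMemoryBarrier x bufferCount ]
//
// Plain VkMemoryBarrier contains no handles, so pMemoryBarriers is passed down unmodified;
// only events, buffer barriers and image barriers need their handles unwrapped.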
VkResult WrappedVulkan::vkCreateDevice(VkPhysicalDevice physicalDevice,
                                       const VkDeviceCreateInfo *pCreateInfo,
                                       const VkAllocationCallbacks *pAllocator, VkDevice *pDevice)
{
  VkDeviceCreateInfo createInfo = *pCreateInfo;

  uint32_t qCount = 0;
  VkResult vkr = VK_SUCCESS;

  ObjDisp(physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(Unwrap(physicalDevice), &qCount, NULL);

  VkQueueFamilyProperties *props = new VkQueueFamilyProperties[qCount];
  ObjDisp(physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(Unwrap(physicalDevice), &qCount, props);

  // find a queue that supports all capabilities, and if one doesn't exist, add it.
  bool found = false;
  uint32_t qFamilyIdx = 0;
  VkQueueFlags search = (VK_QUEUE_GRAPHICS_BIT);

  // for queue priorities, if we need it
  float one = 1.0f;

  // if we need to change the requested queues, it will point to this
  VkDeviceQueueCreateInfo *modQueues = NULL;

  for(uint32_t i = 0; i < createInfo.queueCreateInfoCount; i++)
  {
    uint32_t idx = createInfo.pQueueCreateInfos[i].queueFamilyIndex;
    RDCASSERT(idx < qCount);

    // this requested queue is one we can use too
    if((props[idx].queueFlags & search) == search && createInfo.pQueueCreateInfos[i].queueCount > 0)
    {
      qFamilyIdx = idx;
      found = true;
      break;
    }
  }

  // if we didn't find it, search for which queue family we should add a request for
  if(!found)
  {
    RDCDEBUG("App didn't request a queue family we can use - adding our own");

    for(uint32_t i = 0; i < qCount; i++)
    {
      if((props[i].queueFlags & search) == search)
      {
        qFamilyIdx = i;
        found = true;
        break;
      }
    }

    if(!found)
    {
      SAFE_DELETE_ARRAY(props);
      RDCERR("Can't add a queue with required properties for RenderDoc! Unsupported configuration");
      return VK_ERROR_INITIALIZATION_FAILED;
    }

    // we found the queue family, add it
    modQueues = new VkDeviceQueueCreateInfo[createInfo.queueCreateInfoCount + 1];

    for(uint32_t i = 0; i < createInfo.queueCreateInfoCount; i++)
      modQueues[i] = createInfo.pQueueCreateInfos[i];

    modQueues[createInfo.queueCreateInfoCount].queueFamilyIndex = qFamilyIdx;
    modQueues[createInfo.queueCreateInfoCount].queueCount = 1;
    modQueues[createInfo.queueCreateInfoCount].pQueuePriorities = &one;

    createInfo.pQueueCreateInfos = modQueues;
    createInfo.queueCreateInfoCount++;
  }

  SAFE_DELETE_ARRAY(props);

  m_QueueFamilies.resize(createInfo.queueCreateInfoCount);
  for(size_t i = 0; i < createInfo.queueCreateInfoCount; i++)
  {
    uint32_t family = createInfo.pQueueCreateInfos[i].queueFamilyIndex;
    uint32_t count = createInfo.pQueueCreateInfos[i].queueCount;
    m_QueueFamilies.resize(RDCMAX(m_QueueFamilies.size(), size_t(family + 1)));

    m_QueueFamilies[family] = new VkQueue[count];
    for(uint32_t q = 0; q < count; q++)
      m_QueueFamilies[family][q] = VK_NULL_HANDLE;
  }

  VkLayerDeviceCreateInfo *layerCreateInfo = (VkLayerDeviceCreateInfo *)pCreateInfo->pNext;

  // step through the chain of pNext until we get to the link info
  while(layerCreateInfo && (layerCreateInfo->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO ||
                            layerCreateInfo->function != VK_LAYER_LINK_INFO))
  {
    layerCreateInfo = (VkLayerDeviceCreateInfo *)layerCreateInfo->pNext;
  }
  RDCASSERT(layerCreateInfo);

  PFN_vkGetDeviceProcAddr gdpa = layerCreateInfo->u.pLayerInfo->pfnNextGetDeviceProcAddr;
  PFN_vkGetInstanceProcAddr gipa = layerCreateInfo->u.pLayerInfo->pfnNextGetInstanceProcAddr;
  // move chain on for next layer
  layerCreateInfo->u.pLayerInfo = layerCreateInfo->u.pLayerInfo->pNext;

  PFN_vkCreateDevice createFunc = (PFN_vkCreateDevice)gipa(VK_NULL_HANDLE, "vkCreateDevice");

  // now search again through for the loader data callback (if it exists)
  layerCreateInfo = (VkLayerDeviceCreateInfo *)pCreateInfo->pNext;

  // step through the chain of pNext
  while(layerCreateInfo && (layerCreateInfo->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO ||
                            layerCreateInfo->function != VK_LOADER_DATA_CALLBACK))
  {
    layerCreateInfo = (VkLayerDeviceCreateInfo *)layerCreateInfo->pNext;
  }

  // if we found one (we might not - on old loaders), then store the func ptr for
  // use instead of SetDispatchTableOverMagicNumber
  if(layerCreateInfo)
  {
    RDCASSERT(m_SetDeviceLoaderData == layerCreateInfo->u.pfnSetDeviceLoaderData ||
                  m_SetDeviceLoaderData == NULL,
              m_SetDeviceLoaderData, layerCreateInfo->u.pfnSetDeviceLoaderData);
    m_SetDeviceLoaderData = layerCreateInfo->u.pfnSetDeviceLoaderData;
  }

  VkResult ret = createFunc(Unwrap(physicalDevice), &createInfo, pAllocator, pDevice);

  // don't serialise out any of the pNext stuff for layer initialisation
  // (note that we asserted above that there was nothing else in the chain)
  createInfo.pNext = NULL;

  if(ret == VK_SUCCESS)
  {
    InitDeviceTable(*pDevice, gdpa);

    ResourceId id = GetResourceManager()->WrapResource(*pDevice, *pDevice);

    if(m_State >= WRITING)
    {
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(CREATE_DEVICE);
        Serialise_vkCreateDevice(localSerialiser, physicalDevice, &createInfo, NULL, pDevice);

        chunk = scope.Get();
      }

      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pDevice);
      RDCASSERT(record);
      record->AddChunk(chunk);

      record->memIdxMap = GetRecord(physicalDevice)->memIdxMap;

      record->instDevInfo = new InstanceDeviceInfo();

#undef CheckExt
#define CheckExt(name) record->instDevInfo->name = GetRecord(m_Instance)->instDevInfo->name;

      // inherit extension enablement from instance, that way GetDeviceProcAddress can check
      // for enabled extensions for instance functions
      CheckInstanceExts();

#undef CheckExt
#define CheckExt(name)                                                  \
  if(!strcmp(createInfo.ppEnabledExtensionNames[i], STRINGIZE(name)))   \
  {                                                                     \
    record->instDevInfo->name = true;                                   \
  }

      for(uint32_t i = 0; i < createInfo.enabledExtensionCount; i++)
      {
        CheckDeviceExts();
      }

      InitDeviceExtensionTables(*pDevice);

      GetRecord(m_Instance)->AddParent(record);
    }
    else
    {
      GetResourceManager()->AddLiveResource(id, *pDevice);
    }

    VkDevice device = *pDevice;

    RDCASSERT(m_Device == VK_NULL_HANDLE);    // MULTIDEVICE

    m_PhysicalDevice = physicalDevice;
    m_Device = device;

    m_QueueFamilyIdx = qFamilyIdx;

    if(m_InternalCmds.cmdpool == VK_NULL_HANDLE)
    {
      VkCommandPoolCreateInfo poolInfo = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, NULL,
                                          VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, qFamilyIdx};
      vkr = ObjDisp(device)->CreateCommandPool(Unwrap(device), &poolInfo, NULL,
                                               &m_InternalCmds.cmdpool);
      RDCASSERTEQUAL(vkr, VK_SUCCESS);

      GetResourceManager()->WrapResource(Unwrap(device), m_InternalCmds.cmdpool);
    }

    ObjDisp(physicalDevice)->GetPhysicalDeviceProperties(Unwrap(physicalDevice),
                                                         &m_PhysicalDeviceData.props);

    ObjDisp(physicalDevice)->GetPhysicalDeviceMemoryProperties(Unwrap(physicalDevice),
                                                               &m_PhysicalDeviceData.memProps);

    ObjDisp(physicalDevice)->GetPhysicalDeviceFeatures(Unwrap(physicalDevice),
                                                       &m_PhysicalDeviceData.features);

    m_PhysicalDeviceData.readbackMemIndex =
        m_PhysicalDeviceData.GetMemoryIndex(~0U, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
    m_PhysicalDeviceData.uploadMemIndex =
        m_PhysicalDeviceData.GetMemoryIndex(~0U, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
    m_PhysicalDeviceData.GPULocalMemIndex = m_PhysicalDeviceData.GetMemoryIndex(
        ~0U, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

    m_PhysicalDeviceData.fakeMemProps = GetRecord(physicalDevice)->memProps;

    m_DebugManager = new VulkanDebugManager(this, device);
  }

  SAFE_DELETE_ARRAY(modQueues);

  return ret;
}
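// NOTE: the two pNext walks in vkCreateDevice follow the standard Vulkan layer bootstrap:
//
//   1. find the VkLayerDeviceCreateInfo with function == VK_LAYER_LINK_INFO, take the next
//      layer's vkGetInstanceProcAddr/vkGetDeviceProcAddr, and advance u.pLayerInfo so the
//      layer below sees its own link when createFunc continues down the chain
//   2. optionally find the VK_LOADER_DATA_CALLBACK entry; pfnSetDeviceLoaderData is the
//      loader's sanctioned way to initialise dispatchable child objects, replacing the
//      older SetDispatchTableOverMagicNumber fallback mentioned in the comment above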