void WrappedOpenGL::glGetBufferPointerv(GLenum target, GLenum pname, void **params)
{
  CoherentMapImplicitBarrier();

  // GL_BUFFER_MAP_POINTER must reflect *our* shadow mapping state, so answer it
  // from the buffer's resource record instead of asking the driver.
  if(pname == eGL_BUFFER_MAP_POINTER)
  {
    GLResourceRecord *bufRecord = GetCtxData().m_BufferRecord[BufferIdx(target)];

    RDCASSERTMSG("Couldn't identify implicit object at binding. Mismatched or bad GLuint?",
                 bufRecord, target);

    // default to "not mapped"; only hand back a pointer when a map is live
    *params = NULL;

    if(bufRecord && bufRecord->Map.status != GLResourceRecord::Unmapped)
      *params = (void *)bufRecord->Map.ptr;
  }
  else
  {
    // any other query passes straight through to the real driver
    m_Real.glGetBufferPointerv(target, pname, params);
  }
}
void WrappedOpenGL::glGetNamedBufferPointervEXT(GLuint buffer, GLenum pname, void **params)
{
  CoherentMapImplicitBarrier();

  // As with the bind-point variant, GL_BUFFER_MAP_POINTER queries are answered
  // from our own map-tracking state rather than the driver's.
  if(pname == eGL_BUFFER_MAP_POINTER)
  {
    GLResourceRecord *bufRecord =
        GetResourceManager()->GetResourceRecord(BufferRes(GetCtx(), buffer));

    RDCASSERTMSG("Couldn't identify object passed to function. Mismatched or bad GLuint?",
                 bufRecord, buffer);

    // default to "not mapped"; only hand back a pointer when a map is live
    *params = NULL;

    if(bufRecord && bufRecord->Map.status != GLResourceRecord::Unmapped)
      *params = (void *)bufRecord->Map.ptr;
  }
  else
  {
    // any other query passes straight through to the real driver
    m_Real.glGetNamedBufferPointervEXT(buffer, pname, params);
  }
}
VkResult WrappedVulkan::vkAllocateMemory(
    VkDevice device,
    const VkMemoryAllocateInfo* pAllocateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDeviceMemory* pMemory)
{
  // Take a local copy so we can patch the memory type index and allocation size
  // without touching the application's struct.
  VkMemoryAllocateInfo info = *pAllocateInfo;

  if(m_State >= WRITING)
  {
    // remap from the fake memory type index exposed to the app to the real one
    info.memoryTypeIndex = GetRecord(device)->memIdxMap[info.memoryTypeIndex];

    // we need to be able to allocate a buffer that covers the whole memory range. However
    // if the memory is e.g. 100 bytes (arbitrary example) and buffers have memory requirements
    // such that it must be bound to a multiple of 128 bytes, then we can't create a buffer
    // that entirely covers a 100 byte allocation.
    // To get around this, we create a buffer of the allocation's size with the properties we
    // want, check its required size, then bump up the allocation size to that as if the application
    // had requested more. We're assuming here no system will require something like "buffer of
    // size N must be bound to memory of size N+O for some value of O overhead bytes".
    //
    // this could be optimised as maybe we'll be creating buffers of multiple sizes, but allocation
    // in vulkan is already expensive and making it a little more expensive isn't a big deal.

    VkBufferCreateInfo bufInfo = {
        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL, 0, info.allocationSize,
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT,
    };

    // since this is very short lived, it's not wrapped.
    // BUGFIX: initialise to VK_NULL_HANDLE — if CreateBuffer fails, the output
    // handle is otherwise indeterminate, and we would both read garbage in the
    // check below and pass garbage to DestroyBuffer.
    VkBuffer buf = VK_NULL_HANDLE;

    VkResult vkr = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
    RDCASSERTEQUAL(vkr, VK_SUCCESS);

    if(vkr == VK_SUCCESS && buf != VK_NULL_HANDLE)
    {
      VkMemoryRequirements mrq = { 0 };
      ObjDisp(device)->GetBufferMemoryRequirements(Unwrap(device), buf, &mrq);

      RDCASSERTMSG("memory requirements less than desired size", mrq.size >= bufInfo.size,
                   mrq.size, bufInfo.size);

      // round up allocation size to allow creation of buffers
      if(mrq.size >= bufInfo.size)
        info.allocationSize = mrq.size;

      // BUGFIX: only destroy the buffer if it was actually created, rather than
      // unconditionally destroying a possibly-invalid handle.
      ObjDisp(device)->DestroyBuffer(Unwrap(device), buf, NULL);
    }
  }

  VkResult ret = ObjDisp(device)->AllocateMemory(Unwrap(device), &info, pAllocator, pMemory);

  // restore the memoryTypeIndex to the original, as that's what we want to serialise,
  // but maintain any potential modifications we made to info.allocationSize
  info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;

  if(ret == VK_SUCCESS)
  {
    ResourceId id = GetResourceManager()->WrapResource(Unwrap(device), *pMemory);

    if(m_State >= WRITING)
    {
      // capturing: serialise the (patched) allocation into a chunk attached to a
      // new resource record for this memory object.
      Chunk *chunk = NULL;

      {
        CACHE_THREAD_SERIALISER();

        SCOPED_SERIALISE_CONTEXT(ALLOC_MEM);
        Serialise_vkAllocateMemory(localSerialiser, device, &info, NULL, pMemory);

        chunk = scope.Get();
      }

      // create resource record for gpu memory
      VkResourceRecord *record = GetResourceManager()->AddResourceRecord(*pMemory);
      RDCASSERT(record);

      record->AddChunk(chunk);

      record->Length = info.allocationSize;

      uint32_t memProps =
          m_PhysicalDeviceData.fakeMemProps->memoryTypes[info.memoryTypeIndex].propertyFlags;

      // if memory is not host visible, so not mappable, don't create map state at all
      if((memProps & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
      {
        record->memMapState = new MemMapState();
        record->memMapState->mapCoherent = (memProps & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
        record->memMapState->refData = NULL;
      }
    }
    else
    {
      // replaying: register the live memory object and stash its creation info.
      GetResourceManager()->AddLiveResource(id, *pMemory);

      m_CreationInfo.m_Memory[id].Init(GetResourceManager(), m_CreationInfo, &info);

      // create a buffer with the whole memory range bound, for copying to and from
      // conveniently (for initial state data)
      VkBuffer buf = VK_NULL_HANDLE;

      VkBufferCreateInfo bufInfo = {
          VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, NULL, 0, info.allocationSize,
          VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT,
      };

      ret = ObjDisp(device)->CreateBuffer(Unwrap(device), &bufInfo, NULL, &buf);
      RDCASSERTEQUAL(ret, VK_SUCCESS);

      ResourceId bufid = GetResourceManager()->WrapResource(Unwrap(device), buf);

      // bind the entire allocation to the helper buffer at offset 0
      ObjDisp(device)->BindBufferMemory(Unwrap(device), Unwrap(buf), Unwrap(*pMemory), 0);

      // register as a live-only resource, so it is cleaned up properly
      GetResourceManager()->AddLiveResource(bufid, buf);
      m_CreationInfo.m_Memory[id].wholeMemBuf = buf;
    }
  }

  return ret;
}