bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes, bool* createdNewBuffer) { SkASSERT(!this->vkIsMapped()); VALIDATE(); if (srcSizeInBytes > fDesc.fSizeInBytes) { return false; } if (!fResource->unique()) { // in use by the command buffer, so we need to create a new one fResource->unref(gpu); fResource = Create(gpu, fDesc); if (createdNewBuffer) { *createdNewBuffer = true; } } void* mapPtr; VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, srcSizeInBytes, 0, &mapPtr)); if (VK_SUCCESS != err) { return false; } memcpy(mapPtr, src, srcSizeInBytes); VK_CALL(gpu, UnmapMemory(gpu->device(), alloc())); return true; }
// Releases the host mapping of this buffer's device memory and clears the
// cached CPU pointer. The buffer must currently be mapped (vkIsMapped()).
void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    // Hand the memory back to the driver; after this, fMapPtr is dead.
    VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc()));

    fMapPtr = nullptr;
}
// Finishes a CPU write to the buffer. Dynamic (host-visible) buffers are
// flushed and unmapped; static buffers upload their CPU staging copy with a
// transfer and then free it. Requires the buffer to be mapped on entry.
void GrVkBuffer::vkUnmap(GrVkGpu* gpu) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
        // BUG FIX: host-visible memory is not guaranteed to be coherent, so
        // flush the mapped range before unmapping or the GPU may never see
        // the CPU writes. Mirrors the sequence used by internalUnmap().
        GrVkMemory::FlushMappedAlloc(gpu, this->alloc());
        VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
    } else {
        // Static buffer: fMapPtr is a CPU-side staging allocation; push it to
        // the device via a transfer, then release it.
        gpu->updateBuffer(this, fMapPtr, this->offset(), this->size());
        delete [] (unsigned char*)fMapPtr;
    }

    fMapPtr = nullptr;
}
// Finishes a CPU write of `size` bytes. Dynamic buffers flush and unmap the
// host-visible memory; static buffers upload the CPU staging copy via a
// transfer and insert a barrier so subsequent reads (as fDesc.fType) see the
// transfer write. Requires the buffer to be mapped on entry.
void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
    VALIDATE();
    SkASSERT(this->vkIsMapped());

    if (fDesc.fDynamic) {
        // Flush first: host-visible memory may be non-coherent.
        GrVkMemory::FlushMappedAlloc(gpu, this->alloc());
        VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
    } else {
        gpu->updateBuffer(this, fMapPtr, this->offset(), size);
        this->addMemoryBarrier(gpu,
                               VK_ACCESS_TRANSFER_WRITE_BIT,
                               buffer_type_to_access_flags(fDesc.fType),
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                               false);
        // BUG FIX: free the CPU staging allocation — it was leaked here.
        // NOTE(review): assumes internalMap allocates it with
        // new unsigned char[], as the sibling vkUnmap does — confirm.
        delete [] (unsigned char*)fMapPtr;
    }

    // BUG FIX: previously only the dynamic branch cleared fMapPtr, so after a
    // static unmap vkIsMapped() stayed true and the next
    // SkASSERT(!this->vkIsMapped()) would fire.
    fMapPtr = nullptr;
}