VkBool32 VulkanExampleBase::createBuffer( VkBufferUsageFlags usage, VkDeviceSize size, void * data, VkBuffer *buffer, VkDeviceMemory *memory) { VkMemoryRequirements memReqs; VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo(); VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo(usage, size); VkResult err = vkCreateBuffer(device, &bufferCreateInfo, nullptr, buffer); assert(!err); vkGetBufferMemoryRequirements(device, *buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); err = vkAllocateMemory(device, &memAlloc, nullptr, memory); assert(!err); if (data != nullptr) { void *mapped; err = vkMapMemory(device, *memory, 0, size, 0, &mapped); assert(!err); memcpy(mapped, data, size); vkUnmapMemory(device, *memory); } err = vkBindBufferMemory(device, *buffer, *memory, 0); assert(!err); return true; }
/**
* Create a buffer on the device
*
* @param usageFlags Usage flag bitmask for the buffer (i.e. index, vertex, uniform buffer)
* @param memoryPropertyFlags Memory properties for this buffer (i.e. device local, host visible, coherent)
* @param size Size of the buffer in bytes
* @param buffer Pointer to the buffer handle acquired by the function
* @param memory Pointer to the memory handle acquired by the function
* @param data Pointer to the data that should be copied to the buffer after creation (optional, if not set, no data is copied over)
*
* @return VK_SUCCESS if buffer handle and memory have been created and (optionally passed) data has been copied
*/
VkResult createBuffer(VkBufferUsageFlags usageFlags, VkMemoryPropertyFlags memoryPropertyFlags, VkDeviceSize size, VkBuffer *buffer, VkDeviceMemory *memory, void *data = nullptr)
{
    // Create the buffer handle
    VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo(usageFlags, size);
    bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VK_CHECK_RESULT(vkCreateBuffer(logicalDevice, &bufferCreateInfo, nullptr, buffer));

    // Create the memory backing up the buffer handle
    VkMemoryRequirements memReqs;
    VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo();
    vkGetBufferMemoryRequirements(logicalDevice, *buffer, &memReqs);
    memAlloc.allocationSize = memReqs.size;
    // Find a memory type index that fits the properties of the buffer
    memAlloc.memoryTypeIndex = getMemoryType(memReqs.memoryTypeBits, memoryPropertyFlags);
    VK_CHECK_RESULT(vkAllocateMemory(logicalDevice, &memAlloc, nullptr, memory));

    // If a pointer to the buffer data has been passed, map the buffer and copy over the data
    if (data != nullptr)
    {
        void *mapped;
        VK_CHECK_RESULT(vkMapMemory(logicalDevice, *memory, 0, size, 0, &mapped));
        memcpy(mapped, data, size);
        vkUnmapMemory(logicalDevice, *memory);
    }

    // Attach the memory to the buffer object
    VK_CHECK_RESULT(vkBindBufferMemory(logicalDevice, *buffer, *memory, 0));

    return VK_SUCCESS;
}
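For orientation, a call to the overload above could look like the following; the payload and destination handle names are illustrative assumptions, not part of the snippet itself.

// Hypothetical caller: a small host-visible, coherent uniform buffer with initial contents.
// mvpMatrix, uniformBuffer and uniformMemory are illustrative names only.
float mvpMatrix[16] = {};
VkBuffer uniformBuffer = VK_NULL_HANDLE;
VkDeviceMemory uniformMemory = VK_NULL_HANDLE;

VK_CHECK_RESULT(createBuffer(
    VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
    sizeof(mvpMatrix),
    &uniformBuffer,
    &uniformMemory,
    mvpMatrix));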
void VkHelper::createBuffer(VkDevice device, VkPhysicalDevice physicalDevice, VkDeviceSize bufferSize, VkBufferUsageFlags usageFlags, VkMemoryPropertyFlags preferedPropertyFlags, VkMemoryPropertyFlags requiredPropertyFlags, VkBuffer* bufferId, VkDeviceMemory* bufferMem)
{
    VkBufferCreateInfo bufferInfo{};
    bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufferInfo.pNext = VK_NULL_HANDLE;
    bufferInfo.flags = 0;
    bufferInfo.size = bufferSize; // mesh->getVertexCount() * mesh->getVertexStride();
    bufferInfo.usage = usageFlags; // VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
    bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufferInfo.pQueueFamilyIndices = nullptr;
    bufferInfo.queueFamilyIndexCount = 0;

    // Note: the error objects must be thrown; constructing a std::runtime_error without
    // throwing it (as the original code did) has no effect.
    if (vkCreateBuffer(device, &bufferInfo, nullptr, bufferId) != VK_SUCCESS)
        throw std::runtime_error("failed to create vertex buffer.");

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, *bufferId, &memReq);

    //VkMemoryPropertyFlags required = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    //VkMemoryPropertyFlags prefered = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    uint32_t memType = findMemoryType(physicalDevice, memReq, requiredPropertyFlags, preferedPropertyFlags);

    VkMemoryAllocateInfo allocInfo{};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.pNext = VK_NULL_HANDLE;
    allocInfo.allocationSize = memReq.size;
    allocInfo.memoryTypeIndex = memType;

    if (vkAllocateMemory(device, &allocInfo, nullptr, bufferMem) != VK_SUCCESS)
        throw std::runtime_error("ERROR: Allocating buffer memory failed.");

    if (vkBindBufferMemory(device, *bufferId, *bufferMem, 0) != VK_SUCCESS)
        throw std::runtime_error("ERROR: Binding Memory to Buffer Failed.");
}
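The helper above hands both a required and a preferred property mask to findMemoryType(). A minimal sketch of such a two-tier selection follows; the name and signature are assumptions for illustration, not the project's actual helper.

// Sketch: prefer a memory type satisfying preferredFlags, fall back to requiredFlags.
static uint32_t pickMemoryType(VkPhysicalDevice physicalDevice,
                               const VkMemoryRequirements& memReq,
                               VkMemoryPropertyFlags requiredFlags,
                               VkMemoryPropertyFlags preferredFlags)
{
    VkPhysicalDeviceMemoryProperties props{};
    vkGetPhysicalDeviceMemoryProperties(physicalDevice, &props);

    // First pass: a type allowed by memoryTypeBits that has all preferred flags.
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        if ((memReq.memoryTypeBits & (1u << i)) &&
            (props.memoryTypes[i].propertyFlags & preferredFlags) == preferredFlags)
            return i;
    }
    // Second pass: settle for the required flags only.
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        if ((memReq.memoryTypeBits & (1u << i)) &&
            (props.memoryTypes[i].propertyFlags & requiredFlags) == requiredFlags)
            return i;
    }
    return UINT32_MAX; // no suitable type found
}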
/* Please see header for specification */ bool Anvil::Buffer::set_memory(Anvil::MemoryBlock* memory_block_ptr) { bool result = false; VkResult result_vk; if (memory_block_ptr == nullptr) { anvil_assert(!(memory_block_ptr == nullptr) ); goto end; } if (m_memory_block_ptr != nullptr) { anvil_assert( (memory_block_ptr == nullptr) ); goto end; } /* Bind the memory object to the buffer object */ m_memory_block_ptr = memory_block_ptr; m_memory_block_ptr->retain(); result_vk = vkBindBufferMemory(m_device_ptr->get_device_vk(), m_buffer, m_memory_block_ptr->get_memory(), memory_block_ptr->get_start_offset() ); anvil_assert_vk_call_succeeded(result_vk); result = is_vk_call_successful(result_vk); end: return result; }
void VertexBuffer::uploadData(std::vector<Vertex>& vertexData) { //create the buffer VkBufferCreateInfo createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; createInfo.size = sizeof(Vertex) * vertexData.size(); createInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; if (vkCreateBuffer(deviceHelper.getDevice(), &createInfo, nullptr, buffer.replace()) != VK_SUCCESS) { throw std::runtime_error("Could not create buffer."); } VkMemoryRequirements memReqs = {}; vkGetBufferMemoryRequirements(deviceHelper.getDevice(), buffer, &memReqs); //allocate the memory VkMemoryAllocateInfo allocInfo = {}; allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; allocInfo.allocationSize = memReqs.size; allocInfo.memoryTypeIndex = deviceHelper.findMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); if (vkAllocateMemory(deviceHelper.getDevice(), &allocInfo, nullptr, bufferMemory.replace()) != VK_SUCCESS) { throw std::runtime_error("Could not allocate buffer memory."); } vkBindBufferMemory(deviceHelper.getDevice(), buffer, bufferMemory, 0); //upload the data void* dataPtr; vkMapMemory(deviceHelper.getDevice(), bufferMemory, 0, createInfo.size, 0, &dataPtr); memcpy(dataPtr, vertexData.data(), createInfo.size); vkUnmapMemory(deviceHelper.getDevice(), bufferMemory); }
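Once uploadData() has filled the buffer, drawing only needs the handle bound as a vertex buffer. A short sketch, with illustrative names for the command buffer and counts:

// Draw-time usage sketch; cmd, vertexBuffer and vertexCount stand in for a command buffer
// in the recording state and the handle/count produced by the upload above.
VkBuffer vertexBuffers[] = { vertexBuffer };
VkDeviceSize offsets[] = { 0 };
vkCmdBindVertexBuffers(cmd, 0, 1, vertexBuffers, offsets);
vkCmdDraw(cmd, vertexCount, 1, 0, 0);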
// Setup pool and buffer for storing pipeline statistics results void setupQueryResultBuffer() { uint32_t bufSize = 2 * sizeof(uint64_t); VkMemoryRequirements memReqs; VkMemoryAllocateInfo memAlloc = vks::initializers::memoryAllocateInfo(); VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo( VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, bufSize); // Results are saved in a host visible buffer for easy access by the application VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &queryResult.buffer)); vkGetBufferMemoryRequirements(device, queryResult.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &queryResult.memory)); VK_CHECK_RESULT(vkBindBufferMemory(device, queryResult.buffer, queryResult.memory, 0)); // Create query pool if (deviceFeatures.pipelineStatisticsQuery) { VkQueryPoolCreateInfo queryPoolInfo = {}; queryPoolInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; queryPoolInfo.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; queryPoolInfo.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT | VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT; queryPoolInfo.queryCount = 2; VK_CHECK_RESULT(vkCreateQueryPool(device, &queryPoolInfo, NULL, &queryPool)); } }
void VkApp::createBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, VDeleter<VkBuffer>& buffer, VDeleter<VkDeviceMemory>& bufferMemory){ VkBufferCreateInfo bufferInfo = {}; bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferInfo.size = size; bufferInfo.usage = usage; bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; if( vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS ){ throw std::runtime_error("failed to create buffer!"); } VkMemoryRequirements memRequirements; vkGetBufferMemoryRequirements(device, buffer, &memRequirements); VkMemoryAllocateInfo allocInfo = {}; allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; allocInfo.allocationSize = memRequirements.size; allocInfo.memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties); if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) != VK_SUCCESS) { throw std::runtime_error("failed to allocate buffer memory!"); } vkBindBufferMemory(device, buffer, bufferMemory, 0); }
Result<Eldritch2::UniquePointer<AssetView>> MeshView::CreateView( Allocator& allocator, Device& device, DeviceMemoryPool& pool, const AssetLibrary& library, const Utf8Char* const name, Range<const char*> rawBytes ) { Eldritch2::UniquePointer<VkVertexInputAttributeDescription[]> attributes( CreateInputAttributeDescriptions( allocator ) ); Eldritch2::UniquePointer<VkVertexInputBindingDescription[]> bindings( CreateVertexInputBindings( allocator ) ); Eldritch2::UniquePointer<Submesh[]> submeshes( CreateSubmeshes( allocator ) ); UniquePointer<VkBuffer> buffer; VkMemoryRequirements memoryRequirements; vkGetBufferMemoryRequirements( device, buffer.Get(), &memoryRequirements ); Result<DeviceMemoryPool::Allocation> allocateMemoryResult( pool.TryAllocateRegion( memoryRequirements.size ) ); if( !allocateMemoryResult ) { library.GetLog()( MessageSeverity::Error, "Unable to allocate device memory for mesh '{}'! (Size: {} bytes)" ET_UTF8_NEWLINE_LITERAL, name, memoryRequirements.size ); return allocateMemoryResult.GetErrorCode(); } const VkResult bindMemoryResult( vkBindBufferMemory( device, buffer.Get(), pool.GetDeviceMemory(), allocateMemoryResult->GetOffsetIntoPoolInBytes() ) ); if( VkResult::VK_SUCCESS != bindMemoryResult ) { library.GetLog()( MessageSeverity::Error, "Unable to bind device memory for mesh '{}'!" ET_UTF8_NEWLINE_LITERAL, name ); } auto result( MakeUnique<MeshView>( allocator, name, eastl::move( buffer ), eastl::move( *allocateMemoryResult ), eastl::move( attributes ), eastl::move( bindings ), eastl::move( submeshes ) ) ); if( !result ) { return Error::OutOfMemory; } return eastl::move( result ); }
void prepareUniformBuffers() { VkMemoryRequirements memReqs; VkBufferCreateInfo bufferInfo = {}; VkMemoryAllocateInfo allocInfo = {}; allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; allocInfo.pNext = nullptr; allocInfo.allocationSize = 0; allocInfo.memoryTypeIndex = 0; bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferInfo.size = sizeof(uboVS); bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &bufferInfo, nullptr, &uniformDataVS.buffer)); vkGetBufferMemoryRequirements(device, uniformDataVS.buffer, &memReqs); allocInfo.allocationSize = memReqs.size; allocInfo.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &allocInfo, nullptr, &(uniformDataVS.memory))); VK_CHECK_RESULT(vkBindBufferMemory(device, uniformDataVS.buffer, uniformDataVS.memory, 0)); uniformDataVS.descriptor.buffer = uniformDataVS.buffer; uniformDataVS.descriptor.offset = 0; uniformDataVS.descriptor.range = sizeof(uboVS); updateUniformBuffers(); }
// Create a buffer for storing the query result // Setup a query pool void setupQueryResultBuffer() { uint32_t bufSize = 2 * sizeof(uint64_t); VkMemoryRequirements memReqs; VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo(); VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo( VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, bufSize); VkResult err = vkCreateBuffer(device, &bufferCreateInfo, nullptr, &queryResult.buffer); assert(!err); vkGetBufferMemoryRequirements(device, queryResult.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); err = vkAllocateMemory(device, &memAlloc, nullptr, &queryResult.memory); assert(!err); err = vkBindBufferMemory(device, queryResult.buffer, queryResult.memory, 0); assert(!err); // Create query pool VkQueryPoolCreateInfo queryPoolInfo = {}; queryPoolInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; queryPoolInfo.queryType = VK_QUERY_TYPE_OCCLUSION; queryPoolInfo.queryCount = 2; err = vkCreateQueryPool(device, &queryPoolInfo, NULL, &queryPool); assert(!err); }
// Create a buffer for storing the query result // Setup a query pool void setupQueryResultBuffer() { uint32_t bufSize = 2 * sizeof(uint64_t); VkMemoryRequirements memReqs; VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo(); VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo( VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, bufSize); // Results are saved in a host visible buffer for easy access by the application VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &queryResult.buffer)); vkGetBufferMemoryRequirements(device, queryResult.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &queryResult.memory)); VK_CHECK_RESULT(vkBindBufferMemory(device, queryResult.buffer, queryResult.memory, 0)); // Create query pool VkQueryPoolCreateInfo queryPoolInfo = {}; queryPoolInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; // Query pool will be created for occlusion queries queryPoolInfo.queryType = VK_QUERY_TYPE_OCCLUSION; queryPoolInfo.queryCount = 2; VK_CHECK_RESULT(vkCreateQueryPool(device, &queryPoolInfo, NULL, &queryPool)); }
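The host-visible buffer created above is the destination for the query results. A sketch of retrieving them, assuming a command buffer in the recording state and that the queries have already been issued; the variable names around the copy are illustrative:

// Copy both occlusion query results (64-bit, waiting for availability) into the buffer.
vkCmdCopyQueryPoolResults(
    commandBuffer,
    queryPool,
    0,                        // first query
    2,                        // query count
    queryResult.buffer,
    0,                        // destination offset
    sizeof(uint64_t),         // stride between results
    VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);

// Once the command buffer has finished executing, map the buffer and read the two values.
// If the chosen memory type is not HOST_COHERENT, vkInvalidateMappedMemoryRanges should be
// called before reading.
uint64_t passedSamples[2];
void *mapped;
VK_CHECK_RESULT(vkMapMemory(device, queryResult.memory, 0, sizeof(passedSamples), 0, &mapped));
memcpy(passedSamples, mapped, sizeof(passedSamples));
vkUnmapMemory(device, queryResult.memory);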
void prepareUniformBuffers() { // Prepare and initialize uniform buffer containing shader uniforms VkMemoryRequirements memReqs; // Vertex shader uniform buffer block VkBufferCreateInfo bufferInfo = {}; VkMemoryAllocateInfo allocInfo = vkTools::initializers::memoryAllocateInfo(); VkResult err; bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferInfo.size = sizeof(computeUbo); bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; err = vkCreateBuffer(device, &bufferInfo, nullptr, &uniformDataCompute.buffer); assert(!err); vkGetBufferMemoryRequirements(device, uniformDataCompute.buffer, &memReqs); allocInfo.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &allocInfo.memoryTypeIndex); err = vkAllocateMemory(device, &allocInfo, nullptr, &(uniformDataCompute.memory)); assert(!err); err = vkBindBufferMemory(device, uniformDataCompute.buffer, uniformDataCompute.memory, 0); assert(!err); uniformDataCompute.descriptor.buffer = uniformDataCompute.buffer; uniformDataCompute.descriptor.offset = 0; uniformDataCompute.descriptor.range = sizeof(computeUbo); updateUniformBuffers(); }
void Renderer::createBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags memory_properties, VkBuffer & buffer, VkDeviceMemory & buffer_memory)
{
    VkBufferCreateInfo buffer_create_info{};
    buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_create_info.size = size;
    buffer_create_info.usage = usage;
    buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    ErrorCheck(vkCreateBuffer(_device, &buffer_create_info, nullptr, &buffer));

    VkMemoryRequirements mem_requirements{};
    vkGetBufferMemoryRequirements(_device, buffer, &mem_requirements);

    uint32_t type_filter = mem_requirements.memoryTypeBits;
    // Initialize to an invalid index so a failed search is detectable instead of leaving
    // memory_type uninitialized when no suitable type exists.
    uint32_t memory_type = UINT32_MAX;
    for (uint32_t i = 0; i < _gpu_memory_properties.memoryTypeCount; i++) {
        if ((type_filter & (1 << i)) && (_gpu_memory_properties.memoryTypes[i].propertyFlags & memory_properties) == memory_properties) {
            memory_type = i;
            break;
        }
    }
    assert(memory_type != UINT32_MAX && "no suitable memory type found for buffer");

    VkMemoryAllocateInfo memory_allocate_info{};
    memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_allocate_info.allocationSize = mem_requirements.size;
    memory_allocate_info.memoryTypeIndex = memory_type;

    ErrorCheck(vkAllocateMemory(_device, &memory_allocate_info, nullptr, &buffer_memory));
    ErrorCheck(vkBindBufferMemory(_device, buffer, buffer_memory, 0));
}
DeviceBufferAllocator::Handle DeviceBufferAllocator::GetNewBufferHandle(EDeviceBufferType eDeviceBufferType, size_t size, u32 queueFamilyIndex) { Handle handle = {}; VkBufferUsageFlags usageFlags = AsVkFlag_EGeometryBufferType[eDeviceBufferType]; CreateBuffer(usageFlags, size, queueFamilyIndex, &handle.buffer); VkMemoryRequirements bufferMemReqs; vkGetBufferMemoryRequirements(gDevice.VkHandle(), handle.buffer, &bufferMemReqs); // TODO DO STAGING OF COURSE ! WE WANT DEVICE LOCAL MEMORY ! u32 bufferMemTypeIndex = getOptimalMemTypeIndex(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, bufferMemReqs.memoryTypeBits); if (bufferMemTypeIndex == UINT32_MAX) { exit_eprintf("Could not find any suitable memory type for this buffer"); } if (mPerUsageAlignement[eDeviceBufferType] == ((VkDeviceSize)~0)) { mPerUsageAlignement[eDeviceBufferType] = bufferMemReqs.alignment; } std::vector<Bank>& usageBank = mFamilyBanks[eDeviceBufferType][queueFamilyIndex]; u32 bankIdx = 0; u64 bankOffset = npos64; for (; bankIdx < usageBank.size(); ++bankIdx) { bankOffset = usageBank[bankIdx].allocator.GetContiguousFreeMemoryOffset(bufferMemReqs.size); if (bankOffset != npos64) { break; } } if (bankOffset == npos64) { Bank bank; bank.size = closestSuperiorMult(bufferMemReqs.size, USAGE_MEMORY_BANK_SIZE[eDeviceBufferType]); bank.allocator.Init(mPerUsageAlignement[eDeviceBufferType], bank.size); auto bufferMemAllocInfo = vkstruct<VkMemoryAllocateInfo>(); bufferMemAllocInfo.allocationSize = bank.size; bufferMemAllocInfo.memoryTypeIndex = bufferMemTypeIndex; gVkLastRes = vkAllocateMemory(gDevice.VkHandle(), &bufferMemAllocInfo, nullptr, &bank.memory); VKFN_LAST_RES_SUCCESS_OR_QUIT(vkAllocateMemory); bankOffset = bank.allocator.GetContiguousFreeMemoryOffset(bufferMemReqs.size); usageBank.push_back(bank); } gVkLastRes = vkBindBufferMemory(gDevice.VkHandle(), handle.buffer, usageBank[bankIdx].memory, bankOffset); VKFN_LAST_RES_SUCCESS_OR_QUIT(vkBindBufferMemory); handle.eGeometryBufferType = eDeviceBufferType; handle.bankIdx = bankIdx; handle.bankOffset = bankOffset; handle.queueFamilyIndex = queueFamilyIndex; return handle; }
XCamReturn VKDevice::bind_buffer (VkBuffer buf, VkDeviceMemory mem, VkDeviceSize offset) { XCAM_VK_CHECK_RETURN ( ERROR, vkBindBufferMemory (_dev_id, buf, mem, offset), XCAM_RETURN_ERROR_VULKAN, "vkdevice bind buffer to mem failed"); return XCAM_RETURN_NO_ERROR; }
VkBool32 Example::createBuffer(vkts::IBufferSP& buffer, vkts::IDeviceMemorySP& deviceMemory, const VkBufferCreateInfo& bufferCreateInfo, const VkMemoryPropertyFlags memoryPropertyFlags) const { VkResult result; // buffer = vkts::bufferCreate(device->getDevice(), bufferCreateInfo.flags, bufferCreateInfo.size, bufferCreateInfo.usage, bufferCreateInfo.sharingMode, bufferCreateInfo.queueFamilyIndexCount, bufferCreateInfo.pQueueFamilyIndices); if (!buffer.get()) { vkts::logPrint(VKTS_LOG_ERROR, __FILE__, __LINE__, "Could not create image."); return VK_FALSE; } // VkMemoryRequirements memoryRequirements; buffer->getBufferMemoryRequirements(memoryRequirements); // VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties; physicalDevice->getPhysicalDeviceMemoryProperties(physicalDeviceMemoryProperties); deviceMemory = vkts::deviceMemoryCreate(device->getDevice(), memoryRequirements, physicalDeviceMemoryProperties.memoryTypeCount, physicalDeviceMemoryProperties.memoryTypes, memoryPropertyFlags); if (!deviceMemory.get()) { vkts::logPrint(VKTS_LOG_ERROR, __FILE__, __LINE__, "Could not allocate memory."); return VK_FALSE; } // result = vkBindBufferMemory(device->getDevice(), buffer->getBuffer(), deviceMemory->getDeviceMemory(), 0); if (result != VK_SUCCESS) { vkts::logPrint(VKTS_LOG_ERROR, __FILE__, __LINE__, "Could not bind buffer memory."); return VK_FALSE; } return VK_TRUE; }
xdl_int XdevLVertexBufferVulkan::init(xdl_uint8* src, xdl_uint size) { m_bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; m_bufferCreateInfo.pNext = nullptr; m_bufferCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; m_bufferCreateInfo.size = size; m_bufferCreateInfo.queueFamilyIndexCount = 0; m_bufferCreateInfo.pQueueFamilyIndices = nullptr; m_bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; m_bufferCreateInfo.flags = 0; VkResult result = vkCreateBuffer(m_device, &m_bufferCreateInfo, nullptr, &m_buffer); if(VK_SUCCESS != result) { return RET_FAILED; } vkGetBufferMemoryRequirements(m_device, m_buffer, &m_memReqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = nullptr; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = m_memReqs.size; result = vkAllocateMemory(m_device, &alloc_info, nullptr, &(m_deviceMemory)); if(VK_SUCCESS != result) { return RET_FAILED; } xdl_uint8* pData; result = vkMapMemory(m_device, m_deviceMemory, 0, m_memReqs.size, 0, (void **)&pData); if(VK_SUCCESS != result) { return RET_FAILED; } memcpy(pData, src, size); vkUnmapMemory(m_device, m_deviceMemory); result = vkBindBufferMemory(m_device, m_buffer, m_deviceMemory, 0); if(VK_SUCCESS != result) { return RET_FAILED; } m_size = size; return RET_SUCCESS; }
bool VulkanPushBuffer::AddBuffer() { BufInfo info; VkBufferCreateInfo b{ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; b.size = size_; b.flags = 0; b.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; b.sharingMode = VK_SHARING_MODE_EXCLUSIVE; b.queueFamilyIndexCount = 0; b.pQueueFamilyIndices = nullptr; VkResult res = vkCreateBuffer(device_, &b, nullptr, &info.buffer); if (VK_SUCCESS != res) { _assert_msg_(G3D, false, "vkCreateBuffer failed! result=%d", (int)res); return false; } // Make validation happy. VkMemoryRequirements reqs; vkGetBufferMemoryRequirements(device_, info.buffer, &reqs); // TODO: We really should use memoryTypeIndex here.. // Okay, that's the buffer. Now let's allocate some memory for it. VkMemoryAllocateInfo alloc{ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; // TODO: Should check here that memoryTypeIndex_ matches reqs.memoryTypeBits. alloc.memoryTypeIndex = memoryTypeIndex_; alloc.allocationSize = reqs.size; res = vkAllocateMemory(device_, &alloc, nullptr, &info.deviceMemory); if (VK_SUCCESS != res) { _assert_msg_(G3D, false, "vkAllocateMemory failed! size=%d result=%d", (int)reqs.size, (int)res); vkDestroyBuffer(device_, info.buffer, nullptr); return false; } res = vkBindBufferMemory(device_, info.buffer, info.deviceMemory, 0); if (VK_SUCCESS != res) { ELOG("vkBindBufferMemory failed! result=%d", (int)res); vkFreeMemory(device_, info.deviceMemory, nullptr); vkDestroyBuffer(device_, info.buffer, nullptr); return false; } buffers_.push_back(info); buf_ = buffers_.size() - 1; return true; }
void prepareUniformBuffers() { // Prepare and initialize uniform buffer containing shader uniforms VkMemoryRequirements memReqs; // Vertex shader uniform buffer block VkBufferCreateInfo bufferInfo = {}; VkMemoryAllocateInfo allocInfo = {}; allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; allocInfo.pNext = NULL; allocInfo.allocationSize = 0; allocInfo.memoryTypeIndex = 0; VkResult err; bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferInfo.size = sizeof(uboVS); bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; // Create a new buffer err = vkCreateBuffer(device, &bufferInfo, nullptr, &uniformDataVS.buffer); assert(!err); // Get memory requirements including size, alignment and memory type vkGetBufferMemoryRequirements(device, uniformDataVS.buffer, &memReqs); allocInfo.allocationSize = memReqs.size; // Gets the appropriate memory type for this type of buffer allocation // Only memory types that are visible to the host getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &allocInfo.memoryTypeIndex); // Allocate memory for the uniform buffer err = vkAllocateMemory(device, &allocInfo, nullptr, &(uniformDataVS.memory)); assert(!err); // Bind memory to buffer err = vkBindBufferMemory(device, uniformDataVS.buffer, uniformDataVS.memory, 0); assert(!err); // Store information in the uniform's descriptor uniformDataVS.descriptor.buffer = uniformDataVS.buffer; uniformDataVS.descriptor.offset = 0; uniformDataVS.descriptor.range = sizeof(uboVS); updateUniformBuffers(); }
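The snippet ends by calling updateUniformBuffers(); in samples of this shape that usually maps the host-visible allocation and copies the CPU-side block, roughly as sketched below (the matrix updates themselves are omitted; this is not the sample's exact code).

void updateUniformBuffers()
{
    // ... update the members of uboVS (projection/model/view matrices) here ...

    uint8_t *pData;
    VkResult err = vkMapMemory(device, uniformDataVS.memory, 0, sizeof(uboVS), 0, (void **)&pData);
    assert(!err);
    memcpy(pData, &uboVS, sizeof(uboVS));
    // Note: the allocation above only requested HOST_VISIBLE; without HOST_COHERENT a
    // vkFlushMappedMemoryRanges call would strictly be required before the GPU reads the data.
    vkUnmapMemory(device, uniformDataVS.memory);
}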
void VulkanGear::prepareUniformBuffer() { // Vertex shader uniform buffer block VkMemoryAllocateInfo allocInfo = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; VkBufferCreateInfo bufferInfo = vkTools::initializers::bufferCreateInfo( VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(ubo)); VK_CHECK_RESULT(vkCreateBuffer(device, &bufferInfo, nullptr, &uniformData.buffer)); vkGetBufferMemoryRequirements(device, uniformData.buffer, &memReqs); allocInfo.allocationSize = memReqs.size; allocInfo.memoryTypeIndex = exampleBase->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &allocInfo, nullptr, &uniformData.memory)); VK_CHECK_RESULT(vkBindBufferMemory(device, uniformData.buffer, uniformData.memory, 0)); uniformData.descriptor.buffer = uniformData.buffer; uniformData.descriptor.offset = 0; uniformData.descriptor.range = sizeof(ubo); uniformData.allocSize = allocInfo.allocationSize; }
VkBool32 VulkanExampleBase::createBuffer(VkBufferUsageFlags usageFlags, VkMemoryPropertyFlags memoryPropertyFlags, VkDeviceSize size, void * data, VkBuffer * buffer, VkDeviceMemory * memory) { VkMemoryRequirements memReqs; VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo(); VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo(usageFlags, size); vkTools::checkResult(vkCreateBuffer(device, &bufferCreateInfo, nullptr, buffer)); vkGetBufferMemoryRequirements(device, *buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, memoryPropertyFlags, &memAlloc.memoryTypeIndex); vkTools::checkResult(vkAllocateMemory(device, &memAlloc, nullptr, memory)); if (data != nullptr) { void *mapped; vkTools::checkResult(vkMapMemory(device, *memory, 0, size, 0, &mapped)); memcpy(mapped, data, size); vkUnmapMemory(device, *memory); } vkTools::checkResult(vkBindBufferMemory(device, *buffer, *memory, 0)); return true; }
Buffer::ApiHandle createBuffer(size_t size, Buffer::BindFlags bindFlags, Device::MemoryType memType) { VkBufferCreateInfo bufferInfo = {}; bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferInfo.flags = 0; bufferInfo.size = size; bufferInfo.usage = getBufferUsageFlag(bindFlags); bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; bufferInfo.queueFamilyIndexCount = 0; bufferInfo.pQueueFamilyIndices = nullptr; VkBuffer buffer; vk_call(vkCreateBuffer(gpDevice->getApiHandle(), &bufferInfo, nullptr, &buffer)); // Get the required buffer size VkMemoryRequirements reqs; vkGetBufferMemoryRequirements(gpDevice->getApiHandle(), buffer, &reqs); VkDeviceMemory mem = allocateDeviceMemory(memType, reqs.memoryTypeBits, reqs.size); vk_call(vkBindBufferMemory(gpDevice->getApiHandle(), buffer, mem, 0)); Buffer::ApiHandle apiHandle = Buffer::ApiHandle::create(buffer, mem); return apiHandle; }
bool StreamBuffer::ResizeBuffer(size_t size) { // Create the buffer descriptor VkBufferCreateInfo buffer_create_info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType nullptr, // const void* pNext 0, // VkBufferCreateFlags flags static_cast<VkDeviceSize>(size), // VkDeviceSize size m_usage, // VkBufferUsageFlags usage VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode 0, // uint32_t queueFamilyIndexCount nullptr // const uint32_t* pQueueFamilyIndices }; VkBuffer buffer = VK_NULL_HANDLE; VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: "); return false; } // Get memory requirements (types etc) for this buffer VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements); // Aim for a coherent mapping if possible. u32 memory_type_index = g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &m_coherent_mapping); // Allocate memory for backing this buffer VkMemoryAllocateInfo memory_allocate_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType nullptr, // const void* pNext memory_requirements.size, // VkDeviceSize allocationSize memory_type_index // uint32_t memoryTypeIndex }; VkDeviceMemory memory = VK_NULL_HANDLE; res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); return false; } // Bind memory to buffer res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr); return false; } // Map this buffer into user-space void* mapped_ptr = nullptr; res = vkMapMemory(g_vulkan_context->GetDevice(), memory, 0, size, 0, &mapped_ptr); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkMapMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr); return false; } // Unmap current host pointer (if there was a previous buffer) if (m_host_pointer) vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory); // Destroy the backings for the buffer after the command buffer executes if (m_buffer != VK_NULL_HANDLE) g_command_buffer_mgr->DeferBufferDestruction(m_buffer); if (m_memory != VK_NULL_HANDLE) g_command_buffer_mgr->DeferDeviceMemoryDestruction(m_memory); // Replace with the new buffer m_buffer = buffer; m_memory = memory; m_host_pointer = reinterpret_cast<u8*>(mapped_ptr); m_current_size = size; m_current_offset = 0; m_current_gpu_position = 0; m_tracked_fences.clear(); return true; }
/** * Attach the allocated memory block to the buffer * * @param offset (Optional) Byte offset (from the beginning) for the memory region to bind * * @return VkResult of the bindBufferMemory call */ VkResult bind(VkDeviceSize offset = 0) { return vkBindBufferMemory(device, buffer, memory, offset); }
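A typical call site for the helper above, assuming the surrounding buffer-wrapper object whose device, buffer and memory members it uses:

VK_CHECK_RESULT(stagingBuffer.bind());          // bind the whole allocation at offset 0
// When sub-allocating from a larger VkDeviceMemory block, the offset passed here must be a
// multiple of the alignment reported by vkGetBufferMemoryRequirements for this buffer:
// VK_CHECK_RESULT(stagingBuffer.bind(alignedOffset));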
void loadCubemap(std::string filename, VkFormat format, bool forceLinearTiling) { #if defined(__ANDROID__) // Textures are stored inside the apk on Android (compressed) // So they need to be loaded via the asset manager AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING); assert(asset); size_t size = AAsset_getLength(asset); assert(size > 0); void *textureData = malloc(size); AAsset_read(asset, textureData, size); AAsset_close(asset); gli::texture_cube texCube(gli::load((const char*)textureData, size)); #else gli::texture_cube texCube(gli::load(filename)); #endif assert(!texCube.empty()); cubeMap.width = texCube.extent().x; cubeMap.height = texCube.extent().y; cubeMap.mipLevels = texCube.levels(); VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; // Create a host-visible staging buffer that contains the raw image data VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo(); bufferCreateInfo.size = texCube.size(); // This buffer is used as a transfer source for the buffer copy bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer)); // Get memory requirements for the staging buffer (alignment, memory type bits) vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs); memAllocInfo.allocationSize = memReqs.size; // Get memory type index for a host visible buffer memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &stagingMemory)); VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0)); // Copy texture data into staging buffer uint8_t *data; VK_CHECK_RESULT(vkMapMemory(device, stagingMemory, 0, memReqs.size, 0, (void **)&data)); memcpy(data, texCube.data(), texCube.size()); vkUnmapMemory(device, stagingMemory); // Create optimal tiled target image VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.mipLevels = cubeMap.mipLevels; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imageCreateInfo.extent = { cubeMap.width, cubeMap.height, 1 }; imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; // Cube faces count as array layers in Vulkan imageCreateInfo.arrayLayers = 6; // This flag is required for cube map images imageCreateInfo.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &cubeMap.image)); vkGetImageMemoryRequirements(device, cubeMap.image, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &cubeMap.deviceMemory)); VK_CHECK_RESULT(vkBindImageMemory(device, cubeMap.image, cubeMap.deviceMemory, 0)); VkCommandBuffer copyCmd = 
VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); // Setup buffer copy regions for each face including all of it's miplevels std::vector<VkBufferImageCopy> bufferCopyRegions; uint32_t offset = 0; for (uint32_t face = 0; face < 6; face++) { for (uint32_t level = 0; level < cubeMap.mipLevels; level++) { VkBufferImageCopy bufferCopyRegion = {}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = level; bufferCopyRegion.imageSubresource.baseArrayLayer = face; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.width = texCube[face][level].extent().x; bufferCopyRegion.imageExtent.height = texCube[face][level].extent().y; bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; bufferCopyRegions.push_back(bufferCopyRegion); // Increase offset into staging buffer for next level / face offset += texCube[face][level].size(); } } // Image barrier for optimal image (target) // Set initial layout for all array layers (faces) of the optimal (target) tiled texture VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.baseMipLevel = 0; subresourceRange.levelCount = cubeMap.mipLevels; subresourceRange.layerCount = 6; vks::tools::setImageLayout( copyCmd, cubeMap.image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy the cube map faces from the staging buffer to the optimal tiled image vkCmdCopyBufferToImage( copyCmd, stagingBuffer, cubeMap.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<uint32_t>(bufferCopyRegions.size()), bufferCopyRegions.data() ); // Change texture image layout to shader read after all faces have been copied cubeMap.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vks::tools::setImageLayout( copyCmd, cubeMap.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cubeMap.imageLayout, subresourceRange); VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true); // Create sampler VkSamplerCreateInfo sampler = vks::initializers::samplerCreateInfo(); sampler.magFilter = VK_FILTER_LINEAR; sampler.minFilter = VK_FILTER_LINEAR; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeV = sampler.addressModeU; sampler.addressModeW = sampler.addressModeU; sampler.mipLodBias = 0.0f; sampler.compareOp = VK_COMPARE_OP_NEVER; sampler.minLod = 0.0f; sampler.maxLod = cubeMap.mipLevels; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; sampler.maxAnisotropy = 1.0f; if (vulkanDevice->features.samplerAnisotropy) { sampler.maxAnisotropy = vulkanDevice->properties.limits.maxSamplerAnisotropy; sampler.anisotropyEnable = VK_TRUE; } VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &cubeMap.sampler)); // Create image view VkImageViewCreateInfo view = vks::initializers::imageViewCreateInfo(); // Cube map view type view.viewType = VK_IMAGE_VIEW_TYPE_CUBE; view.format = format; view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; view.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }; // 6 array layers (faces) view.subresourceRange.layerCount = 6; // Set number of mip levels view.subresourceRange.levelCount = cubeMap.mipLevels; view.image = cubeMap.image; VK_CHECK_RESULT(vkCreateImageView(device, &view, nullptr, &cubeMap.view)); // Clean up staging resources vkFreeMemory(device, 
stagingMemory, nullptr); vkDestroyBuffer(device, stagingBuffer, nullptr); }
/* ================ GLMesh_LoadVertexBuffer Upload the given alias model's mesh to a VBO Original code by MH from RMQEngine ================ */ static void GLMesh_LoadVertexBuffer (qmodel_t *m, const aliashdr_t *hdr) { int totalvbosize = 0; int remaining_size; int copy_offset; const aliasmesh_t *desc; const short *indexes; const trivertx_t *trivertexes; byte *vbodata; int f; VkResult err; // count the sizes we need // ericw -- RMQEngine stored these vbo*ofs values in aliashdr_t, but we must not // mutate Mod_Extradata since it might be reloaded from disk, so I moved them to qmodel_t // (test case: roman1.bsp from arwop, 64mb heap) m->vboindexofs = 0; m->vboxyzofs = 0; totalvbosize += (hdr->numposes * hdr->numverts_vbo * sizeof (meshxyz_t)); // ericw -- what RMQEngine called nummeshframes is called numposes in QuakeSpasm m->vbostofs = totalvbosize; totalvbosize += (hdr->numverts_vbo * sizeof (meshst_t)); if (!hdr->numindexes) return; if (!totalvbosize) return; // grab the pointers to data in the extradata desc = (aliasmesh_t *) ((byte *) hdr + hdr->meshdesc); indexes = (short *) ((byte *) hdr + hdr->indexes); trivertexes = (trivertx_t *) ((byte *)hdr + hdr->vertexes); { const int totalindexsize = hdr->numindexes * sizeof (unsigned short); // Allocate index buffer & upload to GPU VkBufferCreateInfo buffer_create_info; memset(&buffer_create_info, 0, sizeof(buffer_create_info)); buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = totalindexsize; buffer_create_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; err = vkCreateBuffer(vulkan_globals.device, &buffer_create_info, NULL, &m->index_buffer); if (err != VK_SUCCESS) Sys_Error("vkCreateBuffer failed"); GL_SetObjectName((uint64_t)m->index_buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, m->name); VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(vulkan_globals.device, m->index_buffer, &memory_requirements); uint32_t memory_type_index = GL_MemoryTypeFromProperties(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0); VkDeviceSize heap_size = INDEX_HEAP_SIZE_MB * (VkDeviceSize)1024 * (VkDeviceSize)1024; VkDeviceSize aligned_offset = GL_AllocateFromHeaps(GEOMETRY_MAX_HEAPS, index_buffer_heaps, heap_size, memory_type_index, memory_requirements.size, memory_requirements.alignment, &m->index_heap, &m->index_heap_node, &num_vulkan_mesh_allocations, "Index Buffers"); err = vkBindBufferMemory(vulkan_globals.device, m->index_buffer, m->index_heap->memory, aligned_offset); if (err != VK_SUCCESS) Sys_Error("vkBindBufferMemory failed"); remaining_size = totalindexsize; copy_offset = 0; while (remaining_size > 0) { const int size_to_copy = q_min(remaining_size, STAGING_BUFFER_SIZE_KB * 1024); VkBuffer staging_buffer; VkCommandBuffer command_buffer; int staging_offset; unsigned char * staging_memory = R_StagingAllocate(size_to_copy, 1, &command_buffer, &staging_buffer, &staging_offset); memcpy(staging_memory, (byte*)indexes + copy_offset, size_to_copy); VkBufferCopy region; region.srcOffset = staging_offset; region.dstOffset = copy_offset; region.size = size_to_copy; vkCmdCopyBuffer(command_buffer, staging_buffer, m->index_buffer, 1, ®ion); copy_offset += size_to_copy; remaining_size -= size_to_copy; } } // create the vertex buffer (empty) vbodata = (byte *) malloc(totalvbosize); memset(vbodata, 0, totalvbosize); // fill in the vertices at the start of the buffer for (f = 0; f < hdr->numposes; f++) // ericw -- what RMQEngine called 
nummeshframes is called numposes in QuakeSpasm { int v; meshxyz_t *xyz = (meshxyz_t *) (vbodata + (f * hdr->numverts_vbo * sizeof (meshxyz_t))); const trivertx_t *tv = trivertexes + (hdr->numverts * f); for (v = 0; v < hdr->numverts_vbo; v++) { trivertx_t trivert = tv[desc[v].vertindex]; xyz[v].xyz[0] = trivert.v[0]; xyz[v].xyz[1] = trivert.v[1]; xyz[v].xyz[2] = trivert.v[2]; xyz[v].xyz[3] = 1; // need w 1 for 4 byte vertex compression // map the normal coordinates in [-1..1] to [-127..127] and store in an unsigned char. // this introduces some error (less than 0.004), but the normals were very coarse // to begin with xyz[v].normal[0] = 127 * r_avertexnormals[trivert.lightnormalindex][0]; xyz[v].normal[1] = 127 * r_avertexnormals[trivert.lightnormalindex][1]; xyz[v].normal[2] = 127 * r_avertexnormals[trivert.lightnormalindex][2]; xyz[v].normal[3] = 0; // unused; for 4-byte alignment } } // fill in the ST coords at the end of the buffer { meshst_t *st; float hscale, vscale; //johnfitz -- padded skins hscale = (float)hdr->skinwidth/(float)TexMgr_PadConditional(hdr->skinwidth); vscale = (float)hdr->skinheight/(float)TexMgr_PadConditional(hdr->skinheight); //johnfitz st = (meshst_t *) (vbodata + m->vbostofs); for (f = 0; f < hdr->numverts_vbo; f++) { st[f].st[0] = hscale * ((float) desc[f].st[0] + 0.5f) / (float) hdr->skinwidth; st[f].st[1] = vscale * ((float) desc[f].st[1] + 0.5f) / (float) hdr->skinheight; } } // Allocate vertex buffer & upload to GPU { VkBufferCreateInfo buffer_create_info; memset(&buffer_create_info, 0, sizeof(buffer_create_info)); buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = totalvbosize; buffer_create_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; err = vkCreateBuffer(vulkan_globals.device, &buffer_create_info, NULL, &m->vertex_buffer); if (err != VK_SUCCESS) Sys_Error("vkCreateBuffer failed"); GL_SetObjectName((uint64_t)m->vertex_buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, m->name); VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(vulkan_globals.device, m->vertex_buffer, &memory_requirements); uint32_t memory_type_index = GL_MemoryTypeFromProperties(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0); VkDeviceSize heap_size = VERTEX_HEAP_SIZE_MB * (VkDeviceSize)1024 * (VkDeviceSize)1024; VkDeviceSize aligned_offset = GL_AllocateFromHeaps(GEOMETRY_MAX_HEAPS, vertex_buffer_heaps, heap_size, memory_type_index, memory_requirements.size, memory_requirements.alignment, &m->vertex_heap, &m->vertex_heap_node, &num_vulkan_mesh_allocations, "Vertex Buffers"); err = vkBindBufferMemory(vulkan_globals.device, m->vertex_buffer, m->vertex_heap->memory, aligned_offset); if (err != VK_SUCCESS) Sys_Error("vkBindBufferMemory failed"); remaining_size = totalvbosize; copy_offset = 0; while (remaining_size > 0) { const int size_to_copy = q_min(remaining_size, STAGING_BUFFER_SIZE_KB * 1024); VkBuffer staging_buffer; VkCommandBuffer command_buffer; int staging_offset; unsigned char * staging_memory = R_StagingAllocate(size_to_copy, 1, &command_buffer, &staging_buffer, &staging_offset); memcpy(staging_memory, (byte*)vbodata + copy_offset, size_to_copy); VkBufferCopy region; region.srcOffset = staging_offset; region.dstOffset = copy_offset; region.size = size_to_copy; vkCmdCopyBuffer(command_buffer, staging_buffer, m->vertex_buffer, 1, ®ion); copy_offset += size_to_copy; remaining_size -= size_to_copy; } } free (vbodata); }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Draw Cube"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); if (info.gpu_props.limits.maxDescriptorSetUniformBuffersDynamic < 1) { std::cout << "No dynamic uniform buffers supported\n"; exit(-1); } init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); /* Set up uniform buffer with 2 transform matrices in it */ info.Projection = glm::perspective(glm::radians(45.0f), 1.0f, 0.1f, 100.0f); info.View = glm::lookAt( glm::vec3(0, 3, 10), // Camera is at (0,3,10), in World Space glm::vec3(0, 0, 0), // and looks at the origin glm::vec3(0, -1, 0) // Head is up (set to 0,-1,0 to look upside-down) ); info.Model = glm::mat4(1.0f); // Vulkan clip space has inverted Y and half Z. info.Clip = glm::mat4(1.0f, 0.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 1.0f); info.MVP = info.Clip * info.Projection * info.View * info.Model; /* VULKAN_KEY_START */ info.Model = glm::translate(info.Model, glm::vec3(1.5, 1.5, 1.5)); glm::mat4 MVP2 = info.Clip * info.Projection * info.View * info.Model; VkDeviceSize buf_size = sizeof(info.MVP); if (info.gpu_props.limits.minUniformBufferOffsetAlignment) buf_size = (buf_size + info.gpu_props.limits.minUniformBufferOffsetAlignment - 1) & ~(info.gpu_props.limits.minUniformBufferOffsetAlignment - 1); VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 2 * buf_size; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &info.uniform_data.buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, info.uniform_data.buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &alloc_info.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &alloc_info, NULL, &(info.uniform_data.mem)); assert(res == VK_SUCCESS); /* Map the buffer memory and copy both matrices */ uint8_t *pData; res = vkMapMemory(info.device, info.uniform_data.mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &info.MVP, sizeof(info.MVP)); pData += buf_size; memcpy(pData, &MVP2, sizeof(MVP2)); vkUnmapMemory(info.device, info.uniform_data.mem); res = vkBindBufferMemory(info.device, info.uniform_data.buf, info.uniform_data.mem, 0); 
assert(res == VK_SUCCESS); info.uniform_data.buffer_info.buffer = info.uniform_data.buf; info.uniform_data.buffer_info.offset = 0; info.uniform_data.buffer_info.range = buf_size; /* Init desciptor and pipeline layouts - descriptor type is * UNIFORM_BUFFER_DYNAMIC */ VkDescriptorSetLayoutBinding layout_bindings[2]; layout_bindings[0].binding = 0; layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; layout_bindings[0].descriptorCount = 1; layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_bindings[0].pImmutableSamplers = NULL; /* Next take layout bindings and use them to create a descriptor set layout */ VkDescriptorSetLayoutCreateInfo descriptor_layout = {}; descriptor_layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; descriptor_layout.pNext = NULL; descriptor_layout.bindingCount = 1; descriptor_layout.pBindings = layout_bindings; info.desc_layout.resize(NUM_DESCRIPTOR_SETS); res = vkCreateDescriptorSetLayout(info.device, &descriptor_layout, NULL, info.desc_layout.data()); assert(res == VK_SUCCESS); /* Now use the descriptor layout to create a pipeline layout */ VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = NUM_DESCRIPTOR_SETS; pPipelineLayoutCreateInfo.pSetLayouts = info.desc_layout.data(); res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); /* Create descriptor pool with UNIFOM_BUFFER_DYNAMIC type */ VkDescriptorPoolSize type_count[1]; type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; type_count[0].descriptorCount = 1; VkDescriptorPoolCreateInfo descriptor_pool = {}; descriptor_pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; descriptor_pool.pNext = NULL; descriptor_pool.maxSets = 1; descriptor_pool.poolSizeCount = 1; descriptor_pool.pPoolSizes = type_count; res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo desc_alloc_info[1]; desc_alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; desc_alloc_info[0].pNext = NULL; desc_alloc_info[0].descriptorPool = info.desc_pool; desc_alloc_info[0].descriptorSetCount = NUM_DESCRIPTOR_SETS; desc_alloc_info[0].pSetLayouts = info.desc_layout.data(); /* Allocate descriptor set with UNIFORM_BUFFER_DYNAMIC */ info.desc_set.resize(NUM_DESCRIPTOR_SETS); res = vkAllocateDescriptorSets(info.device, desc_alloc_info, info.desc_set.data()); assert(res == VK_SUCCESS); VkWriteDescriptorSet writes[1]; writes[0] = {}; writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writes[0].pNext = NULL; writes[0].dstSet = info.desc_set[0]; writes[0].descriptorCount = 1; writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; writes[0].pBufferInfo = &info.uniform_data.buffer_info; writes[0].dstArrayElement = 0; writes[0].dstBinding = 0; vkUpdateDescriptorSets(info.device, 1, writes, 0, NULL); init_pipeline_cache(info); init_pipeline(info, depthPresent); VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; 
clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); /* The first draw should use the first matrix in the buffer */ uint32_t uni_offsets[1] = {0}; vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 1, uni_offsets); const VkDeviceSize vtx_offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, vtx_offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); uni_offsets[0] = (uint32_t)buf_size; /* The second draw should use the second matrix in the buffer */ vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 1, uni_offsets); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo 
submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "dynamicuniform"); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
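The dynamic-uniform-buffer sample above rounds the per-draw slice up to minUniformBufferOffsetAlignment. Spelling the arithmetic out: with sizeof(info.MVP) == 64 bytes and an alignment of 256 (a common limit), (64 + 256 - 1) & ~(256 - 1) == 256, so the second matrix lives at byte offset 256 and the buffer is 2 * 256 = 512 bytes. A small helper capturing the same expression (the name is ours, not the sample's):

// Round value up to the next multiple of alignment; alignment limits reported by Vulkan
// are guaranteed to be powers of two, which this bit trick relies on.
static VkDeviceSize alignUp(VkDeviceSize value, VkDeviceSize alignment)
{
    return alignment ? (value + alignment - 1) & ~(alignment - 1) : value;
}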
std::unique_ptr<StagingTexture2D> StagingTexture2DBuffer::Create(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format) { // Assume tight packing. u32 row_stride = Util::GetTexelSize(format) * width; u32 buffer_size = row_stride * height; VkBufferUsageFlags usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VkBufferCreateInfo buffer_create_info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType nullptr, // const void* pNext 0, // VkBufferCreateFlags flags buffer_size, // VkDeviceSize size usage, // VkBufferUsageFlags usage VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode 0, // uint32_t queueFamilyIndexCount nullptr // const uint32_t* pQueueFamilyIndices }; VkBuffer buffer; VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: "); return nullptr; } VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements); bool is_coherent; u32 memory_type_index; if (type == STAGING_BUFFER_TYPE_READBACK) { memory_type_index = g_vulkan_context->GetReadbackMemoryType(memory_requirements.memoryTypeBits, &is_coherent); } else { memory_type_index = g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &is_coherent); } VkMemoryAllocateInfo memory_allocate_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType nullptr, // const void* pNext memory_requirements.size, // VkDeviceSize allocationSize memory_type_index // uint32_t memoryTypeIndex }; VkDeviceMemory memory; res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); return nullptr; } res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr); return nullptr; } return std::make_unique<StagingTexture2DBuffer>(type, width, height, format, row_stride, buffer, memory, buffer_size, is_coherent); }
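/* The is_coherent flag returned above decides whether CPU writes through a mapped pointer need an explicit flush before the GPU can see them. A minimal sketch of that pattern follows, assuming the buffer's memory is host-visible and the Vulkan headers plus <cstring> are available; the function name WriteToStagingBuffer is illustrative and not part of the class. */
static bool WriteToStagingBuffer(VkDevice device, VkDeviceMemory memory, VkDeviceSize size,
                                 const void *src, bool is_coherent) {
    void *mapped = nullptr;
    if (vkMapMemory(device, memory, 0, size, 0, &mapped) != VK_SUCCESS)
        return false;
    memcpy(mapped, src, static_cast<size_t>(size));
    if (!is_coherent) {
        // Non-coherent memory: flush the written range so the device sees the data.
        VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, memory, 0, VK_WHOLE_SIZE};
        vkFlushMappedMemoryRanges(device, 1, &range);
    }
    vkUnmapMemory(device, memory);
    return true;
}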
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Draw Cube"; process_command_line_args(info, argc, argv); init_global_layer_properties(info); info.instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); #ifdef _WIN32 info.instance_extension_names.push_back( VK_KHR_WIN32_SURFACE_EXTENSION_NAME); #else info.instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); #endif info.device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, false); init_renderpass(info, DEPTH_PRESENT); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, DEPTH_PRESENT); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); init_descriptor_pool(info, false); init_descriptor_set(info, false); init_pipeline_cache(info); init_pipeline(info, DEPTH_PRESENT); /* VULKAN_KEY_START */ VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); /* Allocate a uniform buffer that will take query results. 
*/ VkBuffer query_result_buf; VkDeviceMemory query_result_mem; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; buf_info.size = 4 * sizeof(uint64_t); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &query_result_buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, query_result_buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &alloc_info.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &alloc_info, NULL, &query_result_mem); assert(res == VK_SUCCESS); res = vkBindBufferMemory(info.device, query_result_buf, query_result_mem, 0); assert(res == VK_SUCCESS); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_info; query_pool_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_info.pNext = NULL; query_pool_info.queryType = VK_QUERY_TYPE_OCCLUSION; query_pool_info.flags = 0; query_pool_info.queryCount = 2; query_pool_info.pipelineStatistics = 0; res = vkCreateQueryPool(info.device, &query_pool_info, NULL, &query_pool); assert(res == VK_SUCCESS); vkCmdResetQueryPool(info.cmd, query_pool, 0 /*startQuery*/, 2 /*queryCount*/); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); VkViewport viewport; viewport.height = (float)info.height; viewport.width = (float)info.width; viewport.minDepth = (float)0.0f; viewport.maxDepth = (float)1.0f; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport); VkRect2D scissor; scissor.extent.width = info.width; scissor.extent.height = info.height; scissor.offset.x = 0; scissor.offset.y = 0; vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor); vkCmdBeginQuery(info.cmd, query_pool, 0 /*slot*/, 0 /*flags*/); vkCmdEndQuery(info.cmd, query_pool, 0 /*slot*/); vkCmdBeginQuery(info.cmd, query_pool, 1 /*slot*/, 0 /*flags*/); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); /* A query begun inside a render pass must also be ended inside that render pass */ vkCmdEndQuery(info.cmd, query_pool, 1 /*slot*/); vkCmdEndRenderPass(info.cmd); vkCmdCopyQueryPoolResults( info.cmd, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, query_result_buf, 0 /*dstOffset*/, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = 
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = 0; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); res = vkQueueWaitIdle(info.queue); assert(res == VK_SUCCESS); uint64_t samples_passed[4]; samples_passed[0] = 0; samples_passed[1] = 0; res = vkGetQueryPoolResults( info.device, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, sizeof(samples_passed) /*dataSize*/, samples_passed, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); assert(res == VK_SUCCESS); std::cout << "vkGetQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed[1] << "\n"; /* Read back query result from buffer */ uint64_t *samples_passed_ptr; res = vkMapMemory(info.device, query_result_mem, 0, mem_reqs.size, 0, (void **)&samples_passed_ptr); assert(res == VK_SUCCESS); std::cout << "vkCmdCopyQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed_ptr[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed_ptr[1] << "\n"; vkUnmapMemory(info.device, query_result_mem); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if 
(info.save_images) write_ppm(info, "occlusion_query"); vkDestroyBuffer(info.device, query_result_buf, NULL); vkFreeMemory(info.device, query_result_mem, NULL); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyQueryPool(info.device, query_pool, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
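/* Both samples above leave a TODO about handling VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR from vkAcquireNextImageKHR. A minimal sketch of the usual policy follows; it is an assumption rather than part of the sample framework, and recreate_swap_chain is a hypothetical helper that would destroy and rebuild the swapchain and its dependent resources. */
static bool acquire_next_image(struct sample_info &info, VkSemaphore acquire_semaphore) {
    VkResult res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX,
                                         acquire_semaphore, VK_NULL_HANDLE,
                                         &info.current_buffer);
    if (res == VK_ERROR_OUT_OF_DATE_KHR) {
        // The surface has changed (e.g. a resize); rebuild the swapchain before rendering again.
        recreate_swap_chain(info);
        return false;
    }
    // VK_SUBOPTIMAL_KHR still returns a usable image: render this frame, rebuild afterwards.
    assert(res == VK_SUCCESS || res == VK_SUBOPTIMAL_KHR);
    return true;
}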
// Setup and fill the compute shader storage buffers for // vertex positions and velocities void prepareStorageBuffers() { float destPosX = 0.0f; float destPosY = 0.0f; // Initial particle positions std::vector<Particle> particleBuffer; for (int i = 0; i < PARTICLE_COUNT; ++i) { // Position float aspectRatio = (float)height / (float)width; float rndVal = (float)rand() / (float)(RAND_MAX / (360.0f * 3.14f * 2.0f)); float rndRad = (float)rand() / (float)(RAND_MAX) * 0.5f; Particle p; p.pos = glm::vec4( destPosX + cos(rndVal) * rndRad * aspectRatio, destPosY + sin(rndVal) * rndRad, 0.0f, 1.0f); p.col = glm::vec4( (float)(rand() % 255) / 255.0f, (float)(rand() % 255) / 255.0f, (float)(rand() % 255) / 255.0f, 1.0f); p.vel = glm::vec4(0.0f); particleBuffer.push_back(p); } // Buffer size is the same for all storage buffers uint32_t storageBufferSize = particleBuffer.size() * sizeof(Particle); VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; VkResult err; void *data; struct StagingBuffer { VkDeviceMemory memory; VkBuffer buffer; } stagingBuffer; // Allocate and fill host-visible staging buffer object VkBufferCreateInfo vBufferInfo = vkTools::initializers::bufferCreateInfo( VK_BUFFER_USAGE_TRANSFER_SRC_BIT, storageBufferSize); vkTools::checkResult(vkCreateBuffer(device, &vBufferInfo, nullptr, &stagingBuffer.buffer)); vkGetBufferMemoryRequirements(device, stagingBuffer.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); vkTools::checkResult(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffer.memory)); vkTools::checkResult(vkMapMemory(device, stagingBuffer.memory, 0, storageBufferSize, 0, &data)); memcpy(data, particleBuffer.data(), storageBufferSize); vkUnmapMemory(device, stagingBuffer.memory); vkTools::checkResult(vkBindBufferMemory(device, stagingBuffer.buffer, stagingBuffer.memory, 0)); // Allocate device local storage buffer object (also bound as the particle vertex buffer, so it needs vertex buffer usage) vBufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; vkTools::checkResult(vkCreateBuffer(device, &vBufferInfo, nullptr, &computeStorageBuffer.buffer)); vkGetBufferMemoryRequirements(device, computeStorageBuffer.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAlloc.memoryTypeIndex); vkTools::checkResult(vkAllocateMemory(device, &memAlloc, nullptr, &computeStorageBuffer.memory)); vkTools::checkResult(vkBindBufferMemory(device, computeStorageBuffer.buffer, computeStorageBuffer.memory, 0)); // Copy from host to device createSetupCommandBuffer(); VkBufferCopy copyRegion = {}; copyRegion.size = storageBufferSize; vkCmdCopyBuffer( setupCmdBuffer, stagingBuffer.buffer, computeStorageBuffer.buffer, 1, &copyRegion); flushSetupCommandBuffer(); // Destroy staging buffer vkDestroyBuffer(device, stagingBuffer.buffer, nullptr); vkFreeMemory(device, stagingBuffer.memory, nullptr); computeStorageBuffer.descriptor.buffer = computeStorageBuffer.buffer; computeStorageBuffer.descriptor.offset = 0; computeStorageBuffer.descriptor.range = storageBufferSize; // Binding description vertices.bindingDescriptions.resize(1); vertices.bindingDescriptions[0] = vkTools::initializers::vertexInputBindingDescription( VERTEX_BUFFER_BIND_ID, sizeof(Particle), VK_VERTEX_INPUT_RATE_VERTEX); // Attribute descriptions // Describes memory layout and shader 
positions vertices.attributeDescriptions.resize(2); // Location 0 : Position vertices.attributeDescriptions[0] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0); // Location 1 : Color vertices.attributeDescriptions[1] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 1, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(float) * 4); // Assign to vertex buffer vertices.inputState = vkTools::initializers::pipelineVertexInputStateCreateInfo(); vertices.inputState.vertexBindingDescriptionCount = vertices.bindingDescriptions.size(); vertices.inputState.pVertexBindingDescriptions = vertices.bindingDescriptions.data(); vertices.inputState.vertexAttributeDescriptionCount = vertices.attributeDescriptions.size(); vertices.inputState.pVertexAttributeDescriptions = vertices.attributeDescriptions.data(); }
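/* The vertex input state above assumes a tightly packed Particle layout: position at byte offset 0 and color at byte offset sizeof(float) * 4, with the velocity visible only to the compute shader. A sketch of a struct consistent with those offsets follows; the actual definition lives elsewhere in the example, so treat the exact field order here as an assumption. */
struct Particle {
    glm::vec4 pos;  // Location 0 in the vertex shader, updated each frame by the compute shader
    glm::vec4 col;  // Location 1 in the vertex shader (offset sizeof(float) * 4)
    glm::vec4 vel;  // Consumed only by the compute shader; not declared as a vertex attribute
};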