// Setup and fill the compute shader storage buffers for // vertex positions and velocities void prepareStorageBuffers() { float destPosX = 0.0f; float destPosY = 0.0f; // Initial particle positions std::vector<Particle> particleBuffer; for (int i = 0; i < PARTICLE_COUNT; ++i) { // Position float aspectRatio = (float)height / (float)width; float rndVal = (float)rand() / (float)(RAND_MAX / (360.0f * 3.14f * 2.0f)); float rndRad = (float)rand() / (float)(RAND_MAX) * 0.5f; Particle p; p.pos = glm::vec4( destPosX + cos(rndVal) * rndRad * aspectRatio, destPosY + sin(rndVal) * rndRad, 0.0f, 1.0f); p.col = glm::vec4( (float)(rand() % 255) / 255.0f, (float)(rand() % 255) / 255.0f, (float)(rand() % 255) / 255.0f, 1.0f); p.vel = glm::vec4(0.0f); particleBuffer.push_back(p); } // Buffer size is the same for all storage buffers uint32_t storageBufferSize = particleBuffer.size() * sizeof(Particle); VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; VkResult err; void *data; // Allocate and fill storage buffer object VkBufferCreateInfo vBufferInfo = vkTools::initializers::bufferCreateInfo( VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, storageBufferSize); err = vkCreateBuffer(device, &vBufferInfo, nullptr, &computeStorageBuffer.buffer); assert(!err); vkGetBufferMemoryRequirements(device, computeStorageBuffer.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); err = vkAllocateMemory(device, &memAlloc, nullptr, &computeStorageBuffer.memory); assert(!err); err = vkMapMemory(device, computeStorageBuffer.memory, 0, storageBufferSize, 0, &data); assert(!err); memcpy(data, particleBuffer.data(), storageBufferSize); vkUnmapMemory(device, computeStorageBuffer.memory); err = vkBindBufferMemory(device, computeStorageBuffer.buffer, computeStorageBuffer.memory, 0); assert(!err); computeStorageBuffer.descriptor.buffer = 
computeStorageBuffer.buffer; computeStorageBuffer.descriptor.offset = 0; computeStorageBuffer.descriptor.range = storageBufferSize; // Binding description vertices.bindingDescriptions.resize(1); vertices.bindingDescriptions[0] = vkTools::initializers::vertexInputBindingDescription( VERTEX_BUFFER_BIND_ID, sizeof(Particle), VK_VERTEX_INPUT_RATE_VERTEX); // Attribute descriptions // Describes memory layout and shader positions vertices.attributeDescriptions.resize(2); // Location 0 : Position vertices.attributeDescriptions[0] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0); // Location 1 : Color vertices.attributeDescriptions[1] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 1, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(float) * 4); // Assign to vertex buffer vertices.inputState = vkTools::initializers::pipelineVertexInputStateCreateInfo(); vertices.inputState.vertexBindingDescriptionCount = vertices.bindingDescriptions.size(); vertices.inputState.pVertexBindingDescriptions = vertices.bindingDescriptions.data(); vertices.inputState.vertexAttributeDescriptionCount = vertices.attributeDescriptions.size(); vertices.inputState.pVertexAttributeDescriptions = vertices.attributeDescriptions.data(); }
// Vulkan vertex buffer sample: creates and fills a host-visible vertex
// buffer, binds it inside a render pass, then tears everything down.
// Returns 0 on success; failures abort via assert.
int sample_main() {
    VkResult U_ASSERT_ONLY res;
    bool U_ASSERT_ONLY pass;
    struct sample_info info = {};
    char sample_title[] = "Vertex Buffer Sample";
    const bool depthPresent = true;

    // Boilerplate: instance, device, swapchain, command buffer,
    // depth buffer, render pass and framebuffers
    init_global_layer_properties(info);
    init_instance_extension_names(info);
    init_device_extension_names(info);
    init_instance(info, sample_title);
    init_enumerate_device(info);
    init_window_size(info, 500, 500);
    init_connection(info);
    init_window(info);
    init_swapchain_extension(info);
    init_device(info);
    init_command_pool(info);
    init_command_buffer(info);
    execute_begin_command_buffer(info);
    init_device_queue(info);
    init_swap_chain(info);
    init_depth_buffer(info);
    init_renderpass(info, depthPresent);
    init_framebuffers(info, depthPresent);

    /* VULKAN_KEY_START */
    /*
     * Set up a vertex buffer:
     * - Create a buffer
     * - Map it and write the vertex data into it
     * - Bind it using vkCmdBindVertexBuffers
     * - Later, at pipeline creation,
     * - fill in vertex input part of the pipeline with relevent data
     */
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
    buf_info.size = sizeof(g_vb_solid_face_colors_Data);
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    buf_info.flags = 0;
    res = vkCreateBuffer(info.device, &buf_info, NULL, &info.vertex_buffer.buf);
    assert(res == VK_SUCCESS);

    // Allocate backing memory from a host-visible (mappable) heap
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(info.device, info.vertex_buffer.buf, &mem_reqs);
    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    alloc_info.allocationSize = mem_reqs.size;
    pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits,
                                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                       &alloc_info.memoryTypeIndex);
    assert(pass);
    res = vkAllocateMemory(info.device, &alloc_info, NULL,
                           &(info.vertex_buffer.mem));
    assert(res == VK_SUCCESS);

    // Map, copy the cube vertex data in, unmap, then bind the memory
    uint8_t *pData;
    res = vkMapMemory(info.device, info.vertex_buffer.mem, 0, mem_reqs.size, 0,
                      (void **)&pData);
    assert(res == VK_SUCCESS);
    memcpy(pData, g_vb_solid_face_colors_Data,
           sizeof(g_vb_solid_face_colors_Data));
    vkUnmapMemory(info.device, info.vertex_buffer.mem);
    res = vkBindBufferMemory(info.device, info.vertex_buffer.buf,
                             info.vertex_buffer.mem, 0);
    assert(res == VK_SUCCESS);

    /* We won't use these here, but we will need this info when creating the
     * pipeline */
    // Vertex layout: position (vec4) then color (vec4), interleaved
    info.vi_binding.binding = 0;
    info.vi_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
    info.vi_binding.stride = sizeof(g_vb_solid_face_colors_Data[0]);
    info.vi_attribs[0].binding = 0;
    info.vi_attribs[0].location = 0;
    info.vi_attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
    info.vi_attribs[0].offset = 0;
    info.vi_attribs[1].binding = 0;
    info.vi_attribs[1].location = 1;
    info.vi_attribs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT;
    info.vi_attribs[1].offset = 16;  // color starts after the 16-byte position

    const VkDeviceSize offsets[1] = {0};

    /* We cannot bind the vertex buffer until we begin a renderpass */
    VkClearValue clear_values[2];
    clear_values[0].color.float32[0] = 0.2f;
    clear_values[0].color.float32[1] = 0.2f;
    clear_values[0].color.float32[2] = 0.2f;
    clear_values[0].color.float32[3] = 0.2f;
    clear_values[1].depthStencil.depth = 1.0f;
    clear_values[1].depthStencil.stencil = 0;
    VkRenderPassBeginInfo rp_begin = {};
    rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rp_begin.pNext = NULL;
    rp_begin.renderPass = info.render_pass;
    rp_begin.framebuffer = info.framebuffers[info.current_buffer];
    rp_begin.renderArea.offset.x = 0;
    rp_begin.renderArea.offset.y = 0;
    rp_begin.renderArea.extent.width = info.width;
    rp_begin.renderArea.extent.height = info.height;
    rp_begin.clearValueCount = 2;
    rp_begin.pClearValues = clear_values;

    vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
    // Bind the vertex buffer at binding 0; nothing is drawn in this sample
    vkCmdBindVertexBuffers(info.cmd, 0,             /* Start Binding */
                           1,                       /* Binding Count */
                           &info.vertex_buffer.buf, /* pBuffers */
                           offsets);                /* pOffsets */
    vkCmdEndRenderPass(info.cmd);
    execute_end_command_buffer(info);
    execute_queue_command_buffer(info);
    /* VULKAN_KEY_END */

    // Teardown in reverse order of creation
    vkDestroyBuffer(info.device, info.vertex_buffer.buf, NULL);
    vkFreeMemory(info.device, info.vertex_buffer.mem, NULL);
    destroy_framebuffers(info);
    destroy_renderpass(info);
    destroy_depth_buffer(info);
    destroy_swap_chain(info);
    destroy_command_buffer(info);
    destroy_command_pool(info);
    destroy_window(info);
    destroy_device(info);
    destroy_instance(info);
    return 0;
}
bool Cubemap::Init(VulkanDevice * device, VulkanCommandBuffer * cmdBuffer, std::string cubemapDir) { VkResult result; void * pData; mipMapLevels = -1; std::vector<MipMap> mipMapsRight; std::vector<MipMap> mipMapsLeft; std::vector<MipMap> mipMapsTop; std::vector<MipMap> mipMapsBottom; std::vector<MipMap> mipMapsBack; std::vector<MipMap> mipMapsFront; // Read each cube face if (!ReadCubeFace(cubemapDir + "/right.rct", mipMapsRight)) return false; if (!ReadCubeFace(cubemapDir + "/left.rct", mipMapsLeft)) return false; if (!ReadCubeFace(cubemapDir + "/up.rct", mipMapsTop)) return false; if (!ReadCubeFace(cubemapDir + "/down.rct", mipMapsBottom)) return false; if (!ReadCubeFace(cubemapDir + "/back.rct", mipMapsBack)) return false; if (!ReadCubeFace(cubemapDir + "/front.rct", mipMapsFront)) return false; unsigned int totalTextureSize = 0; for (unsigned int i = 0; i < mipMapsRight.size(); i++) totalTextureSize += mipMapsRight[i].size; for (unsigned int i = 0; i < mipMapsLeft.size(); i++) totalTextureSize += mipMapsLeft[i].size; for (unsigned int i = 0; i < mipMapsTop.size(); i++) totalTextureSize += mipMapsTop[i].size; for (unsigned int i = 0; i < mipMapsBottom.size(); i++) totalTextureSize += mipMapsBottom[i].size; for (unsigned int i = 0; i < mipMapsBack.size(); i++) totalTextureSize += mipMapsBack[i].size; for (unsigned int i = 0; i < mipMapsFront.size(); i++) totalTextureSize += mipMapsFront[i].size; // Create an array of bits which stores all of the texture data std::vector<unsigned char> textureData; for (unsigned int i = 0; i < mipMapsRight.size(); i++) for (unsigned int j = 0; j < mipMapsRight[i].size; j++) textureData.push_back(mipMapsRight[i].data[j]); for (unsigned int i = 0; i < mipMapsLeft.size(); i++) for (unsigned int j = 0; j < mipMapsLeft[i].size; j++) textureData.push_back(mipMapsLeft[i].data[j]); for (unsigned int i = 0; i < mipMapsTop.size(); i++) for (unsigned int j = 0; j < mipMapsTop[i].size; j++) textureData.push_back(mipMapsTop[i].data[j]); for 
(unsigned int i = 0; i < mipMapsBottom.size(); i++) for (unsigned int j = 0; j < mipMapsBottom[i].size; j++) textureData.push_back(mipMapsBottom[i].data[j]); for (unsigned int i = 0; i < mipMapsBack.size(); i++) for (unsigned int j = 0; j < mipMapsBack[i].size; j++) textureData.push_back(mipMapsBack[i].data[j]); for (unsigned int i = 0; i < mipMapsFront.size(); i++) for (unsigned int j = 0; j < mipMapsFront[i].size; j++) textureData.push_back(mipMapsFront[i].data[j]); for (int i = 0; i < mipMapsRight.size(); i++) delete[] mipMapsRight[i].data; for (int i = 0; i < mipMapsLeft.size(); i++) delete[] mipMapsLeft[i].data; for (int i = 0; i < mipMapsTop.size(); i++) delete[] mipMapsTop[i].data; for (int i = 0; i < mipMapsBottom.size(); i++) delete[] mipMapsBottom[i].data; for (int i = 0; i < mipMapsBack.size(); i++) delete[] mipMapsBack[i].data; for (int i = 0; i < mipMapsFront.size(); i++) delete[] mipMapsFront[i].data; VkMemoryRequirements memReq{}; VkMemoryAllocateInfo allocInfo{}; allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCI{}; bufferCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferCI.size = totalTextureSize; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE; result = vkCreateBuffer(device->GetDevice(), &bufferCI, VK_NULL_HANDLE, &stagingBuffer); if (result != VK_SUCCESS) return false; vkGetBufferMemoryRequirements(device->GetDevice(), stagingBuffer, &memReq); allocInfo.allocationSize = memReq.size; if (!device->MemoryTypeFromProperties(memReq.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &allocInfo.memoryTypeIndex)) return false; result = vkAllocateMemory(device->GetDevice(), &allocInfo, VK_NULL_HANDLE, &stagingMemory); if (result != VK_SUCCESS) return false; result = vkBindBufferMemory(device->GetDevice(), stagingBuffer, stagingMemory, 0); if (result != VK_SUCCESS) return false; result = 
vkMapMemory(device->GetDevice(), stagingMemory, 0, memReq.size, 0, &pData); if (result != VK_SUCCESS) return false; memcpy(pData, textureData.data(), textureData.size()); vkUnmapMemory(device->GetDevice(), stagingMemory); std::vector<VkBufferImageCopy> bufferCopyRegions; uint32_t offset = 0; for (int face = 0; face < 6; face++) { for (unsigned int level = 0; level < mipMapLevels; level++) { VkBufferImageCopy bufferCopyRegion{}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = level; bufferCopyRegion.imageSubresource.baseArrayLayer = face; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; // Every face has the same width, height and mipmap bufferCopyRegion.imageExtent.width = mipMapsRight[level].width; bufferCopyRegion.imageExtent.height = mipMapsRight[level].height; offset += (uint32_t)mipMapsRight[level].size; bufferCopyRegions.push_back(bufferCopyRegion); } } VkImageCreateInfo imageCI{}; imageCI.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; imageCI.imageType = VK_IMAGE_TYPE_2D; imageCI.format = VK_FORMAT_R8G8B8A8_UNORM; imageCI.mipLevels = mipMapLevels; imageCI.arrayLayers = 6; imageCI.samples = VK_SAMPLE_COUNT_1_BIT; imageCI.tiling = VK_IMAGE_TILING_OPTIMAL; imageCI.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; imageCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCI.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imageCI.extent.width = mipMapsRight[0].width; imageCI.extent.height = mipMapsRight[0].height; imageCI.extent.depth = 1; imageCI.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; result = vkCreateImage(device->GetDevice(), &imageCI, VK_NULL_HANDLE, &textureImage); if (result != VK_SUCCESS) return false; vkGetImageMemoryRequirements(device->GetDevice(), textureImage, &memReq); VkMemoryAllocateInfo memAlloc{}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.allocationSize = 
memReq.size; if (!device->MemoryTypeFromProperties(memReq.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAlloc.memoryTypeIndex)) return false; result = vkAllocateMemory(device->GetDevice(), &memAlloc, VK_NULL_HANDLE, &textureMemory); if (result != VK_SUCCESS) return false; result = vkBindImageMemory(device->GetDevice(), textureImage, textureMemory, 0); if (result != VK_SUCCESS) return false; VkImageSubresourceRange range{}; range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; range.baseMipLevel = 0; range.levelCount = mipMapLevels; range.layerCount = 6; cmdBuffer->BeginRecording(); VulkanTools::SetImageLayout(textureImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &range, cmdBuffer, device, false); vkCmdCopyBufferToImage(cmdBuffer->GetCommandBuffer(), stagingBuffer, textureImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (uint32_t)bufferCopyRegions.size(), bufferCopyRegions.data()); VulkanTools::SetImageLayout(textureImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, &range, cmdBuffer, device, false); cmdBuffer->EndRecording(); cmdBuffer->Execute(device, NULL, NULL, NULL, true); vkFreeMemory(device->GetDevice(), stagingMemory, VK_NULL_HANDLE); vkDestroyBuffer(device->GetDevice(), stagingBuffer, VK_NULL_HANDLE); VkImageViewCreateInfo viewCI{}; viewCI.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; viewCI.image = textureImage; viewCI.viewType = VK_IMAGE_VIEW_TYPE_CUBE; viewCI.format = VK_FORMAT_R8G8B8A8_UNORM; viewCI.components.r = VK_COMPONENT_SWIZZLE_R; viewCI.components.g = VK_COMPONENT_SWIZZLE_G; viewCI.components.b = VK_COMPONENT_SWIZZLE_B; viewCI.components.a = VK_COMPONENT_SWIZZLE_A; viewCI.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; viewCI.subresourceRange.baseMipLevel = 0; viewCI.subresourceRange.baseArrayLayer = 0; viewCI.subresourceRange.layerCount = 6; viewCI.subresourceRange.levelCount = mipMapLevels; result = 
vkCreateImageView(device->GetDevice(), &viewCI, VK_NULL_HANDLE, &textureImageView); if (result != VK_SUCCESS) return false; return true; }
void prepareVertices(bool useStagingBuffers) { struct Vertex { float position[3]; float color[3]; }; std::vector<Vertex> vertexBuffer = { { { 1.0f, 1.0f, 0.0f }, { 1.0f, 0.0f, 0.0f } }, { { -1.0f, 1.0f, 0.0f }, { 0.0f, 1.0f, 0.0f } }, { { 0.0f, -1.0f, 0.0f }, { 0.0f, 0.0f, 1.0f } } }; uint32_t vertexBufferSize = static_cast<uint32_t>(vertexBuffer.size()) * sizeof(Vertex); std::vector<uint32_t> indexBuffer = { 0, 1, 2 }; indices.count = static_cast<uint32_t>(indexBuffer.size()); uint32_t indexBufferSize = indices.count * sizeof(uint32_t); VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; VkMemoryRequirements memReqs; void *data; if (useStagingBuffers) { struct StagingBuffer { VkDeviceMemory memory; VkBuffer buffer; }; struct { StagingBuffer vertices; StagingBuffer indices; } stagingBuffers; VkBufferCreateInfo vertexBufferInfo = {}; vertexBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; vertexBufferInfo.size = vertexBufferSize; vertexBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, nullptr, &stagingBuffers.vertices.buffer)); vkGetBufferMemoryRequirements(device, stagingBuffers.vertices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffers.vertices.memory)); VK_CHECK_RESULT(vkMapMemory(device, stagingBuffers.vertices.memory, 0, memAlloc.allocationSize, 0, &data)); memcpy(data, vertexBuffer.data(), vertexBufferSize); vkUnmapMemory(device, stagingBuffers.vertices.memory); VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffers.vertices.buffer, stagingBuffers.vertices.memory, 0)); vertexBufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, 
nullptr, &vertices.buffer)); vkGetBufferMemoryRequirements(device, vertices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &vertices.memory)); VK_CHECK_RESULT(vkBindBufferMemory(device, vertices.buffer, vertices.memory, 0)); VkBufferCreateInfo indexbufferInfo = {}; indexbufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; indexbufferInfo.size = indexBufferSize; indexbufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &stagingBuffers.indices.buffer)); vkGetBufferMemoryRequirements(device, stagingBuffers.indices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffers.indices.memory)); VK_CHECK_RESULT(vkMapMemory(device, stagingBuffers.indices.memory, 0, indexBufferSize, 0, &data)); memcpy(data, indexBuffer.data(), indexBufferSize); vkUnmapMemory(device, stagingBuffers.indices.memory); VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffers.indices.buffer, stagingBuffers.indices.memory, 0)); indexbufferInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &indices.buffer)); vkGetBufferMemoryRequirements(device, indices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &indices.memory)); VK_CHECK_RESULT(vkBindBufferMemory(device, indices.buffer, indices.memory, 0)); VkCommandBufferBeginInfo cmdBufferBeginInfo = {}; 
cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmdBufferBeginInfo.pNext = nullptr; VkCommandBuffer copyCmd = getCommandBuffer(true); VkBufferCopy copyRegion = {}; copyRegion.size = vertexBufferSize; vkCmdCopyBuffer(copyCmd, stagingBuffers.vertices.buffer, vertices.buffer, 1, ©Region); copyRegion.size = indexBufferSize; vkCmdCopyBuffer(copyCmd, stagingBuffers.indices.buffer, indices.buffer, 1, ©Region); flushCommandBuffer(copyCmd); vkDestroyBuffer(device, stagingBuffers.vertices.buffer, nullptr); vkFreeMemory(device, stagingBuffers.vertices.memory, nullptr); vkDestroyBuffer(device, stagingBuffers.indices.buffer, nullptr); vkFreeMemory(device, stagingBuffers.indices.memory, nullptr); } else { VkBufferCreateInfo vertexBufferInfo = {}; vertexBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; vertexBufferInfo.size = vertexBufferSize; vertexBufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, nullptr, &vertices.buffer)); vkGetBufferMemoryRequirements(device, vertices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &vertices.memory)); VK_CHECK_RESULT(vkMapMemory(device, vertices.memory, 0, memAlloc.allocationSize, 0, &data)); memcpy(data, vertexBuffer.data(), vertexBufferSize); vkUnmapMemory(device, vertices.memory); VK_CHECK_RESULT(vkBindBufferMemory(device, vertices.buffer, vertices.memory, 0)); VkBufferCreateInfo indexbufferInfo = {}; indexbufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; indexbufferInfo.size = indexBufferSize; indexbufferInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &indices.buffer)); vkGetBufferMemoryRequirements(device, indices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = 
getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &indices.memory)); VK_CHECK_RESULT(vkMapMemory(device, indices.memory, 0, indexBufferSize, 0, &data)); memcpy(data, indexBuffer.data(), indexBufferSize); vkUnmapMemory(device, indices.memory); VK_CHECK_RESULT(vkBindBufferMemory(device, indices.buffer, indices.memory, 0)); } vertices.inputBinding.binding = VERTEX_BUFFER_BIND_ID; vertices.inputBinding.stride = sizeof(Vertex); vertices.inputBinding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; vertices.inputAttributes.resize(2); vertices.inputAttributes[0].binding = VERTEX_BUFFER_BIND_ID; vertices.inputAttributes[0].location = 0; vertices.inputAttributes[0].format = VK_FORMAT_R32G32B32_SFLOAT; vertices.inputAttributes[0].offset = offsetof(Vertex, position); vertices.inputAttributes[1].binding = VERTEX_BUFFER_BIND_ID; vertices.inputAttributes[1].location = 1; vertices.inputAttributes[1].format = VK_FORMAT_R32G32B32_SFLOAT; vertices.inputAttributes[1].offset = offsetof(Vertex, color); vertices.inputState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertices.inputState.pNext = nullptr; vertices.inputState.flags = VK_FLAGS_NONE; vertices.inputState.vertexBindingDescriptionCount = 1; vertices.inputState.pVertexBindingDescriptions = &vertices.inputBinding; vertices.inputState.vertexAttributeDescriptionCount = static_cast<uint32_t>(vertices.inputAttributes.size()); vertices.inputState.pVertexAttributeDescriptions = vertices.inputAttributes.data(); }
static void *per_thread_code(void *arg) { /* This code should be executed by each of the three threads. It will */ /* create a vertex buffer with position and color per vertex, then load */ /* commands into the thread's designated command buffer to draw the */ /* triangle */ VkResult U_ASSERT_ONLY res; size_t threadNum = (size_t)arg; VkCommandPoolCreateInfo poolInfo; poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; poolInfo.pNext = NULL; poolInfo.queueFamilyIndex = info.graphics_queue_family_index; poolInfo.flags = 0; vkCreateCommandPool(info.device, &poolInfo, NULL, &threadCmdPools[threadNum]); VkCommandBufferAllocateInfo cmdBufInfo; cmdBufInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmdBufInfo.pNext = NULL; cmdBufInfo.commandBufferCount = 1; cmdBufInfo.commandPool = threadCmdPools[threadNum]; cmdBufInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(info.device, &cmdBufInfo, &threadCmdBufs[threadNum]); VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; buf_info.size = 3 * sizeof(triData[0]); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &vertex_buffer[threadNum].buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, vertex_buffer[threadNum].buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; bool pass; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &alloc_info.memoryTypeIndex); assert(pass && "No mappable, coherent memory"); res = 
vkAllocateMemory(info.device, &alloc_info, NULL, &(vertex_buffer[threadNum].mem)); assert(res == VK_SUCCESS); uint8_t *pData; res = vkMapMemory(info.device, vertex_buffer[threadNum].mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &triData[threadNum * 3], 3 * sizeof(triData[0])); vkUnmapMemory(info.device, vertex_buffer[threadNum].mem); res = vkBindBufferMemory(info.device, vertex_buffer[threadNum].buf, vertex_buffer[threadNum].mem, 0); assert(res == VK_SUCCESS); VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = 0; cmd_buf_info.pInheritanceInfo = NULL; res = vkBeginCommandBuffer(threadCmdBufs[threadNum], &cmd_buf_info); assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 0; rp_begin.pClearValues = NULL; vkCmdBeginRenderPass(threadCmdBufs[threadNum], &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(threadCmdBufs[threadNum], VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(threadCmdBufs[threadNum], 0, 1, &vertex_buffer[threadNum].buf, offsets); VkViewport viewport; viewport.height = (float)info.height; viewport.width = (float)info.width; viewport.minDepth = (float)0.0f; viewport.maxDepth = (float)1.0f; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(threadCmdBufs[threadNum], 0, NUM_VIEWPORTS, &viewport); VkRect2D scissor; scissor.extent.width = info.width; scissor.extent.height = info.height; scissor.offset.x = 0; scissor.offset.y = 0; vkCmdSetScissor(threadCmdBufs[threadNum], 0, 
NUM_SCISSORS, &scissor); vkCmdDraw(threadCmdBufs[threadNum], 3, 1, 0, 0); vkCmdEndRenderPass(threadCmdBufs[threadNum]); res = vkEndCommandBuffer(threadCmdBufs[threadNum]); assert(res == VK_SUCCESS); return NULL; }
// Vulkan uniform buffer sample: builds an MVP matrix, creates a
// host-visible uniform buffer, uploads the matrix, records the descriptor
// info, then tears everything down. Returns 0 on success; failures abort
// via assert.
int sample_main(int argc, char *argv[]) {
    VkResult U_ASSERT_ONLY res;
    bool U_ASSERT_ONLY pass;
    struct sample_info info = {};
    char sample_title[] = "Uniform Buffer Sample";

    // Minimal boilerplate — no swapchain/render pass needed for this sample
    init_global_layer_properties(info);
    init_instance(info, sample_title);
    init_enumerate_device(info);
    init_queue_family_index(info);
    init_device(info);
    init_window_size(info, 50, 50);

    // Build the MVP matrix that will live in the uniform buffer
    info.Projection = glm::perspective(glm::radians(45.0f), 1.0f, 0.1f, 100.0f);
    info.View = glm::lookAt(
        glm::vec3(0, 3, 10),  // Camera is at (0,3,10), in World Space
        glm::vec3(0, 0, 0),   // and looks at the origin
        glm::vec3(0, -1, 0)   // Head is up (set to 0,-1,0 to look upside-down)
        );
    info.Model = glm::mat4(1.0f);
    // Vulkan clip space has inverted Y and half Z.
    info.Clip = glm::mat4(1.0f, 0.0f, 0.0f, 0.0f,
                          0.0f, -1.0f, 0.0f, 0.0f,
                          0.0f, 0.0f, 0.5f, 0.0f,
                          0.0f, 0.0f, 0.5f, 1.0f);
    info.MVP = info.Clip * info.Projection * info.View * info.Model;

    /* VULKAN_KEY_START */
    // Create a uniform buffer sized for the single MVP matrix
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buf_info.size = sizeof(info.MVP);
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    buf_info.flags = 0;
    res = vkCreateBuffer(info.device, &buf_info, NULL, &info.uniform_data.buf);
    assert(res == VK_SUCCESS);

    // Allocate mappable, coherent backing memory so a plain memcpy
    // suffices (no explicit flush needed)
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(info.device, info.uniform_data.buf,
                                  &mem_reqs);
    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    alloc_info.allocationSize = mem_reqs.size;
    pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits,
                                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                           VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                       &alloc_info.memoryTypeIndex);
    assert(pass && "No mappable, coherent memory");
    res = vkAllocateMemory(info.device, &alloc_info, NULL,
                           &(info.uniform_data.mem));
    assert(res == VK_SUCCESS);

    // Map, upload the MVP matrix, unmap, then bind the memory
    uint8_t *pData;
    res = vkMapMemory(info.device, info.uniform_data.mem, 0, mem_reqs.size, 0,
                      (void **)&pData);
    assert(res == VK_SUCCESS);
    memcpy(pData, &info.MVP, sizeof(info.MVP));
    vkUnmapMemory(info.device, info.uniform_data.mem);
    res = vkBindBufferMemory(info.device, info.uniform_data.buf,
                             info.uniform_data.mem, 0);
    assert(res == VK_SUCCESS);

    // Descriptor info used when writing the descriptor set later
    info.uniform_data.buffer_info.buffer = info.uniform_data.buf;
    info.uniform_data.buffer_info.offset = 0;
    info.uniform_data.buffer_info.range = sizeof(info.MVP);
    /* VULKAN_KEY_END */

    // Teardown
    vkDestroyBuffer(info.device, info.uniform_data.buf, NULL);
    vkFreeMemory(info.device, info.uniform_data.mem, NULL);
    destroy_device(info);
    destroy_instance(info);
    return 0;
}
// Uploads the ImGui font atlas to the GPU.
//
// Creates a device-local RGBA8 image + view, points the (already created)
// descriptor set at it, stages the atlas pixels through a host-visible
// upload buffer, and records the copy + layout transitions into
// 'command_buffer'. The caller must submit that command buffer before the
// upload buffer is destroyed. Returns true (errors are routed through
// ImGui_ImplGlfwVulkan_VkResult).
//
// Fixes vs. previous revision:
//  - vkCmdCopyBufferToImage argument '®ion' (mojibake) restored to '&region'.
//  - region.imageExtent.depth set to 1; the spec requires a non-zero depth
//    for copies into a 2D image (it was implicitly 0 from '= {}').
//  - removed an inner 'VkResult err;' that shadowed the outer 'err'.
bool ImGui_ImplGlfwVulkan_CreateFontsTexture(VkCommandBuffer command_buffer)
{
    ImGuiIO& io = ImGui::GetIO();

    // Font atlas pixels: tightly packed RGBA, 4 bytes per texel.
    unsigned char* pixels;
    int width, height;
    io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height);
    size_t upload_size = width*height*4*sizeof(char);

    VkResult err;

    // Create the Image:
    {
        VkImageCreateInfo info = {};
        info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        info.imageType = VK_IMAGE_TYPE_2D;
        info.format = VK_FORMAT_R8G8B8A8_UNORM;
        info.extent.width = width;
        info.extent.height = height;
        info.extent.depth = 1;
        info.mipLevels = 1;
        info.arrayLayers = 1;
        info.samples = VK_SAMPLE_COUNT_1_BIT;
        info.tiling = VK_IMAGE_TILING_OPTIMAL;
        // Sampled by the UI shader, written by the buffer->image copy below.
        info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
        info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        err = vkCreateImage(g_Device, &info, g_Allocator, &g_FontImage);
        ImGui_ImplGlfwVulkan_VkResult(err);

        // Back the image with device-local memory sized per its requirements.
        VkMemoryRequirements req;
        vkGetImageMemoryRequirements(g_Device, g_FontImage, &req);
        VkMemoryAllocateInfo alloc_info = {};
        alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        alloc_info.allocationSize = req.size;
        alloc_info.memoryTypeIndex = ImGui_ImplGlfwVulkan_MemoryType(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, req.memoryTypeBits);
        err = vkAllocateMemory(g_Device, &alloc_info, g_Allocator, &g_FontMemory);
        ImGui_ImplGlfwVulkan_VkResult(err);
        err = vkBindImageMemory(g_Device, g_FontImage, g_FontMemory, 0);
        ImGui_ImplGlfwVulkan_VkResult(err);
    }

    // Create the Image View:
    {
        VkImageViewCreateInfo info = {};
        info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        info.image = g_FontImage;
        info.viewType = VK_IMAGE_VIEW_TYPE_2D;
        info.format = VK_FORMAT_R8G8B8A8_UNORM;
        info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        info.subresourceRange.levelCount = 1;
        info.subresourceRange.layerCount = 1;
        err = vkCreateImageView(g_Device, &info, g_Allocator, &g_FontView);
        ImGui_ImplGlfwVulkan_VkResult(err);
    }

    // Update the Descriptor Set: bind sampler + view as a combined image
    // sampler so the UI pipeline can sample the font texture.
    {
        VkDescriptorImageInfo desc_image[1] = {};
        desc_image[0].sampler = g_FontSampler;
        desc_image[0].imageView = g_FontView;
        desc_image[0].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        VkWriteDescriptorSet write_desc[1] = {};
        write_desc[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        write_desc[0].dstSet = g_DescriptorSet;
        write_desc[0].descriptorCount = 1;
        write_desc[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        write_desc[0].pImageInfo = desc_image;
        vkUpdateDescriptorSets(g_Device, 1, write_desc, 0, NULL);
    }

    // Create the Upload Buffer (host-visible staging for the atlas pixels):
    {
        VkBufferCreateInfo buffer_info = {};
        buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffer_info.size = upload_size;
        buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
        buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        err = vkCreateBuffer(g_Device, &buffer_info, g_Allocator, &g_UploadBuffer);
        ImGui_ImplGlfwVulkan_VkResult(err);
        VkMemoryRequirements req;
        vkGetBufferMemoryRequirements(g_Device, g_UploadBuffer, &req);
        // Track the largest alignment seen so later buffer allocations stay valid.
        g_BufferMemoryAlignment = (g_BufferMemoryAlignment > req.alignment) ? g_BufferMemoryAlignment : req.alignment;
        VkMemoryAllocateInfo alloc_info = {};
        alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        alloc_info.allocationSize = req.size;
        alloc_info.memoryTypeIndex = ImGui_ImplGlfwVulkan_MemoryType(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits);
        err = vkAllocateMemory(g_Device, &alloc_info, g_Allocator, &g_UploadBufferMemory);
        ImGui_ImplGlfwVulkan_VkResult(err);
        err = vkBindBufferMemory(g_Device, g_UploadBuffer, g_UploadBufferMemory, 0);
        ImGui_ImplGlfwVulkan_VkResult(err);
    }

    // Upload to Buffer: map, copy, flush (memory may be non-coherent), unmap.
    {
        char* map = NULL;
        err = vkMapMemory(g_Device, g_UploadBufferMemory, 0, upload_size, 0, (void**)(&map));
        ImGui_ImplGlfwVulkan_VkResult(err);
        memcpy(map, pixels, upload_size);
        VkMappedMemoryRange range[1] = {};
        range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        range[0].memory = g_UploadBufferMemory;
        range[0].size = upload_size;
        err = vkFlushMappedMemoryRanges(g_Device, 1, range);
        ImGui_ImplGlfwVulkan_VkResult(err);
        vkUnmapMemory(g_Device, g_UploadBufferMemory);
    }

    // Copy to Image: transition UNDEFINED -> TRANSFER_DST, copy, then
    // transition TRANSFER_DST -> SHADER_READ_ONLY for sampling.
    {
        VkImageMemoryBarrier copy_barrier[1] = {};
        copy_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        copy_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        copy_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        copy_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
        copy_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        copy_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        copy_barrier[0].image = g_FontImage;
        copy_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        copy_barrier[0].subresourceRange.levelCount = 1;
        copy_barrier[0].subresourceRange.layerCount = 1;
        vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, copy_barrier);

        VkBufferImageCopy region = {};
        region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        region.imageSubresource.layerCount = 1;
        region.imageExtent.width = width;
        region.imageExtent.height = height;
        region.imageExtent.depth = 1; // FIX: must be >= 1 for 2D images; zero-init left it 0
        vkCmdCopyBufferToImage(command_buffer, g_UploadBuffer, g_FontImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);

        VkImageMemoryBarrier use_barrier[1] = {};
        use_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        use_barrier[0].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        use_barrier[0].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        use_barrier[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
        use_barrier[0].newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        use_barrier[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        use_barrier[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        use_barrier[0].image = g_FontImage;
        use_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        use_barrier[0].subresourceRange.levelCount = 1;
        use_barrier[0].subresourceRange.layerCount = 1;
        vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, use_barrier);
    }

    // Store our identifier so ImGui draw commands can reference the texture.
    io.Fonts->TexID = (void *)(intptr_t)g_FontImage;

    return true;
}
// This is the main rendering function that you have to implement and provide to ImGui (via setting up 'RenderDrawListsFn' in the ImGuiIO structure)
// Records all of ImGui's draw data for the current frame into g_CommandBuffer:
// (re)sizes per-frame vertex/index buffers, uploads the geometry, binds the
// pipeline/descriptors, sets up an ortho transform via push constants, then
// issues one vkCmdDrawIndexed per ImGui draw command with its scissor rect.
void ImGui_ImplGlfwVulkan_RenderDrawLists(ImDrawData* draw_data)
{
    VkResult err;
    ImGuiIO& io = ImGui::GetIO();

    // Create the Vertex Buffer:
    // Lazily (re)create the per-frame vertex buffer when it is missing or too
    // small; size is rounded up to a multiple of g_BufferMemoryAlignment.
    size_t vertex_size = draw_data->TotalVtxCount * sizeof(ImDrawVert);
    if (!g_VertexBuffer[g_FrameIndex] || g_VertexBufferSize[g_FrameIndex] < vertex_size)
    {
        if (g_VertexBuffer[g_FrameIndex])
            vkDestroyBuffer(g_Device, g_VertexBuffer[g_FrameIndex], g_Allocator);
        if (g_VertexBufferMemory[g_FrameIndex])
            vkFreeMemory(g_Device, g_VertexBufferMemory[g_FrameIndex], g_Allocator);
        size_t vertex_buffer_size = ((vertex_size-1) / g_BufferMemoryAlignment+1) * g_BufferMemoryAlignment;
        VkBufferCreateInfo buffer_info = {};
        buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffer_info.size = vertex_buffer_size;
        buffer_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
        buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        err = vkCreateBuffer(g_Device, &buffer_info, g_Allocator, &g_VertexBuffer[g_FrameIndex]);
        ImGui_ImplGlfwVulkan_VkResult(err);
        VkMemoryRequirements req;
        vkGetBufferMemoryRequirements(g_Device, g_VertexBuffer[g_FrameIndex], &req);
        // Track the largest alignment requirement seen so far.
        g_BufferMemoryAlignment = (g_BufferMemoryAlignment > req.alignment) ? g_BufferMemoryAlignment : req.alignment;
        VkMemoryAllocateInfo alloc_info = {};
        alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        alloc_info.allocationSize = req.size;
        alloc_info.memoryTypeIndex = ImGui_ImplGlfwVulkan_MemoryType(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits);
        err = vkAllocateMemory(g_Device, &alloc_info, g_Allocator, &g_VertexBufferMemory[g_FrameIndex]);
        ImGui_ImplGlfwVulkan_VkResult(err);
        err = vkBindBufferMemory(g_Device, g_VertexBuffer[g_FrameIndex], g_VertexBufferMemory[g_FrameIndex], 0);
        ImGui_ImplGlfwVulkan_VkResult(err);
        g_VertexBufferSize[g_FrameIndex] = vertex_buffer_size;
    }

    // Create the Index Buffer:
    // Same lazy-resize scheme as the vertex buffer above.
    size_t index_size = draw_data->TotalIdxCount * sizeof(ImDrawIdx);
    if (!g_IndexBuffer[g_FrameIndex] || g_IndexBufferSize[g_FrameIndex] < index_size)
    {
        if (g_IndexBuffer[g_FrameIndex])
            vkDestroyBuffer(g_Device, g_IndexBuffer[g_FrameIndex], g_Allocator);
        if (g_IndexBufferMemory[g_FrameIndex])
            vkFreeMemory(g_Device, g_IndexBufferMemory[g_FrameIndex], g_Allocator);
        size_t index_buffer_size = ((index_size-1) / g_BufferMemoryAlignment+1) * g_BufferMemoryAlignment;
        VkBufferCreateInfo buffer_info = {};
        buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffer_info.size = index_buffer_size;
        buffer_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
        buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        err = vkCreateBuffer(g_Device, &buffer_info, g_Allocator, &g_IndexBuffer[g_FrameIndex]);
        ImGui_ImplGlfwVulkan_VkResult(err);
        VkMemoryRequirements req;
        vkGetBufferMemoryRequirements(g_Device, g_IndexBuffer[g_FrameIndex], &req);
        g_BufferMemoryAlignment = (g_BufferMemoryAlignment > req.alignment) ? g_BufferMemoryAlignment : req.alignment;
        VkMemoryAllocateInfo alloc_info = {};
        alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        alloc_info.allocationSize = req.size;
        alloc_info.memoryTypeIndex = ImGui_ImplGlfwVulkan_MemoryType(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits);
        err = vkAllocateMemory(g_Device, &alloc_info, g_Allocator, &g_IndexBufferMemory[g_FrameIndex]);
        ImGui_ImplGlfwVulkan_VkResult(err);
        err = vkBindBufferMemory(g_Device, g_IndexBuffer[g_FrameIndex], g_IndexBufferMemory[g_FrameIndex], 0);
        ImGui_ImplGlfwVulkan_VkResult(err);
        g_IndexBufferSize[g_FrameIndex] = index_buffer_size;
    }

    // Upload Vertex and index Data:
    // Concatenate every command list's vertex/index arrays into the two
    // mapped buffers, then flush (memory may be non-coherent) and unmap.
    {
        ImDrawVert* vtx_dst;
        ImDrawIdx* idx_dst;
        err = vkMapMemory(g_Device, g_VertexBufferMemory[g_FrameIndex], 0, vertex_size, 0, (void**)(&vtx_dst));
        ImGui_ImplGlfwVulkan_VkResult(err);
        err = vkMapMemory(g_Device, g_IndexBufferMemory[g_FrameIndex], 0, index_size, 0, (void**)(&idx_dst));
        ImGui_ImplGlfwVulkan_VkResult(err);
        for (int n = 0; n < draw_data->CmdListsCount; n++)
        {
            const ImDrawList* cmd_list = draw_data->CmdLists[n];
            memcpy(vtx_dst, cmd_list->VtxBuffer.Data, cmd_list->VtxBuffer.Size * sizeof(ImDrawVert));
            memcpy(idx_dst, cmd_list->IdxBuffer.Data, cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx));
            vtx_dst += cmd_list->VtxBuffer.Size;
            idx_dst += cmd_list->IdxBuffer.Size;
        }
        VkMappedMemoryRange range[2] = {};
        range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        range[0].memory = g_VertexBufferMemory[g_FrameIndex];
        range[0].size = vertex_size;
        range[1].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        range[1].memory = g_IndexBufferMemory[g_FrameIndex];
        range[1].size = index_size;
        err = vkFlushMappedMemoryRanges(g_Device, 2, range);
        ImGui_ImplGlfwVulkan_VkResult(err);
        vkUnmapMemory(g_Device, g_VertexBufferMemory[g_FrameIndex]);
        vkUnmapMemory(g_Device, g_IndexBufferMemory[g_FrameIndex]);
    }

    // Bind pipeline and descriptor sets:
    {
        vkCmdBindPipeline(g_CommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, g_Pipeline);
        VkDescriptorSet desc_set[1] = {g_DescriptorSet};
        vkCmdBindDescriptorSets(g_CommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, g_PipelineLayout, 0, 1, desc_set, 0, NULL);
    }

    // Bind Vertex And Index Buffer:
    {
        VkBuffer vertex_buffers[1] = {g_VertexBuffer[g_FrameIndex]};
        VkDeviceSize vertex_offset[1] = {0};
        vkCmdBindVertexBuffers(g_CommandBuffer, 0, 1, vertex_buffers, vertex_offset);
        // NOTE: UINT16 matches ImDrawIdx's default 16-bit definition.
        vkCmdBindIndexBuffer(g_CommandBuffer, g_IndexBuffer[g_FrameIndex], 0, VK_INDEX_TYPE_UINT16);
    }

    // Setup viewport: full display size, standard [0,1] depth range.
    {
        VkViewport viewport;
        viewport.x = 0;
        viewport.y = 0;
        viewport.width = ImGui::GetIO().DisplaySize.x;
        viewport.height = ImGui::GetIO().DisplaySize.y;
        viewport.minDepth = 0.0f;
        viewport.maxDepth = 1.0f;
        vkCmdSetViewport(g_CommandBuffer, 0, 1, &viewport);
    }

    // Setup scale and translation:
    // Push constants map ImGui's pixel coordinates to NDC ([-1,1] range).
    {
        float scale[2];
        scale[0] = 2.0f/io.DisplaySize.x;
        scale[1] = 2.0f/io.DisplaySize.y;
        float translate[2];
        translate[0] = -1.0f;
        translate[1] = -1.0f;
        vkCmdPushConstants(g_CommandBuffer, g_PipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, sizeof(float) * 0, sizeof(float) * 2, scale);
        vkCmdPushConstants(g_CommandBuffer, g_PipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, sizeof(float) * 2, sizeof(float) * 2, translate);
    }

    // Render the command lists:
    // vtx_offset/idx_offset track where each list's data starts inside the
    // single concatenated vertex/index buffers uploaded above.
    int vtx_offset = 0;
    int idx_offset = 0;
    for (int n = 0; n < draw_data->CmdListsCount; n++)
    {
        const ImDrawList* cmd_list = draw_data->CmdLists[n];
        for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++)
        {
            const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i];
            if (pcmd->UserCallback)
            {
                pcmd->UserCallback(cmd_list, pcmd);
            }
            else
            {
                // NOTE(review): ClipRect.x/y can be negative when a window is
                // partially off-screen; a negative scissor offset is invalid in
                // Vulkan — consider clamping to 0. Confirm against validation layers.
                VkRect2D scissor;
                scissor.offset.x = (int32_t)(pcmd->ClipRect.x);
                scissor.offset.y = (int32_t)(pcmd->ClipRect.y);
                scissor.extent.width = (uint32_t)(pcmd->ClipRect.z - pcmd->ClipRect.x);
                scissor.extent.height = (uint32_t)(pcmd->ClipRect.w - pcmd->ClipRect.y + 1); // TODO: + 1??????
                vkCmdSetScissor(g_CommandBuffer, 0, 1, &scissor);
                vkCmdDrawIndexed(g_CommandBuffer, pcmd->ElemCount, 1, idx_offset, vtx_offset, 0);
            }
            idx_offset += pcmd->ElemCount;
        }
        vtx_offset += cmd_list->VtxBuffer.Size;
    }
}
// Vulkan sample: draw the same cube twice using a single
// VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC descriptor. The uniform buffer
// holds two MVP matrices at aligned offsets; each draw rebinds the set with
// a different dynamic offset to select its matrix.
int sample_main(int argc, char *argv[]) {
    VkResult U_ASSERT_ONLY res;
    bool U_ASSERT_ONLY pass;
    struct sample_info info = {};
    char sample_title[] = "Draw Cube";
    const bool depthPresent = true;

    // Standard sample-framework setup: instance, device, swapchain,
    // command buffer, render pass, shaders, framebuffers, vertex data.
    process_command_line_args(info, argc, argv);
    init_global_layer_properties(info);
    init_instance_extension_names(info);
    init_device_extension_names(info);
    init_instance(info, sample_title);
    init_enumerate_device(info);
    // Dynamic uniform buffers are the whole point of this sample; bail if
    // the device exposes none.
    if (info.gpu_props.limits.maxDescriptorSetUniformBuffersDynamic < 1) {
        std::cout << "No dynamic uniform buffers supported\n";
        exit(-1);
    }
    init_window_size(info, 500, 500);
    init_connection(info);
    init_window(info);
    init_swapchain_extension(info);
    init_device(info);
    init_command_pool(info);
    init_command_buffer(info);
    execute_begin_command_buffer(info);
    init_device_queue(info);
    init_swap_chain(info);
    init_depth_buffer(info);
    init_renderpass(info, depthPresent);
    init_shaders(info, vertShaderText, fragShaderText);
    init_framebuffers(info, depthPresent);
    init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false);

    /* Set up uniform buffer with 2 transform matrices in it */
    info.Projection = glm::perspective(glm::radians(45.0f), 1.0f, 0.1f, 100.0f);
    info.View = glm::lookAt(glm::vec3(0, 3, -10),  // Camera is at (0,3,-10), in World Space
                            glm::vec3(0, 0, 0),    // and looks at the origin
                            glm::vec3(0, -1, 0)    // Head is up (set to 0,-1,0 to look upside-down)
                            );
    info.Model = glm::mat4(1.0f);
    // Vulkan clip space has inverted Y and half Z.
    // clang-format off
    info.Clip = glm::mat4(1.0f, 0.0f, 0.0f, 0.0f,
                          0.0f,-1.0f, 0.0f, 0.0f,
                          0.0f, 0.0f, 0.5f, 0.0f,
                          0.0f, 0.0f, 0.5f, 1.0f);
    // clang-format on
    info.MVP = info.Clip * info.Projection * info.View * info.Model;

    /* VULKAN_KEY_START */
    // Second matrix: same cube translated so the two draws don't overlap.
    info.Model = glm::translate(info.Model, glm::vec3(-1.5, 1.5, -1.5));
    glm::mat4 MVP2 = info.Clip * info.Projection * info.View * info.Model;

    // Each matrix must start at a multiple of minUniformBufferOffsetAlignment,
    // because dynamic offsets are constrained by that limit. Round the
    // per-matrix stride up to the next aligned boundary.
    VkDeviceSize buf_size = sizeof(info.MVP);
    if (info.gpu_props.limits.minUniformBufferOffsetAlignment)
        buf_size = (buf_size + info.gpu_props.limits.minUniformBufferOffsetAlignment - 1) &
                   ~(info.gpu_props.limits.minUniformBufferOffsetAlignment - 1);

    // One buffer sized for both aligned matrices.
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buf_info.size = 2 * buf_size;
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    buf_info.flags = 0;
    res = vkCreateBuffer(info.device, &buf_info, NULL, &info.uniform_data.buf);
    assert(res == VK_SUCCESS);

    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(info.device, info.uniform_data.buf, &mem_reqs);
    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    alloc_info.allocationSize = mem_reqs.size;
    // HOST_VISIBLE + HOST_COHERENT: CPU-writable, no explicit flush needed.
    pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits,
                                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                       &alloc_info.memoryTypeIndex);
    assert(pass && "No mappable, coherent memory");
    res = vkAllocateMemory(info.device, &alloc_info, NULL, &(info.uniform_data.mem));
    assert(res == VK_SUCCESS);

    /* Map the buffer memory and copy both matrices */
    uint8_t *pData;
    res = vkMapMemory(info.device, info.uniform_data.mem, 0, mem_reqs.size, 0, (void **)&pData);
    assert(res == VK_SUCCESS);
    memcpy(pData, &info.MVP, sizeof(info.MVP));
    pData += buf_size; // advance by the aligned stride, not sizeof(MVP)
    memcpy(pData, &MVP2, sizeof(MVP2));
    vkUnmapMemory(info.device, info.uniform_data.mem);
    res = vkBindBufferMemory(info.device, info.uniform_data.buf, info.uniform_data.mem, 0);
    assert(res == VK_SUCCESS);

    // Descriptor range covers ONE matrix; the dynamic offset selects which.
    info.uniform_data.buffer_info.buffer = info.uniform_data.buf;
    info.uniform_data.buffer_info.offset = 0;
    info.uniform_data.buffer_info.range = buf_size;

    /* Init desciptor and pipeline layouts - descriptor type is
     * UNIFORM_BUFFER_DYNAMIC */
    // NOTE(review): array is sized 2 but only element 0 is initialized/used
    // (bindingCount below is 1).
    VkDescriptorSetLayoutBinding layout_bindings[2];
    layout_bindings[0].binding = 0;
    layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    layout_bindings[0].descriptorCount = 1;
    layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    layout_bindings[0].pImmutableSamplers = NULL;

    /* Next take layout bindings and use them to create a descriptor set layout */
    VkDescriptorSetLayoutCreateInfo descriptor_layout = {};
    descriptor_layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    descriptor_layout.pNext = NULL;
    descriptor_layout.bindingCount = 1;
    descriptor_layout.pBindings = layout_bindings;
    info.desc_layout.resize(NUM_DESCRIPTOR_SETS);
    res = vkCreateDescriptorSetLayout(info.device, &descriptor_layout, NULL, info.desc_layout.data());
    assert(res == VK_SUCCESS);

    /* Now use the descriptor layout to create a pipeline layout */
    VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {};
    pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    pPipelineLayoutCreateInfo.pNext = NULL;
    pPipelineLayoutCreateInfo.pushConstantRangeCount = 0;
    pPipelineLayoutCreateInfo.pPushConstantRanges = NULL;
    pPipelineLayoutCreateInfo.setLayoutCount = NUM_DESCRIPTOR_SETS;
    pPipelineLayoutCreateInfo.pSetLayouts = info.desc_layout.data();
    res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout);
    assert(res == VK_SUCCESS);

    /* Create descriptor pool with UNIFOM_BUFFER_DYNAMIC type */
    VkDescriptorPoolSize type_count[1];
    type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    type_count[0].descriptorCount = 1;
    VkDescriptorPoolCreateInfo descriptor_pool = {};
    descriptor_pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    descriptor_pool.pNext = NULL;
    descriptor_pool.maxSets = 1;
    descriptor_pool.poolSizeCount = 1;
    descriptor_pool.pPoolSizes = type_count;
    res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool);
    assert(res == VK_SUCCESS);

    VkDescriptorSetAllocateInfo desc_alloc_info[1];
    desc_alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    desc_alloc_info[0].pNext = NULL;
    desc_alloc_info[0].descriptorPool = info.desc_pool;
    desc_alloc_info[0].descriptorSetCount = NUM_DESCRIPTOR_SETS;
    desc_alloc_info[0].pSetLayouts = info.desc_layout.data();

    /* Allocate descriptor set with UNIFORM_BUFFER_DYNAMIC */
    info.desc_set.resize(NUM_DESCRIPTOR_SETS);
    res = vkAllocateDescriptorSets(info.device, desc_alloc_info, info.desc_set.data());
    assert(res == VK_SUCCESS);

    // Point the dynamic descriptor at the uniform buffer (offset 0, one
    // matrix-sized range; dynamic offsets are applied at bind time).
    VkWriteDescriptorSet writes[1];
    writes[0] = {};
    writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writes[0].pNext = NULL;
    writes[0].dstSet = info.desc_set[0];
    writes[0].descriptorCount = 1;
    writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    writes[0].pBufferInfo = &info.uniform_data.buffer_info;
    writes[0].dstArrayElement = 0;
    writes[0].dstBinding = 0;
    vkUpdateDescriptorSets(info.device, 1, writes, 0, NULL);

    init_pipeline_cache(info);
    init_pipeline(info, depthPresent);

    // Grey clear color; depth cleared to the far plane.
    VkClearValue clear_values[2];
    clear_values[0].color.float32[0] = 0.2f;
    clear_values[0].color.float32[1] = 0.2f;
    clear_values[0].color.float32[2] = 0.2f;
    clear_values[0].color.float32[3] = 0.2f;
    clear_values[1].depthStencil.depth = 1.0f;
    clear_values[1].depthStencil.stencil = 0;

    // Semaphore signaled when the acquired swapchain image is ready.
    VkSemaphore imageAcquiredSemaphore;
    VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo;
    imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    imageAcquiredSemaphoreCreateInfo.pNext = NULL;
    imageAcquiredSemaphoreCreateInfo.flags = 0;
    res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &imageAcquiredSemaphore);
    assert(res == VK_SUCCESS);

    // Get the index of the next available swapchain image:
    res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer);
    // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR
    // return codes
    assert(res == VK_SUCCESS);

    VkRenderPassBeginInfo rp_begin;
    rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rp_begin.pNext = NULL;
    rp_begin.renderPass = info.render_pass;
    rp_begin.framebuffer = info.framebuffers[info.current_buffer];
    rp_begin.renderArea.offset.x = 0;
    rp_begin.renderArea.offset.y = 0;
    rp_begin.renderArea.extent.width = info.width;
    rp_begin.renderArea.extent.height = info.height;
    rp_begin.clearValueCount = 2;
    rp_begin.pClearValues = clear_values;

    vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
    vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline);

    /* The first draw should use the first matrix in the buffer */
    uint32_t uni_offsets[1] = {0};
    vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS,
                            info.desc_set.data(), 1, uni_offsets);
    const VkDeviceSize vtx_offsets[1] = {0};
    vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, vtx_offsets);
    init_viewports(info);
    init_scissors(info);
    vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); // 12 triangles = 1 cube

    // Switch the dynamic offset to the second (aligned) matrix slot.
    uni_offsets[0] = (uint32_t)buf_size;
    /* The second draw should use the second matrix in the buffer */
    vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS,
                            info.desc_set.data(), 1, uni_offsets);
    vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0);
    vkCmdEndRenderPass(info.cmd);
    // NOTE(review): result of vkEndCommandBuffer is stored but not asserted.
    res = vkEndCommandBuffer(info.cmd);
    const VkCommandBuffer cmd_bufs[] = {info.cmd};

    // Fence to know when the submitted work has finished before presenting.
    VkFenceCreateInfo fenceInfo;
    VkFence drawFence;
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = NULL;
    fenceInfo.flags = 0;
    // NOTE(review): vkCreateFence result is not checked.
    vkCreateFence(info.device, &fenceInfo, NULL, &drawFence);

    // Wait on image acquisition at the color-output stage, then submit.
    VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkSubmitInfo submit_info[1] = {};
    submit_info[0].pNext = NULL;
    submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info[0].waitSemaphoreCount = 1;
    submit_info[0].pWaitSemaphores = &imageAcquiredSemaphore;
    submit_info[0].pWaitDstStageMask = &pipe_stage_flags;
    submit_info[0].commandBufferCount = 1;
    submit_info[0].pCommandBuffers = cmd_bufs;
    submit_info[0].signalSemaphoreCount = 0;
    submit_info[0].pSignalSemaphores = NULL;

    /* Queue the command buffer for execution */
    res = vkQueueSubmit(info.graphics_queue, 1, submit_info, drawFence);
    assert(res == VK_SUCCESS);

    /* Now present the image in the window */
    VkPresentInfoKHR present;
    present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    present.pNext = NULL;
    present.swapchainCount = 1;
    present.pSwapchains = &info.swap_chain;
    present.pImageIndices = &info.current_buffer;
    present.pWaitSemaphores = NULL;
    present.waitSemaphoreCount = 0;
    present.pResults = NULL;

    /* Make sure command buffer is finished before presenting */
    do {
        res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT);
    } while (res == VK_TIMEOUT);
    assert(res == VK_SUCCESS);
    res = vkQueuePresentKHR(info.present_queue, &present);
    assert(res == VK_SUCCESS);

    wait_seconds(1);
    /* VULKAN_KEY_END */
    if (info.save_images) write_ppm(info, "dynamicuniform");

    // Teardown, roughly in reverse order of creation.
    vkDestroySemaphore(info.device, imageAcquiredSemaphore, NULL);
    vkDestroyFence(info.device, drawFence, NULL);
    destroy_pipeline(info);
    destroy_pipeline_cache(info);
    destroy_descriptor_pool(info);
    destroy_vertex_buffer(info);
    destroy_framebuffers(info);
    destroy_shaders(info);
    destroy_renderpass(info);
    destroy_descriptor_and_pipeline_layouts(info);
    destroy_uniform_buffer(info);
    destroy_depth_buffer(info);
    destroy_swap_chain(info);
    destroy_command_buffer(info);
    destroy_command_pool(info);
    destroy_device(info);
    destroy_window(info);
    destroy_instance(info);
    return 0;
}
// Binds this material to 'effect': allocates one uniform buffer per unique
// UBO binding and one sampler slot per unique sampler binding declared by the
// effect's shaders, then creates a descriptor pool/set and writes all
// bindings into it. Passing a valid effect replaces any previous resources;
// passing NULL just destroys them. Returns W_SUCCEEDED or an error code,
// rolling back partially-created resources via _DestroyResources() on failure.
WError WMaterial::SetEffect(WEffect* const effect) {
    VkDevice device = m_app->GetVulkanDevice();

    if (effect && !effect->Valid())
        return WError(W_INVALIDPARAM);

    // Drop any resources from a previously set effect.
    _DestroyResources();
    if (!effect)
        return WError(W_SUCCEEDED);

    //
    // Create the uniform buffers
    //
    for (int i = 0; i < effect->m_shaders.size(); i++) {
        WShader* shader = effect->m_shaders[i];
        for (int j = 0; j < shader->m_desc.bound_resources.size(); j++) {
            if (shader->m_desc.bound_resources[j].type == W_TYPE_UBO) {
                // Deduplicate by binding index across shaders.
                bool already_added = false;
                for (int k = 0; k < m_uniformBuffers.size(); k++) {
                    if (m_uniformBuffers[k].ubo_info->binding_index == shader->m_desc.bound_resources[j].binding_index) {
                        // two shaders have the same UBO binding index, skip (it is the same UBO, the WEffect::CreatePipeline ensures that)
                        already_added = true;
                    }
                }
                if (already_added)
                    continue;

                UNIFORM_BUFFER_INFO ubo;

                VkBufferCreateInfo bufferInfo = {};
                VkMemoryAllocateInfo uballocInfo = {};
                uballocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
                uballocInfo.pNext = NULL;
                uballocInfo.allocationSize = 0;
                uballocInfo.memoryTypeIndex = 0;
                bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
                bufferInfo.size = shader->m_desc.bound_resources[j].GetSize();
                bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

                // Create a new buffer
                VkResult err = vkCreateBuffer(device, &bufferInfo, nullptr, &ubo.descriptor.buffer);
                if (err) {
                    _DestroyResources();
                    return WError(W_UNABLETOCREATEBUFFER);
                }
                // Get memory requirements including size, alignment and memory type
                VkMemoryRequirements memReqs;
                vkGetBufferMemoryRequirements(device, ubo.descriptor.buffer, &memReqs);
                uballocInfo.allocationSize = memReqs.size;
                // Gets the appropriate memory type for this type of buffer allocation
                // Only memory types that are visible to the host
                m_app->GetMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &uballocInfo.memoryTypeIndex);
                // Allocate memory for the uniform buffer
                err = vkAllocateMemory(device, &uballocInfo, nullptr, &(ubo.memory));
                if (err) {
                    // Roll back the buffer created above before bailing.
                    vkDestroyBuffer(device, ubo.descriptor.buffer, nullptr);
                    _DestroyResources();
                    return WError(W_OUTOFMEMORY);
                }
                // Bind memory to buffer
                err = vkBindBufferMemory(device, ubo.descriptor.buffer, ubo.memory, 0);
                if (err) {
                    vkDestroyBuffer(device, ubo.descriptor.buffer, nullptr);
                    vkFreeMemory(device, ubo.memory, nullptr);
                    _DestroyResources();
                    return WError(W_UNABLETOCREATEBUFFER);
                }

                // Store information in the uniform's descriptor
                ubo.descriptor.offset = 0;
                ubo.descriptor.range = shader->m_desc.bound_resources[j].GetSize();
                ubo.ubo_info = &shader->m_desc.bound_resources[j];

                m_uniformBuffers.push_back(ubo);
            } else if (shader->m_desc.bound_resources[j].type == W_TYPE_SAMPLER) {
                // Deduplicate samplers by binding index, same as UBOs above.
                bool already_added = false;
                for (int k = 0; k < m_sampler_info.size(); k++) {
                    if (m_sampler_info[k].sampler_info->binding_index == shader->m_desc.bound_resources[j].binding_index) {
                        // two shaders have the same sampler binding index, skip (it is the same sampler, the WEffect::CreatePipeline ensures that)
                        already_added = true;
                    }
                }
                if (already_added)
                    continue;

                // Start every sampler slot pointing at the app's default
                // image/sampler; takes a reference on the default image.
                SAMPLER_INFO sampler;
                sampler.img = m_app->ImageManager->GetDefaultImage();
                m_app->ImageManager->GetDefaultImage()->AddReference();
                sampler.descriptor.sampler = m_app->Renderer->GetDefaultSampler();
                sampler.descriptor.imageView = m_app->ImageManager->GetDefaultImage()->GetView();
                sampler.descriptor.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
                sampler.sampler_info = &shader->m_desc.bound_resources[j];
                m_sampler_info.push_back(sampler);
            }
        }
    }

    // NOTE(review): m_writeDescriptorSets is sized here but not used in this
    // function (the local 'writes' vector below is used instead) — presumably
    // it is consumed elsewhere; verify.
    m_writeDescriptorSets = vector<VkWriteDescriptorSet>(m_sampler_info.size());

    //
    // Create descriptor pool
    //
    // We need to tell the API the number of max. requested descriptors per type
    vector<VkDescriptorPoolSize> typeCounts;
    if (m_uniformBuffers.size() > 0) {
        VkDescriptorPoolSize s;
        s.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        s.descriptorCount = m_uniformBuffers.size();
        typeCounts.push_back(s);
    }
    if (m_sampler_info.size() > 0) {
        VkDescriptorPoolSize s;
        s.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        s.descriptorCount = m_sampler_info.size();
        typeCounts.push_back(s);
    }

    if (typeCounts.size() > 0) {
        // Create the global descriptor pool
        // All descriptors used in this example are allocated from this pool
        VkDescriptorPoolCreateInfo descriptorPoolInfo = {};
        descriptorPoolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
        descriptorPoolInfo.pNext = NULL;
        descriptorPoolInfo.poolSizeCount = typeCounts.size();
        descriptorPoolInfo.pPoolSizes = typeCounts.data();
        // Set the max. number of sets that can be requested
        // Requesting descriptors beyond maxSets will result in an error
        descriptorPoolInfo.maxSets = descriptorPoolInfo.poolSizeCount;
        VkResult vkRes = vkCreateDescriptorPool(device, &descriptorPoolInfo, nullptr, &m_descriptorPool);
        if (vkRes) {
            _DestroyResources();
            return WError(W_OUTOFMEMORY);
        }

        //
        // Create descriptor set
        //
        VkDescriptorSetAllocateInfo allocInfo = {};
        allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
        allocInfo.descriptorPool = m_descriptorPool;
        allocInfo.descriptorSetCount = 1;
        allocInfo.pSetLayouts = effect->GetDescriptorSetLayout();
        vkRes = vkAllocateDescriptorSets(device, &allocInfo, &m_descriptorSet);
        if (vkRes) {
            _DestroyResources();
            return WError(W_OUTOFMEMORY);
        }

        // Update descriptor sets determining the shader binding points
        // For every binding point used in a shader there needs to be one
        // descriptor set matching that binding point
        vector<VkWriteDescriptorSet> writes;
        for (int i = 0; i < m_uniformBuffers.size(); i++) {
            VkWriteDescriptorSet writeDescriptorSet = {};
            writeDescriptorSet.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            writeDescriptorSet.dstSet = m_descriptorSet;
            writeDescriptorSet.descriptorCount = 1;
            writeDescriptorSet.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
            writeDescriptorSet.pBufferInfo = &m_uniformBuffers[i].descriptor;
            writeDescriptorSet.dstBinding = m_uniformBuffers[i].ubo_info->binding_index;
            writes.push_back(writeDescriptorSet);
        }
        for (int i = 0; i < m_sampler_info.size(); i++) {
            VkWriteDescriptorSet writeDescriptorSet = {};
            writeDescriptorSet.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            writeDescriptorSet.dstSet = m_descriptorSet;
            writeDescriptorSet.descriptorCount = 1;
            writeDescriptorSet.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            writeDescriptorSet.pImageInfo = &m_sampler_info[i].descriptor;
            writeDescriptorSet.dstBinding = m_sampler_info[i].sampler_info->binding_index;
            writes.push_back(writeDescriptorSet);
        }
        vkUpdateDescriptorSets(device, writes.size(), writes.data(), 0, NULL);
    }

    // Success: keep a reference on the effect.
    m_effect = effect;
    effect->AddReference();

    return WError(W_SUCCEEDED);
}
void prepareVertices() { // Setup vertices std::vector<Vertex> vertexBuffer; vertexBuffer.push_back({ { 1.0f, 1.0f, 0.0f },{ 1.0f, 1.0f } }); vertexBuffer.push_back({ { -1.0f, 1.0f, 0.0f },{ 0.0f, 1.0f } }); vertexBuffer.push_back({ { -1.0f, -1.0f, 0.0f },{ 0.0f, 0.0f } }); vertexBuffer.push_back({ { 1.0f, -1.0f, 0.0f },{ 1.0f, 0.0f } }); int vertexBufferSize = vertexBuffer.size() * sizeof(Vertex); // Setup indices std::vector<uint32_t> indexBuffer; indexBuffer.push_back(0); indexBuffer.push_back(1); indexBuffer.push_back(2); indexBuffer.push_back(2); indexBuffer.push_back(3); indexBuffer.push_back(0); int indexBufferSize = indexBuffer.size() * sizeof(uint32_t); VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; VkMemoryRequirements memReqs; VkResult err; void *data; // Generate vertex buffer // Setup VkBufferCreateInfo bufInfo = {}; bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufInfo.pNext = NULL; bufInfo.size = vertexBufferSize; bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; bufInfo.flags = 0; // Copy vertex data to VRAM memset(&vertices, 0, sizeof(vertices)); err = vkCreateBuffer(device, &bufInfo, nullptr, &vertices.buf); assert(!err); vkGetBufferMemoryRequirements(device, vertices.buf, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); vkAllocateMemory(device, &memAlloc, nullptr, &vertices.mem); assert(!err); err = vkMapMemory(device, vertices.mem, 0, memAlloc.allocationSize, 0, &data); assert(!err); memcpy(data, vertexBuffer.data(), vertexBufferSize); vkUnmapMemory(device, vertices.mem); assert(!err); err = vkBindBufferMemory(device, vertices.buf, vertices.mem, 0); assert(!err); // Generate index buffer // Setup VkBufferCreateInfo indexbufferInfo = {}; indexbufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; 
indexbufferInfo.pNext = NULL; indexbufferInfo.size = indexBufferSize; indexbufferInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; indexbufferInfo.flags = 0; // Copy index data to VRAM memset(&indices, 0, sizeof(indices)); err = vkCreateBuffer(device, &bufInfo, nullptr, &indices.buf); assert(!err); vkGetBufferMemoryRequirements(device, indices.buf, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); err = vkAllocateMemory(device, &memAlloc, nullptr, &indices.mem); assert(!err); err = vkMapMemory(device, indices.mem, 0, indexBufferSize, 0, &data); assert(!err); memcpy(data, indexBuffer.data(), indexBufferSize); vkUnmapMemory(device, indices.mem); err = vkBindBufferMemory(device, indices.buf, indices.mem, 0); assert(!err); indices.count = indexBuffer.size(); // Binding description vertices.bindingDescriptions.resize(1); vertices.bindingDescriptions[0] = vkTools::initializers::vertexInputBindingDescription( VERTEX_BUFFER_BIND_ID, sizeof(Vertex), VK_VERTEX_INPUT_RATE_VERTEX); // Attribute descriptions // Describes memory layout and shader positions vertices.attributeDescriptions.resize(2); // Location 0 : Position vertices.attributeDescriptions[0] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 0, VK_FORMAT_R32G32B32_SFLOAT, 0); // Location 1 : UV vertices.attributeDescriptions[1] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 1, VK_FORMAT_R32G32_SFLOAT, sizeof(float) * 3); // Assign to vertex buffer vertices.inputState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertices.inputState.pNext = NULL; vertices.inputState.vertexBindingDescriptionCount = vertices.bindingDescriptions.size(); vertices.inputState.pVertexBindingDescriptions = vertices.bindingDescriptions.data(); vertices.inputState.vertexAttributeDescriptionCount = vertices.attributeDescriptions.size(); 
vertices.inputState.pVertexAttributeDescriptions = vertices.attributeDescriptions.data(); }