/**
 * Binds this material to the given effect: (re)creates one uniform buffer per
 * unique UBO binding and one sampler slot per unique sampler binding found
 * across all of the effect's shaders, then builds a descriptor pool/set
 * describing them.
 *
 * @param effect  Effect to attach; NULL detaches (resources destroyed).
 * @return W_SUCCEEDED on success; W_INVALIDPARAM if the effect is invalid;
 *         W_UNABLETOCREATEBUFFER / W_OUTOFMEMORY on Vulkan failures (all
 *         partially-created resources are released before returning).
 */
WError WMaterial::SetEffect(WEffect* const effect) {
	VkDevice device = m_app->GetVulkanDevice();

	if (effect && !effect->Valid())
		return WError(W_INVALIDPARAM);

	// Drop resources built for the previous effect before rebuilding.
	_DestroyResources();

	// NULL effect simply detaches the material.
	if (!effect)
		return WError(W_SUCCEEDED);

	//
	// Create the uniform buffers
	//
	for (int i = 0; i < effect->m_shaders.size(); i++) {
		WShader* shader = effect->m_shaders[i];
		for (int j = 0; j < shader->m_desc.bound_resources.size(); j++) {
			if (shader->m_desc.bound_resources[j].type == W_TYPE_UBO) {
				// De-duplicate by binding index across shaders.
				bool already_added = false;
				for (int k = 0; k < m_uniformBuffers.size(); k++) {
					if (m_uniformBuffers[k].ubo_info->binding_index == shader->m_desc.bound_resources[j].binding_index) {
						// two shaders have the same UBO binding index, skip (it is the same UBO, the WEffect::CreatePipeline ensures that)
						already_added = true;
					}
				}
				if (already_added)
					continue;

				UNIFORM_BUFFER_INFO ubo;

				VkBufferCreateInfo bufferInfo = {};
				VkMemoryAllocateInfo uballocInfo = {};
				uballocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
				uballocInfo.pNext = NULL;
				uballocInfo.allocationSize = 0;
				uballocInfo.memoryTypeIndex = 0;

				bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
				bufferInfo.size = shader->m_desc.bound_resources[j].GetSize();
				bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

				// Create a new buffer
				VkResult err = vkCreateBuffer(device, &bufferInfo, nullptr, &ubo.descriptor.buffer);
				if (err) {
					_DestroyResources();
					return WError(W_UNABLETOCREATEBUFFER);
				}
				// Get memory requirements including size, alignment and memory type
				VkMemoryRequirements memReqs;
				vkGetBufferMemoryRequirements(device, ubo.descriptor.buffer, &memReqs);
				uballocInfo.allocationSize = memReqs.size;
				// Gets the appropriate memory type for this type of buffer allocation
				// Only memory types that are visible to the host
				m_app->GetMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &uballocInfo.memoryTypeIndex);
				// Allocate memory for the uniform buffer
				err = vkAllocateMemory(device, &uballocInfo, nullptr, &(ubo.memory));
				if (err) {
					// Undo the buffer creation before tearing everything down.
					vkDestroyBuffer(device, ubo.descriptor.buffer, nullptr);
					_DestroyResources();
					return WError(W_OUTOFMEMORY);
				}
				// Bind memory to buffer
				err = vkBindBufferMemory(device, ubo.descriptor.buffer, ubo.memory, 0);
				if (err) {
					vkDestroyBuffer(device, ubo.descriptor.buffer, nullptr);
					vkFreeMemory(device, ubo.memory, nullptr);
					_DestroyResources();
					return WError(W_UNABLETOCREATEBUFFER);
				}

				// Store information in the uniform's descriptor
				ubo.descriptor.offset = 0;
				ubo.descriptor.range = shader->m_desc.bound_resources[j].GetSize();
				// NOTE(review): this stores a pointer into the shader's
				// bound_resources vector — assumes that vector is never
				// reallocated while the material lives; verify.
				ubo.ubo_info = &shader->m_desc.bound_resources[j];

				m_uniformBuffers.push_back(ubo);
			} else if (shader->m_desc.bound_resources[j].type == W_TYPE_SAMPLER) {
				// De-duplicate by binding index across shaders.
				bool already_added = false;
				for (int k = 0; k < m_sampler_info.size(); k++) {
					if (m_sampler_info[k].sampler_info->binding_index == shader->m_desc.bound_resources[j].binding_index) {
						// two shaders have the same sampler binding index, skip (it is the same sampler, the WEffect::CreatePipeline ensures that)
						already_added = true;
					}
				}
				if (already_added)
					continue;

				// New sampler slots start out pointing at the engine's
				// default image/sampler until the user binds a texture.
				SAMPLER_INFO sampler;
				sampler.img = m_app->ImageManager->GetDefaultImage();
				m_app->ImageManager->GetDefaultImage()->AddReference();
				sampler.descriptor.sampler = m_app->Renderer->GetDefaultSampler();
				sampler.descriptor.imageView = m_app->ImageManager->GetDefaultImage()->GetView();
				sampler.descriptor.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
				sampler.sampler_info = &shader->m_desc.bound_resources[j];
				m_sampler_info.push_back(sampler);
			}
		}
	}

	// Pre-sized scratch space for sampler descriptor writes (one per sampler).
	m_writeDescriptorSets = vector<VkWriteDescriptorSet>(m_sampler_info.size());

	//
	// Create descriptor pool
	//
	// We need to tell the API the number of max. requested descriptors per type
	vector<VkDescriptorPoolSize> typeCounts;
	if (m_uniformBuffers.size() > 0) {
		VkDescriptorPoolSize s;
		s.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
		s.descriptorCount = m_uniformBuffers.size();
		typeCounts.push_back(s);
	}
	if (m_sampler_info.size() > 0) {
		VkDescriptorPoolSize s;
		s.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
		s.descriptorCount = m_sampler_info.size();
		typeCounts.push_back(s);
	}

	// No pool/set is needed when the effect binds no resources at all.
	if (typeCounts.size() > 0) {
		// Create the global descriptor pool
		// All descriptors used in this example are allocated from this pool
		VkDescriptorPoolCreateInfo descriptorPoolInfo = {};
		descriptorPoolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
		descriptorPoolInfo.pNext = NULL;
		descriptorPoolInfo.poolSizeCount = typeCounts.size();
		descriptorPoolInfo.pPoolSizes = typeCounts.data();
		// Set the max. number of sets that can be requested
		// Requesting descriptors beyond maxSets will result in an error
		descriptorPoolInfo.maxSets = descriptorPoolInfo.poolSizeCount;

		VkResult vkRes = vkCreateDescriptorPool(device, &descriptorPoolInfo, nullptr, &m_descriptorPool);
		if (vkRes) {
			_DestroyResources();
			return WError(W_OUTOFMEMORY);
		}

		//
		// Create descriptor set
		//
		VkDescriptorSetAllocateInfo allocInfo = {};
		allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
		allocInfo.descriptorPool = m_descriptorPool;
		allocInfo.descriptorSetCount = 1;
		allocInfo.pSetLayouts = effect->GetDescriptorSetLayout();

		vkRes = vkAllocateDescriptorSets(device, &allocInfo, &m_descriptorSet);
		if (vkRes) {
			_DestroyResources();
			return WError(W_OUTOFMEMORY);
		}

		// Update descriptor sets determining the shader binding points
		// For every binding point used in a shader there needs to be one
		// descriptor set matching that binding point
		vector<VkWriteDescriptorSet> writes;
		for (int i = 0; i < m_uniformBuffers.size(); i++) {
			VkWriteDescriptorSet writeDescriptorSet = {};
			writeDescriptorSet.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
			writeDescriptorSet.dstSet = m_descriptorSet;
			writeDescriptorSet.descriptorCount = 1;
			writeDescriptorSet.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
			writeDescriptorSet.pBufferInfo = &m_uniformBuffers[i].descriptor;
			writeDescriptorSet.dstBinding = m_uniformBuffers[i].ubo_info->binding_index;
			writes.push_back(writeDescriptorSet);
		}
		for (int i = 0; i < m_sampler_info.size(); i++) {
			VkWriteDescriptorSet writeDescriptorSet = {};
			writeDescriptorSet.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
			writeDescriptorSet.dstSet = m_descriptorSet;
			writeDescriptorSet.descriptorCount = 1;
			writeDescriptorSet.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
			writeDescriptorSet.pImageInfo = &m_sampler_info[i].descriptor;
			writeDescriptorSet.dstBinding = m_sampler_info[i].sampler_info->binding_index;
			writes.push_back(writeDescriptorSet);
		}

		vkUpdateDescriptorSets(device, writes.size(), writes.data(), 0, NULL);
	}

	// Keep the effect alive while this material references it.
	m_effect = effect;
	effect->AddReference();

	return WError(W_SUCCEEDED);
}
// Setup and fill the compute shader storage buffers for // vertex positions and velocities void prepareStorageBuffers() { float destPosX = 0.0f; float destPosY = 0.0f; // Initial particle positions std::vector<Particle> particleBuffer; for (int i = 0; i < PARTICLE_COUNT; ++i) { // Position float aspectRatio = (float)height / (float)width; float rndVal = (float)rand() / (float)(RAND_MAX / (360.0f * 3.14f * 2.0f)); float rndRad = (float)rand() / (float)(RAND_MAX) * 0.5f; Particle p; p.pos = glm::vec4( destPosX + cos(rndVal) * rndRad * aspectRatio, destPosY + sin(rndVal) * rndRad, 0.0f, 1.0f); p.col = glm::vec4( (float)(rand() % 255) / 255.0f, (float)(rand() % 255) / 255.0f, (float)(rand() % 255) / 255.0f, 1.0f); p.vel = glm::vec4(0.0f); particleBuffer.push_back(p); } // Buffer size is the same for all storage buffers uint32_t storageBufferSize = particleBuffer.size() * sizeof(Particle); VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; VkResult err; void *data; struct StagingBuffer { VkDeviceMemory memory; VkBuffer buffer; } stagingBuffer; // Allocate and fill host-visible staging storage buffer object // Allocate and fill storage buffer object VkBufferCreateInfo vBufferInfo = vkTools::initializers::bufferCreateInfo( VK_BUFFER_USAGE_TRANSFER_SRC_BIT, storageBufferSize); vkTools::checkResult(vkCreateBuffer(device, &vBufferInfo, nullptr, &stagingBuffer.buffer)); vkGetBufferMemoryRequirements(device, stagingBuffer.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); vkTools::checkResult(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffer.memory)); vkTools::checkResult(vkMapMemory(device, stagingBuffer.memory, 0, storageBufferSize, 0, &data)); memcpy(data, particleBuffer.data(), storageBufferSize); vkUnmapMemory(device, stagingBuffer.memory); vkTools::checkResult(vkBindBufferMemory(device, 
stagingBuffer.buffer, stagingBuffer.memory, 0)); // Allocate device local storage buffer ojbect vBufferInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; vkTools::checkResult(vkCreateBuffer(device, &vBufferInfo, nullptr, &computeStorageBuffer.buffer)); vkGetBufferMemoryRequirements(device, computeStorageBuffer.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAlloc.memoryTypeIndex); vkTools::checkResult(vkAllocateMemory(device, &memAlloc, nullptr, &computeStorageBuffer.memory)); vkTools::checkResult(vkBindBufferMemory(device, computeStorageBuffer.buffer, computeStorageBuffer.memory, 0)); // Copy from host to device createSetupCommandBuffer(); VkBufferCopy copyRegion = {}; copyRegion.size = storageBufferSize; vkCmdCopyBuffer( setupCmdBuffer, stagingBuffer.buffer, computeStorageBuffer.buffer, 1, ©Region); flushSetupCommandBuffer(); // Destroy staging buffer vkDestroyBuffer(device, stagingBuffer.buffer, nullptr); vkFreeMemory(device, stagingBuffer.memory, nullptr); computeStorageBuffer.descriptor.buffer = computeStorageBuffer.buffer; computeStorageBuffer.descriptor.offset = 0; computeStorageBuffer.descriptor.range = storageBufferSize; // Binding description vertices.bindingDescriptions.resize(1); vertices.bindingDescriptions[0] = vkTools::initializers::vertexInputBindingDescription( VERTEX_BUFFER_BIND_ID, sizeof(Particle), VK_VERTEX_INPUT_RATE_VERTEX); // Attribute descriptions // Describes memory layout and shader positions vertices.attributeDescriptions.resize(2); // Location 0 : Position vertices.attributeDescriptions[0] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0); // Location 1 : Color vertices.attributeDescriptions[1] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 1, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(float) * 4); // Assign to vertex 
buffer vertices.inputState = vkTools::initializers::pipelineVertexInputStateCreateInfo(); vertices.inputState.vertexBindingDescriptionCount = vertices.bindingDescriptions.size(); vertices.inputState.pVertexBindingDescriptions = vertices.bindingDescriptions.data(); vertices.inputState.vertexAttributeDescriptionCount = vertices.attributeDescriptions.size(); vertices.inputState.pVertexAttributeDescriptions = vertices.attributeDescriptions.data(); }
// Setup and fill the compute shader storage buffers for // vertex positions and velocities void prepareStorageBuffers() { std::mt19937 rGenerator; std::uniform_real_distribution<float> rDistribution(-1.0f, 1.0f); // Initial particle positions std::vector<Particle> particleBuffer(PARTICLE_COUNT); for (auto& particle : particleBuffer) { particle.pos = glm::vec2(rDistribution(rGenerator), rDistribution(rGenerator)); particle.vel = glm::vec2(0.0f); particle.gradientPos.x = particle.pos.x / 2.0f; } uint32_t storageBufferSize = particleBuffer.size() * sizeof(Particle); // Staging // SSBO is static, copy to device local memory // This results in better performance struct { VkDeviceMemory memory; VkBuffer buffer; } stagingBuffer; VulkanExampleBase::createBuffer( VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, storageBufferSize, particleBuffer.data(), &stagingBuffer.buffer, &stagingBuffer.memory); VulkanExampleBase::createBuffer( VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, storageBufferSize, nullptr, &computeStorageBuffer.buffer, &computeStorageBuffer.memory); // Copy to staging buffer VkCommandBuffer copyCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); VkBufferCopy copyRegion = {}; copyRegion.size = storageBufferSize; vkCmdCopyBuffer( copyCmd, stagingBuffer.buffer, computeStorageBuffer.buffer, 1, ©Region); VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true); vkFreeMemory(device, stagingBuffer.memory, nullptr); vkDestroyBuffer(device, stagingBuffer.buffer, nullptr); computeStorageBuffer.descriptor.range = storageBufferSize; computeStorageBuffer.descriptor.buffer = computeStorageBuffer.buffer; computeStorageBuffer.descriptor.offset = 0; // Binding description vertices.bindingDescriptions.resize(1); vertices.bindingDescriptions[0] = vkTools::initializers::vertexInputBindingDescription( VERTEX_BUFFER_BIND_ID, sizeof(Particle), 
VK_VERTEX_INPUT_RATE_VERTEX); // Attribute descriptions // Describes memory layout and shader positions vertices.attributeDescriptions.resize(2); // Location 0 : Position vertices.attributeDescriptions[0] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 0, VK_FORMAT_R32G32_SFLOAT, 0); // Location 1 : Gradient position vertices.attributeDescriptions[1] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 1, VK_FORMAT_R32G32B32A32_SFLOAT, 4 * sizeof(float)); // Assign to vertex buffer vertices.inputState = vkTools::initializers::pipelineVertexInputStateCreateInfo(); vertices.inputState.vertexBindingDescriptionCount = vertices.bindingDescriptions.size(); vertices.inputState.pVertexBindingDescriptions = vertices.bindingDescriptions.data(); vertices.inputState.vertexAttributeDescriptionCount = vertices.attributeDescriptions.size(); vertices.inputState.pVertexAttributeDescriptions = vertices.attributeDescriptions.data(); }
void VkeCubeTexture::loadCubeDDS(const char *inFile){ std::string searchPaths[] = { std::string(PROJECT_NAME), NVPWindow::sysExePath() + std::string(PROJECT_RELDIRECTORY), std::string(PROJECT_ABSDIRECTORY) }; nv_dds::CDDSImage ddsImage; for (uint32_t i = 0; i < 3; ++i){ std::string separator = ""; uint32_t strSize = searchPaths[i].size(); if(searchPaths[i].substr(strSize-1,strSize) != "/") separator = "/"; std::string filePath = searchPaths[i] + separator + std::string("images/") + std::string(inFile); ddsImage.load(filePath, true); if (ddsImage.is_valid()) break; } if (!ddsImage.is_valid()){ perror("Could not cube load texture image.\n"); exit(1); } uint32_t imgW = ddsImage.get_width(); uint32_t imgH = ddsImage.get_height(); uint32_t comCount = ddsImage.get_components(); uint32_t fmt = ddsImage.get_format(); bool isCube = ddsImage.is_cubemap(); bool isComp = ddsImage.is_compressed(); VkFormat vkFmt = VK_FORMAT_R8G8B8A8_UNORM; switch (fmt){ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT: vkFmt = VK_FORMAT_BC1_RGB_SRGB_BLOCK; break; case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT: vkFmt = VK_FORMAT_BC2_UNORM_BLOCK; break; case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: vkFmt = VK_FORMAT_BC3_UNORM_BLOCK; break; default: break; } m_width = imgW; m_height = imgH; m_format = vkFmt; VulkanDC::Device::Queue::Name queueName = "DEFAULT_GRAPHICS_QUEUE"; VulkanDC::Device::Queue::CommandBufferID cmdID = INIT_COMMAND_ID; VulkanDC *dc = VulkanDC::Get(); VulkanDC::Device *device = dc->getDefaultDevice(); VulkanDC::Device::Queue *queue = device->getQueue(queueName); VkCommandBuffer cmd = VK_NULL_HANDLE; queue->beginCommandBuffer(cmdID, &cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); imageCreateAndBind( &m_data.image, &m_data.memory, m_format, VK_IMAGE_TYPE_2D, m_width, m_height, 1, 6, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, (VkImageUsageFlagBits)(VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT), VK_IMAGE_TILING_OPTIMAL); VkBuffer cubeMapBuffer; VkDeviceMemory cubeMapMem; 
bufferCreate(&cubeMapBuffer, m_width*m_height * 3 * 6, VK_BUFFER_USAGE_TRANSFER_SRC_BIT); bufferAlloc(&cubeMapBuffer, &cubeMapMem, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (m_memory_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT){ imageSetLayoutBarrier(cmdID, queueName, m_data.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_GENERAL); for (uint32_t i = 0; i < 6; ++i){ void *data = NULL; VkSubresourceLayout layout; VkImageSubresource subres; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = m_mip_level; subres.arrayLayer = i; vkGetImageSubresourceLayout(getDefaultDevice(), m_data.image, &subres, &layout); VKA_CHECK_ERROR(vkMapMemory(getDefaultDevice(), cubeMapMem, layout.offset, layout.size, 0, &data), "Could not map memory for image.\n"); const nv_dds::CTexture &mipmap = ddsImage.get_cubemap_face(i); memcpy(data, (void *)mipmap, layout.size); vkUnmapMemory(getDefaultDevice(), cubeMapMem); } VkBufferImageCopy biCpyRgn[6]; for (uint32_t k = 0; k < 6; ++k){ VkSubresourceLayout layout; VkImageSubresource subres; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = m_mip_level; subres.arrayLayer = k; vkGetImageSubresourceLayout(getDefaultDevice(), m_data.image, &subres, &layout); biCpyRgn[k].bufferOffset = layout.offset; biCpyRgn[k].bufferImageHeight = 0; biCpyRgn[k].bufferRowLength = 0; biCpyRgn[k].imageExtent.width = m_width; biCpyRgn[k].imageExtent.height = m_height; biCpyRgn[k].imageExtent.depth = 1; biCpyRgn[k].imageOffset.x = 0; biCpyRgn[k].imageOffset.y = 0; biCpyRgn[k].imageOffset.z = 0; biCpyRgn[k].imageSubresource.baseArrayLayer = k; biCpyRgn[k].imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; biCpyRgn[k].imageSubresource.layerCount = 1; biCpyRgn[k].imageSubresource.mipLevel = 0; } VkFence copyFence; VkFenceCreateInfo fenceInfo; memset(&fenceInfo, 0, sizeof(fenceInfo)); fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(device->getVKDevice(), &fenceInfo, NULL, ©Fence); 
vkCmdCopyBufferToImage(cmd, cubeMapBuffer, m_data.image, m_data.imageLayout, 6, biCpyRgn); queue->flushCommandBuffer(cmdID, ©Fence); vkWaitForFences(device->getVKDevice(), 1, ©Fence, VK_TRUE, 100000000000); vkDestroyBuffer(device->getVKDevice(), cubeMapBuffer, NULL); vkFreeMemory(device->getVKDevice(), cubeMapMem, NULL); } VkSamplerCreateInfo sampler; sampler.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; sampler.pNext = NULL; sampler.magFilter = VK_FILTER_NEAREST; sampler.minFilter = VK_FILTER_NEAREST; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.mipLodBias = 0.0f; sampler.maxAnisotropy = 1; sampler.compareOp = VK_COMPARE_OP_NEVER; sampler.minLod = 0.0f; sampler.maxLod = 0.0f; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; VkImageViewCreateInfo view; view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view.pNext = NULL; view.viewType = VK_IMAGE_VIEW_TYPE_CUBE; view.format = m_format; view.components.r = VK_COMPONENT_SWIZZLE_R; view.components.g = VK_COMPONENT_SWIZZLE_G; view.components.b = VK_COMPONENT_SWIZZLE_B; view.components.a = VK_COMPONENT_SWIZZLE_A; view.subresourceRange.baseArrayLayer = 0; view.subresourceRange.levelCount = 1; view.subresourceRange.baseMipLevel = 0; view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; view.subresourceRange.layerCount = 1; VKA_CHECK_ERROR(vkCreateSampler(getDefaultDevice(), &sampler, NULL, &m_data.sampler), "Could not create sampler for image texture.\n"); view.image = m_data.image; VKA_CHECK_ERROR(vkCreateImageView(getDefaultDevice(), &view, NULL, &m_data.view), "Could not create image view for texture.\n"); }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Vertex Buffer Sample"; const bool depthPresent = true; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_renderpass(info, depthPresent); init_framebuffers(info, depthPresent); /* VULKAN_KEY_START */ /* * Set up a vertex buffer: * - Create a buffer * - Map it and write the vertex data into it * - Bind it using vkCmdBindVertexBuffers * - Later, at pipeline creation, * - fill in vertex input part of the pipeline with relevent data */ VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; buf_info.size = sizeof(g_vb_solid_face_colors_Data); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &info.vertex_buffer.buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, info.vertex_buffer.buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &alloc_info.memoryTypeIndex); assert(pass && "No mappable, coherent memory"); res = 
vkAllocateMemory(info.device, &alloc_info, NULL, &(info.vertex_buffer.mem)); assert(res == VK_SUCCESS); uint8_t *pData; res = vkMapMemory(info.device, info.vertex_buffer.mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data)); vkUnmapMemory(info.device, info.vertex_buffer.mem); res = vkBindBufferMemory(info.device, info.vertex_buffer.buf, info.vertex_buffer.mem, 0); assert(res == VK_SUCCESS); /* We won't use these here, but we will need this info when creating the * pipeline */ info.vi_binding.binding = 0; info.vi_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; info.vi_binding.stride = sizeof(g_vb_solid_face_colors_Data[0]); info.vi_attribs[0].binding = 0; info.vi_attribs[0].location = 0; info.vi_attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[0].offset = 0; info.vi_attribs[1].binding = 0; info.vi_attribs[1].location = 1; info.vi_attribs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[1].offset = 16; const VkDeviceSize offsets[1] = {0}; /* We cannot bind the vertex buffer until we begin a renderpass */ VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore imageAcquiredSemaphore; VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo; imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; imageAcquiredSemaphoreCreateInfo.pNext = NULL; imageAcquiredSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, imageAcquiredSemaphore, VK_NULL_HANDLE, 
&info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin = {}; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindVertexBuffers(info.cmd, 0, /* Start Binding */ 1, /* Binding Count */ &info.vertex_buffer.buf, /* pBuffers */ offsets); /* pOffsets */ vkCmdEndRenderPass(info.cmd); execute_end_command_buffer(info); execute_queue_command_buffer(info); /* VULKAN_KEY_END */ vkDestroySemaphore(info.device, imageAcquiredSemaphore, NULL); vkDestroyBuffer(info.device, info.vertex_buffer.buf, NULL); vkFreeMemory(info.device, info.vertex_buffer.mem, NULL); destroy_framebuffers(info); destroy_renderpass(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
/**
 * Creates a 2D sampled image from a raw pixel array.
 *
 * The pixels are written into a host-visible staging buffer, then copied on
 * the GPU into an optimally-tiled, device-local image, which is finally
 * transitioned to SHADER_READ_ONLY_OPTIMAL and wrapped in an image view.
 *
 * @param pixels         Source pixel data; may be NULL to create an
 *                       uninitialized image.
 * @param width/height   Image dimensions in pixels.
 * @param bDynamic       If true, the staging buffer/memory are kept alive
 *                       (presumably for later map/update calls — verify
 *                       against the rest of WImage).
 * @param num_components Components per pixel (1..4 when fmt is UNDEFINED).
 * @param fmt            Explicit format, or VK_FORMAT_UNDEFINED to derive a
 *                       32-bit-float format from num_components.
 * @param comp_size      Bytes per component (forced to 4 when fmt is derived).
 * @return W_SUCCEEDED, W_INVALIDPARAM, W_OUTOFMEMORY or
 *         W_UNABLETOCREATEIMAGE. On any failure all partial resources are
 *         released via the single `free_buffers` cleanup path.
 */
WError WImage::CreateFromPixelsArray(
	void* pixels,
	unsigned int width,
	unsigned int height,
	bool bDynamic,
	unsigned int num_components,
	VkFormat fmt,
	size_t comp_size) {

	VkDevice device = m_app->GetVulkanDevice();
	VkResult err;
	VkMemoryAllocateInfo memAllocInfo = vkTools::initializers::memoryAllocateInfo();
	VkMemoryRequirements memReqs = {};
	VkBufferImageCopy bufferCopyRegion = {};
	VkImageCreateInfo imageCreateInfo = vkTools::initializers::imageCreateInfo();
	VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo();
	uint8_t *data;

	// When no explicit format is given, pick a float format matching the
	// component count; component size is then fixed at 4 bytes (float).
	VkFormat format = fmt;
	if (fmt == VK_FORMAT_UNDEFINED) {
		switch (num_components) {
		case 1: format = VK_FORMAT_R32_SFLOAT; break;
		case 2: format = VK_FORMAT_R32G32_SFLOAT; break;
		case 3: format = VK_FORMAT_R32G32B32_SFLOAT; break;
		case 4: format = VK_FORMAT_R32G32B32A32_SFLOAT; break;
		default: return WError(W_INVALIDPARAM);
		}
		comp_size = 4;
	}

	_DestroyResources();

	// Create a host-visible staging buffer that contains the raw image data
	bufferCreateInfo.size = width * height * num_components * comp_size;
	// This buffer is used as a transfer source for the buffer copy
	bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
	bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

	err = vkCreateBuffer(device, &bufferCreateInfo, nullptr, &m_stagingBuffer);
	if (err) {
		_DestroyResources();
		return WError(W_OUTOFMEMORY);
	}

	// Get memory requirements for the staging buffer (alignment, memory type bits)
	vkGetBufferMemoryRequirements(device, m_stagingBuffer, &memReqs);
	memAllocInfo.allocationSize = memReqs.size;
	// Get memory type index for a host visible buffer
	m_app->GetMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAllocInfo.memoryTypeIndex);
	err = vkAllocateMemory(device, &memAllocInfo, nullptr, &m_stagingMemory);
	if (err) {
		vkDestroyBuffer(device, m_stagingBuffer, nullptr);
		_DestroyResources();
		return WError(W_OUTOFMEMORY);
	}
	err = vkBindBufferMemory(device, m_stagingBuffer, m_stagingMemory, 0);
	if (err) goto free_buffers;

	// Copy texture data into staging buffer (skipped when pixels is NULL,
	// leaving the image contents undefined until written later)
	if (pixels) {
		err = vkMapMemory(device, m_stagingMemory, 0, memReqs.size, 0, (void **)&data);
		if (err) goto free_buffers;
		memcpy(data, pixels, bufferCreateInfo.size);
		vkUnmapMemory(device, m_stagingMemory);
	}

	// Create optimal tiled target image
	imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
	imageCreateInfo.format = format;
	imageCreateInfo.mipLevels = 1; // TODO: USE m_app->engineParams["numGeneratedMips"]
	imageCreateInfo.arrayLayers = 1;
	imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
	imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
	imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
	imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
	imageCreateInfo.extent = { width, height, 1 };
	imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

	err = vkCreateImage(device, &imageCreateInfo, nullptr, &m_image);
	if (err) goto free_buffers;

	// Device-local backing memory for the image.
	vkGetImageMemoryRequirements(device, m_image, &memReqs);
	memAllocInfo.allocationSize = memReqs.size;
	m_app->GetMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAllocInfo.memoryTypeIndex);
	err = vkAllocateMemory(device, &memAllocInfo, nullptr, &m_deviceMemory);
	if (err) goto free_buffers;
	err = vkBindImageMemory(device, m_image, m_deviceMemory, 0);
	if (err) goto free_buffers;

	// Setup buffer copy regions for each mip level
	bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	bufferCopyRegion.imageSubresource.mipLevel = 0;
	bufferCopyRegion.imageSubresource.baseArrayLayer = 0;
	bufferCopyRegion.imageSubresource.layerCount = 1;
	bufferCopyRegion.imageExtent.width = width;
	bufferCopyRegion.imageExtent.height = height;
	bufferCopyRegion.imageExtent.depth = 1;
	bufferCopyRegion.bufferOffset = 0;

	err = m_app->BeginCommandBuffer();
	if (err) goto free_buffers;

	// Image barrier for optimal image (target)
	// Optimal image will be used as destination for the copy
	vkTools::setImageLayout(
		m_app->GetCommandBuffer(),
		m_image,
		VK_IMAGE_ASPECT_COLOR_BIT,
		VK_IMAGE_LAYOUT_PREINITIALIZED,
		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

	// Copy mip levels from staging buffer
	vkCmdCopyBufferToImage(
		m_app->GetCommandBuffer(),
		m_stagingBuffer,
		m_image,
		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
		1,
		&bufferCopyRegion
	);

	// Change texture image layout to shader read after all mip levels have been copied
	vkTools::setImageLayout(
		m_app->GetCommandBuffer(),
		m_image,
		VK_IMAGE_ASPECT_COLOR_BIT,
		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
		VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

	err = m_app->EndCommandBuffer();
	if (err) goto free_buffers;

// Shared cleanup: reached via goto on any error, and also by falling
// through on success (err == 0, so only the !bDynamic branch runs then).
free_buffers:
	// Clean up staging resources
	if (err || !bDynamic) {
		vkFreeMemory(device, m_stagingMemory, nullptr);
		vkDestroyBuffer(device, m_stagingBuffer, nullptr);
		m_stagingMemory = VK_NULL_HANDLE;
		m_stagingBuffer = VK_NULL_HANDLE;
	}
	if (err) {
		_DestroyResources();
		return WError(W_OUTOFMEMORY);
	}

	// Create image view
	// Textures are not directly accessed by the shaders and
	// are abstracted by image views containing additional
	// information and sub resource ranges
	VkImageViewCreateInfo view = vkTools::initializers::imageViewCreateInfo();
	view.image = VK_NULL_HANDLE;
	view.viewType = VK_IMAGE_VIEW_TYPE_2D;
	view.format = format;
	view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
	view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	view.subresourceRange.baseMipLevel = 0;
	view.subresourceRange.baseArrayLayer = 0;
	view.subresourceRange.layerCount = 1;
	// Linear tiling usually won't support mip maps
	// Only set mip map count if optimal tiling is used
	view.subresourceRange.levelCount = 1; // mips
	view.image = m_image;
	err = vkCreateImageView(device, &view, nullptr, &m_view);
	if (err) {
		_DestroyResources();
		return WError(W_UNABLETOCREATEIMAGE);
	}

	// Record the image's final properties for later queries/mapping.
	m_width = width;
	m_height = height;
	m_numComponents = num_components;
	m_componentSize = comp_size;
	m_mapSize = bufferCreateInfo.size;
	m_format = format;

	return WError(W_SUCCEEDED);
}
bool vulkan_buffer::create_internal(const bool copy_host_data, const compute_queue& cqueue) { const auto& vulkan_dev = ((const vulkan_device&)cqueue.get_device()).device; // create the buffer const VkBufferCreateInfo buffer_create_info { .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, .pNext = nullptr, .flags = 0, // no sparse backing .size = size, // set all the bits here, might need some better restrictions later on // NOTE: not setting vertex bit here, b/c we're always using SSBOs .usage = (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), // TODO: probably want a concurrent option later on .sharingMode = VK_SHARING_MODE_EXCLUSIVE, .queueFamilyIndexCount = 0, .pQueueFamilyIndices = nullptr, }; VK_CALL_RET(vkCreateBuffer(vulkan_dev, &buffer_create_info, nullptr, &buffer), "buffer creation failed", false) // allocate / back it up VkMemoryRequirements mem_req; vkGetBufferMemoryRequirements(vulkan_dev, buffer, &mem_req); const VkMemoryAllocateInfo alloc_info { .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, .pNext = nullptr, .allocationSize = mem_req.size, .memoryTypeIndex = find_memory_type_index(mem_req.memoryTypeBits, true /* prefer device memory */), }; VK_CALL_RET(vkAllocateMemory(vulkan_dev, &alloc_info, nullptr, &mem), "buffer allocation failed", false) VK_CALL_RET(vkBindBufferMemory(vulkan_dev, buffer, mem, 0), "buffer allocation binding failed", false) // update buffer desc info buffer_info.buffer = buffer; buffer_info.offset = 0; buffer_info.range = size; // buffer init from host data pointer if(copy_host_data && host_ptr != nullptr && !has_flag<COMPUTE_MEMORY_FLAG::NO_INITIAL_COPY>(flags)) { if(!write_memory_data(cqueue, host_ptr, size, 0, 0, "failed to initialize buffer with host data (map failed)")) { return false; } } return true; } vulkan_buffer::~vulkan_buffer() { if(buffer != nullptr) { vkDestroyBuffer(((const 
vulkan_device&)dev).device, buffer, nullptr); buffer = nullptr; } buffer_info = { nullptr, 0, 0 }; } void vulkan_buffer::read(const compute_queue& cqueue, const size_t size_, const size_t offset) { read(cqueue, host_ptr, size_, offset); } void vulkan_buffer::read(const compute_queue& cqueue, void* dst, const size_t size_, const size_t offset) { if(buffer == nullptr) return; const size_t read_size = (size_ == 0 ? size : size_); if(!read_check(size, read_size, offset, flags)) return; GUARD(lock); read_memory_data(cqueue, dst, read_size, offset, 0, "failed to read buffer"); } void vulkan_buffer::write(const compute_queue& cqueue, const size_t size_, const size_t offset) { write(cqueue, host_ptr, size_, offset); }
/**
 * Good ol' main function.
 *
 * Brings up SDL2 and Vulkan, renders a triangle with double buffering
 * (FRAME_LAG frames in flight), prints per-frame timing statistics every
 * FRAMES_PER_STAT frames, and tears everything down on quit/ESC.
 */
int main(int argc, char* argv[])
{
	static int windowWidth = 800;
	static int windowHeight = 600;
	static const char * applicationName = "SdlVulkanDemo_03_double_buffering";
	static const char * engineName = applicationName;

	bool boolResult;
	VkResult result;

	/*
	 * SDL2 Initialization
	 */
	SDL_Window *mySdlWindow;
	SDL_SysWMinfo mySdlSysWmInfo;
	boolResult = vkdemos::utils::sdl2Initialization(applicationName, windowWidth, windowHeight, mySdlWindow, mySdlSysWmInfo);
	assert(boolResult);

	/*
	 * Vulkan initialization.
	 */
	std::vector<const char *> layersNamesToEnable;
	layersNamesToEnable.push_back("VK_LAYER_LUNARG_standard_validation");

	std::vector<const char *> extensionsNamesToEnable;
	extensionsNamesToEnable.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
	extensionsNamesToEnable.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
	extensionsNamesToEnable.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); // TODO: add support for other windowing systems

	// Instance + debug report callback (errors, warnings, perf warnings, debug).
	VkInstance myInstance;
	boolResult = vkdemos::createVkInstance(layersNamesToEnable, extensionsNamesToEnable, applicationName, engineName, myInstance);
	assert(boolResult);

	VkDebugReportCallbackEXT myDebugReportCallback;
	vkdemos::createDebugReportCallback(myInstance,
		VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | VK_DEBUG_REPORT_DEBUG_BIT_EXT,
		vkdemos::debugCallback,
		myDebugReportCallback
	);

	// Physical device, surface, logical device + queue.
	VkPhysicalDevice myPhysicalDevice;
	boolResult = vkdemos::chooseVkPhysicalDevice(myInstance, 0, myPhysicalDevice);
	assert(boolResult);

	VkSurfaceKHR mySurface;
	boolResult = vkdemos::createVkSurface(myInstance, mySdlSysWmInfo, mySurface);
	assert(boolResult);

	VkDevice myDevice;
	VkQueue myQueue;
	uint32_t myQueueFamilyIndex;
	boolResult = vkdemos::createVkDeviceAndVkQueue(myPhysicalDevice, mySurface, layersNamesToEnable, myDevice, myQueue, myQueueFamilyIndex);
	assert(boolResult);

	// Swapchain sized for FRAME_LAG images (double buffering).
	VkSwapchainKHR mySwapchain;
	VkFormat mySurfaceFormat;
	boolResult = vkdemos::createVkSwapchain(myPhysicalDevice, myDevice, mySurface, windowWidth, windowHeight, FRAME_LAG, VK_NULL_HANDLE, mySwapchain, mySurfaceFormat);
	assert(boolResult);

	std::vector<VkImage> mySwapchainImagesVector;
	std::vector<VkImageView> mySwapchainImageViewsVector;
	boolResult = vkdemos::getSwapchainImagesAndViews(myDevice, mySwapchain, mySurfaceFormat, mySwapchainImagesVector, mySwapchainImageViewsVector);
	assert(boolResult);

	VkCommandPool myCommandPool;
	boolResult = vkdemos::createCommandPool(myDevice, myQueueFamilyIndex, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, myCommandPool);
	assert(boolResult);

	// One-shot command buffer used only for the initialization commands below.
	VkCommandBuffer myCmdBufferInitialization;
	boolResult = vkdemos::allocateCommandBuffer(myDevice, myCommandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, myCmdBufferInitialization);
	assert(boolResult);

	/*
	 * Initializations from Demo 02 (Triangle).
	 */
	VkPhysicalDeviceMemoryProperties myMemoryProperties;
	vkGetPhysicalDeviceMemoryProperties(myPhysicalDevice, &myMemoryProperties);

	// Create the Depth Buffer's Image and View.
	const VkFormat myDepthBufferFormat = VK_FORMAT_D16_UNORM;
	VkImage myDepthImage;
	VkImageView myDepthImageView;
	VkDeviceMemory myDepthMemory;
	boolResult = vkdemos::createAndAllocateImage(myDevice,
		myMemoryProperties,
		VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
		0,
		myDepthBufferFormat,
		windowWidth,
		windowHeight,
		myDepthImage,
		myDepthMemory,
		&myDepthImageView,
		VK_IMAGE_ASPECT_DEPTH_BIT
	);
	assert(boolResult);

	// Create the renderpass.
	VkRenderPass myRenderPass;
	boolResult = demo02CreateRenderPass(myDevice, mySurfaceFormat, myDepthBufferFormat, myRenderPass);
	assert(boolResult);

	// Create the Framebuffers, based on the number of swapchain images.
	std::vector<VkFramebuffer> myFramebuffersVector;
	myFramebuffersVector.reserve(mySwapchainImageViewsVector.size());

	for(const auto view : mySwapchainImageViewsVector) {
		VkFramebuffer fb;
		boolResult = vkdemos::utils::createFramebuffer(myDevice, myRenderPass, {view, myDepthImageView}, windowWidth, windowHeight, fb);
		assert(boolResult);
		myFramebuffersVector.push_back(fb);
	}

	// Create a buffer to use as the vertex buffer.
	const size_t vertexBufferSize = sizeof(TriangleDemoVertex)*NUM_DEMO_VERTICES;
	VkBuffer myVertexBuffer;
	VkDeviceMemory myVertexBufferMemory;
	boolResult = vkdemos::createAndAllocateBuffer(myDevice,
		myMemoryProperties,
		VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
		VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
		vertexBufferSize,
		myVertexBuffer,
		myVertexBufferMemory
	);
	assert(boolResult);

	// Map vertex buffer and insert data
	{
		void *mappedBuffer;
		result = vkMapMemory(myDevice, myVertexBufferMemory, 0, VK_WHOLE_SIZE, 0, &mappedBuffer);
		assert(result == VK_SUCCESS);
		memcpy(mappedBuffer, vertices, vertexBufferSize);
		vkUnmapMemory(myDevice, myVertexBufferMemory);
	}

	// Create the pipeline.
	// No descriptor sets or push constants in this demo: an empty pipeline layout.
	const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
		.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		.pNext = nullptr,
		.flags = 0,
		.setLayoutCount = 0,
		.pSetLayouts = nullptr,
		.pushConstantRangeCount = 0,
		.pPushConstantRanges = nullptr,
	};

	VkPipelineLayout myPipelineLayout;
	result = vkCreatePipelineLayout(myDevice, &pipelineLayoutCreateInfo, nullptr, &myPipelineLayout);
	assert(result == VK_SUCCESS);

	// Create Pipeline.
	VkPipeline myGraphicsPipeline;
	boolResult = demo02CreatePipeline(myDevice, myRenderPass, myPipelineLayout, VERTEX_SHADER_FILENAME, FRAGMENT_SHADER_FILENAME, VERTEX_INPUT_BINDING, myGraphicsPipeline);
	assert(boolResult);

	/*
	 * In the PerFrameData struct we group all the objects that are used in a single frame, and that
	 * must remain valid for the duration of that frame.
	 */
	PerFrameData perFrameDataVector[FRAME_LAG];

	for(int i = 0; i < FRAME_LAG; i++) {
		boolResult = vkdemos::allocateCommandBuffer(myDevice, myCommandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, perFrameDataVector[i].presentCmdBuffer);
		assert(boolResult);

		result = vkdemos::utils::createFence(myDevice, perFrameDataVector[i].presentFence);
		assert(result == VK_SUCCESS);

		result = vkdemos::utils::createSemaphore(myDevice, perFrameDataVector[i].imageAcquiredSemaphore);
		assert(result == VK_SUCCESS);

		result = vkdemos::utils::createSemaphore(myDevice, perFrameDataVector[i].renderingCompletedSemaphore);
		assert(result == VK_SUCCESS);

		perFrameDataVector[i].fenceInitialized = false;
	}

	/*
	 * Generation and submission of the initialization commands' command buffer.
	 */
	// We fill the initialization command buffer with... the initialization commands.
	boolResult = demo02FillInitializationCommandBuffer(myCmdBufferInitialization, myDepthImage);
	assert(boolResult);

	// We now submit the command buffer to the queue we created before, and we wait
	// for its completition.
	VkSubmitInfo submitInfo = {
		.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
		.pNext = nullptr,
		.waitSemaphoreCount = 0,
		.pWaitSemaphores = nullptr,
		.pWaitDstStageMask = nullptr,
		.commandBufferCount = 1,
		.pCommandBuffers = &myCmdBufferInitialization,
		.signalSemaphoreCount = 0,
		.pSignalSemaphores = nullptr
	};

	result = vkQueueSubmit(myQueue, 1, &submitInfo, VK_NULL_HANDLE);
	assert(result == VK_SUCCESS);

	// Wait for the queue to complete its work.
	result = vkQueueWaitIdle(myQueue);
	assert(result == VK_SUCCESS);

	/*
	 * Event loop
	 */
	SDL_Event sdlEvent;
	bool quit = false;

	// Just some variables for frame statistics
	long frameNumber = 0;
	long frameMaxTime = LONG_MIN;
	long frameMinTime = LONG_MAX;
	long frameAvgTimeSum = 0;
	long frameAvgTimeSumSquare = 0;
	constexpr long FRAMES_PER_STAT = 120;	// How many frames to wait before printing frame time statistics.

	// The main event/render loop.
	while(!quit)
	{
		// Process events for this frame
		while(SDL_PollEvent(&sdlEvent))
		{
			if (sdlEvent.type == SDL_QUIT) {
				quit = true;
			}
			if (sdlEvent.type == SDL_KEYDOWN && sdlEvent.key.keysym.sym == SDLK_ESCAPE) {
				quit = true;
			}
		}

		// Rendering code
		if(!quit)
		{
			// Render a single frame; the per-frame objects alternate with
			// frameNumber % FRAME_LAG so a frame never reuses objects still
			// in flight for the previous one.
			auto renderStartTime = std::chrono::high_resolution_clock::now();
			quit = !demo03RenderSingleFrame(myDevice, myQueue, mySwapchain, myFramebuffersVector, myRenderPass, myGraphicsPipeline, myVertexBuffer, VERTEX_INPUT_BINDING, perFrameDataVector[frameNumber % FRAME_LAG], windowWidth, windowHeight);
			auto renderStopTime = std::chrono::high_resolution_clock::now();

			// Compute frame time statistics
			auto elapsedTimeUs = std::chrono::duration_cast<std::chrono::microseconds>(renderStopTime - renderStartTime).count();

			frameMaxTime = std::max(frameMaxTime, elapsedTimeUs);
			frameMinTime = std::min(frameMinTime, elapsedTimeUs);
			frameAvgTimeSum += elapsedTimeUs;
			frameAvgTimeSumSquare += elapsedTimeUs*elapsedTimeUs;

			// Print statistics if necessary
			// NOTE(review): the very first print (frameNumber == 0) divides a
			// single sample by FRAMES_PER_STAT, so average may round to 0 and
			// the percentage below then divides by zero — confirm intent.
			if(frameNumber % FRAMES_PER_STAT == 0)
			{
				auto average = frameAvgTimeSum/FRAMES_PER_STAT;
				auto stddev = std::sqrt(frameAvgTimeSumSquare/FRAMES_PER_STAT - average*average);
				std::cout << "Frame time: average " << std::setw(6) << average
				          << " us, maximum " << std::setw(6) << frameMaxTime
				          << " us, minimum " << std::setw(6) << frameMinTime
				          << " us, stddev " << (long)stddev
				          << " (" << std::fixed << std::setprecision(2) << (stddev/average * 100.0f) << "%)"
				          << std::endl;

				frameMaxTime = LONG_MIN;
				frameMinTime = LONG_MAX;
				frameAvgTimeSum = 0;
				frameAvgTimeSumSquare = 0;
			}

			frameNumber++;
		}
	}

	/*
	 * Deinitialization
	 */
	// We wait for pending operations to complete before starting to destroy stuff.
	result = vkQueueWaitIdle(myQueue);
	assert(result == VK_SUCCESS);

	// Destroy the objects in the perFrameDataVector array.
	for(int i = 0; i < FRAME_LAG; i++)
	{
		vkDestroyFence(myDevice, perFrameDataVector[i].presentFence, nullptr);
		vkDestroySemaphore(myDevice, perFrameDataVector[i].imageAcquiredSemaphore, nullptr);
		vkDestroySemaphore(myDevice, perFrameDataVector[i].renderingCompletedSemaphore, nullptr);
	}

	/*
	 * For more informations on the following commands, refer to Demo 02.
	 */
	vkDestroyPipeline(myDevice, myGraphicsPipeline, nullptr);
	vkDestroyPipelineLayout(myDevice, myPipelineLayout, nullptr);
	vkDestroyBuffer(myDevice, myVertexBuffer, nullptr);
	vkFreeMemory(myDevice, myVertexBufferMemory, nullptr);

	for(auto framebuffer : myFramebuffersVector)
		vkDestroyFramebuffer(myDevice, framebuffer, nullptr);

	vkDestroyRenderPass(myDevice, myRenderPass, nullptr);
	vkDestroyImageView(myDevice, myDepthImageView, nullptr);
	vkDestroyImage(myDevice, myDepthImage, nullptr);
	vkFreeMemory(myDevice, myDepthMemory, nullptr);

	/*
	 * For more informations on the following commands, refer to Demo 01.
	 */
	vkDestroyCommandPool(myDevice, myCommandPool, nullptr);

	for(auto imgView : mySwapchainImageViewsVector)
		vkDestroyImageView(myDevice, imgView, nullptr);

	vkDestroySwapchainKHR(myDevice, mySwapchain, nullptr);
	vkDestroyDevice(myDevice, nullptr);
	vkDestroySurfaceKHR(myInstance, mySurface, nullptr);
	vkdemos::destroyDebugReportCallback(myInstance, myDebugReportCallback);
	vkDestroyInstance(myInstance, nullptr);
	SDL_DestroyWindow(mySdlWindow);
	SDL_Quit();
	return 0;
}
// Generate randomized noise and upload it to the 3D texture using staging void updateNoiseTexture() { const uint32_t texMemSize = texture.width * texture.height * texture.depth; uint8_t *data = new uint8_t[texMemSize]; memset(data, 0, texMemSize); // Generate perlin based noise std::cout << "Generating " << texture.width << " x " << texture.height << " x " << texture.depth << " noise texture..." << std::endl; auto tStart = std::chrono::high_resolution_clock::now(); PerlinNoise<float> perlinNoise; FractalNoise<float> fractalNoise(perlinNoise); std::default_random_engine rndEngine(std::random_device{}()); const int32_t noiseType = rand() % 2; const float noiseScale = static_cast<float>(rand() % 10) + 4.0f; #pragma omp parallel for for (int32_t z = 0; z < (int32_t)texture.depth; z++) { for (int32_t y = 0; y < (int32_t)texture.height; y++) { for (int32_t x = 0; x < (int32_t)texture.width; x++) { float nx = (float)x / (float)texture.width; float ny = (float)y / (float)texture.height; float nz = (float)z / (float)texture.depth; #define FRACTAL #ifdef FRACTAL float n = fractalNoise.noise(nx * noiseScale, ny * noiseScale, nz * noiseScale); #else float n = 20.0 * perlinNoise.noise(nx, ny, nz); #endif n = n - floor(n); data[x + y * texture.width + z * texture.width * texture.height] = static_cast<uint8_t>(floor(n * 255)); } } } auto tEnd = std::chrono::high_resolution_clock::now(); auto tDiff = std::chrono::duration<double, std::milli>(tEnd - tStart).count(); std::cout << "Done in " << tDiff << "ms" << std::endl; // Create a host-visible staging buffer that contains the raw image data VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; // Buffer object VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo(); bufferCreateInfo.size = texMemSize; bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer)); // 
Allocate host visible memory for data upload VkMemoryAllocateInfo memAllocInfo = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs = {}; vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &stagingMemory)); VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0)); // Copy texture data into staging buffer uint8_t *mapped; VK_CHECK_RESULT(vkMapMemory(device, stagingMemory, 0, memReqs.size, 0, (void **)&mapped)); memcpy(mapped, data, texMemSize); vkUnmapMemory(device, stagingMemory); VkCommandBuffer copyCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); // Image barrier for optimal image // The sub resource range describes the regions of the image we will be transition VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.baseMipLevel = 0; subresourceRange.levelCount = 1; subresourceRange.layerCount = 1; // Optimal image will be used as destination for the copy, so we must transfer from our // initial undefined image layout to the transfer destination layout vkTools::setImageLayout( copyCmd, texture.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy 3D noise data to texture // Setup buffer copy regions VkBufferImageCopy bufferCopyRegion{}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = 0; bufferCopyRegion.imageSubresource.baseArrayLayer = 0; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.width = texture.width; bufferCopyRegion.imageExtent.height = texture.height; bufferCopyRegion.imageExtent.depth = 
texture.depth; vkCmdCopyBufferToImage( copyCmd, stagingBuffer, texture.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &bufferCopyRegion); // Change texture image layout to shader read after all mip levels have been copied texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vkTools::setImageLayout( copyCmd, texture.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, texture.imageLayout, subresourceRange); VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true); // Clean up staging resources delete[] data; vkFreeMemory(device, stagingMemory, nullptr); vkDestroyBuffer(device, stagingBuffer, nullptr); regenerateNoise = false; }
// Loads the six ".rct" cube faces from cubemapDir, packs all their mip levels
// into one staging buffer, and uploads them into a cube-compatible
// VK_FORMAT_R8G8B8A8_UNORM image + view (textureImage/textureMemory/
// textureImageView members). Returns false on any read or Vulkan failure.
//
// NOTE(review): several early "return false" paths below leak stagingBuffer/
// stagingMemory (created but not destroyed on failure) — consider a cleanup
// path. Also, mipMapLevels is set to -1 here and then used as a loop bound
// and image mip count; presumably ReadCubeFace assigns it — TODO confirm.
bool Cubemap::Init(VulkanDevice * device, VulkanCommandBuffer * cmdBuffer, std::string cubemapDir)
{
	VkResult result;
	void * pData;
	mipMapLevels = -1;

	std::vector<MipMap> mipMapsRight;
	std::vector<MipMap> mipMapsLeft;
	std::vector<MipMap> mipMapsTop;
	std::vector<MipMap> mipMapsBottom;
	std::vector<MipMap> mipMapsBack;
	std::vector<MipMap> mipMapsFront;

	// Read each cube face
	if (!ReadCubeFace(cubemapDir + "/right.rct", mipMapsRight)) return false;
	if (!ReadCubeFace(cubemapDir + "/left.rct", mipMapsLeft)) return false;
	if (!ReadCubeFace(cubemapDir + "/up.rct", mipMapsTop)) return false;
	if (!ReadCubeFace(cubemapDir + "/down.rct", mipMapsBottom)) return false;
	if (!ReadCubeFace(cubemapDir + "/back.rct", mipMapsBack)) return false;
	if (!ReadCubeFace(cubemapDir + "/front.rct", mipMapsFront)) return false;

	// Sum the byte size of every mip of every face — this is the staging
	// buffer size.
	unsigned int totalTextureSize = 0;
	for (unsigned int i = 0; i < mipMapsRight.size(); i++) totalTextureSize += mipMapsRight[i].size;
	for (unsigned int i = 0; i < mipMapsLeft.size(); i++) totalTextureSize += mipMapsLeft[i].size;
	for (unsigned int i = 0; i < mipMapsTop.size(); i++) totalTextureSize += mipMapsTop[i].size;
	for (unsigned int i = 0; i < mipMapsBottom.size(); i++) totalTextureSize += mipMapsBottom[i].size;
	for (unsigned int i = 0; i < mipMapsBack.size(); i++) totalTextureSize += mipMapsBack[i].size;
	for (unsigned int i = 0; i < mipMapsFront.size(); i++) totalTextureSize += mipMapsFront[i].size;

	// Create an array of bits which stores all of the texture data
	// (face-major order: right, left, top, bottom, back, front — this must
	// match the baseArrayLayer order of the copy regions built below).
	std::vector<unsigned char> textureData;
	for (unsigned int i = 0; i < mipMapsRight.size(); i++)
		for (unsigned int j = 0; j < mipMapsRight[i].size; j++)
			textureData.push_back(mipMapsRight[i].data[j]);
	for (unsigned int i = 0; i < mipMapsLeft.size(); i++)
		for (unsigned int j = 0; j < mipMapsLeft[i].size; j++)
			textureData.push_back(mipMapsLeft[i].data[j]);
	for (unsigned int i = 0; i < mipMapsTop.size(); i++)
		for (unsigned int j = 0; j < mipMapsTop[i].size; j++)
			textureData.push_back(mipMapsTop[i].data[j]);
	for (unsigned int i = 0; i < mipMapsBottom.size(); i++)
		for (unsigned int j = 0; j < mipMapsBottom[i].size; j++)
			textureData.push_back(mipMapsBottom[i].data[j]);
	for (unsigned int i = 0; i < mipMapsBack.size(); i++)
		for (unsigned int j = 0; j < mipMapsBack[i].size; j++)
			textureData.push_back(mipMapsBack[i].data[j]);
	for (unsigned int i = 0; i < mipMapsFront.size(); i++)
		for (unsigned int j = 0; j < mipMapsFront[i].size; j++)
			textureData.push_back(mipMapsFront[i].data[j]);

	// The per-face CPU copies are no longer needed once packed.
	for (int i = 0; i < mipMapsRight.size(); i++) delete[] mipMapsRight[i].data;
	for (int i = 0; i < mipMapsLeft.size(); i++) delete[] mipMapsLeft[i].data;
	for (int i = 0; i < mipMapsTop.size(); i++) delete[] mipMapsTop[i].data;
	for (int i = 0; i < mipMapsBottom.size(); i++) delete[] mipMapsBottom[i].data;
	for (int i = 0; i < mipMapsBack.size(); i++) delete[] mipMapsBack[i].data;
	for (int i = 0; i < mipMapsFront.size(); i++) delete[] mipMapsFront[i].data;

	VkMemoryRequirements memReq{};
	VkMemoryAllocateInfo allocInfo{};
	allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;

	// Host-visible staging buffer holding the packed texture data.
	VkBuffer stagingBuffer;
	VkDeviceMemory stagingMemory;
	VkBufferCreateInfo bufferCI{};
	bufferCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	bufferCI.size = totalTextureSize;
	bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
	bufferCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
	result = vkCreateBuffer(device->GetDevice(), &bufferCI, VK_NULL_HANDLE, &stagingBuffer);
	if (result != VK_SUCCESS) return false;
	vkGetBufferMemoryRequirements(device->GetDevice(), stagingBuffer, &memReq);
	allocInfo.allocationSize = memReq.size;
	if (!device->MemoryTypeFromProperties(memReq.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &allocInfo.memoryTypeIndex)) return false;
	result = vkAllocateMemory(device->GetDevice(), &allocInfo, VK_NULL_HANDLE, &stagingMemory);
	if (result != VK_SUCCESS) return false;
	result = vkBindBufferMemory(device->GetDevice(), stagingBuffer, stagingMemory, 0);
	if (result != VK_SUCCESS) return false;

	// Map and fill the staging memory.
	result = vkMapMemory(device->GetDevice(), stagingMemory, 0, memReq.size, 0, &pData);
	if (result != VK_SUCCESS) return false;
	memcpy(pData, textureData.data(), textureData.size());
	vkUnmapMemory(device->GetDevice(), stagingMemory);

	// One copy region per (face, mip) pair; offsets walk the packed buffer in
	// the same face-major order used above.
	std::vector<VkBufferImageCopy> bufferCopyRegions;
	uint32_t offset = 0;
	for (int face = 0; face < 6; face++) {
		for (unsigned int level = 0; level < mipMapLevels; level++) {
			VkBufferImageCopy bufferCopyRegion{};
			bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
			bufferCopyRegion.imageSubresource.mipLevel = level;
			bufferCopyRegion.imageSubresource.baseArrayLayer = face;
			bufferCopyRegion.imageSubresource.layerCount = 1;
			bufferCopyRegion.imageExtent.depth = 1;
			bufferCopyRegion.bufferOffset = offset;
			// Every face has the same width, height and mipmap
			bufferCopyRegion.imageExtent.width = mipMapsRight[level].width;
			bufferCopyRegion.imageExtent.height = mipMapsRight[level].height;
			offset += (uint32_t)mipMapsRight[level].size;
			bufferCopyRegions.push_back(bufferCopyRegion);
		}
	}

	// Device-local, cube-compatible image with 6 array layers (one per face).
	VkImageCreateInfo imageCI{};
	imageCI.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
	imageCI.imageType = VK_IMAGE_TYPE_2D;
	imageCI.format = VK_FORMAT_R8G8B8A8_UNORM;
	imageCI.mipLevels = mipMapLevels;
	imageCI.arrayLayers = 6;
	imageCI.samples = VK_SAMPLE_COUNT_1_BIT;
	imageCI.tiling = VK_IMAGE_TILING_OPTIMAL;
	imageCI.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
	imageCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
	imageCI.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
	imageCI.extent.width = mipMapsRight[0].width;
	imageCI.extent.height = mipMapsRight[0].height;
	imageCI.extent.depth = 1;
	imageCI.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
	result = vkCreateImage(device->GetDevice(), &imageCI, VK_NULL_HANDLE, &textureImage);
	if (result != VK_SUCCESS) return false;
	vkGetImageMemoryRequirements(device->GetDevice(), textureImage, &memReq);
	VkMemoryAllocateInfo memAlloc{};
	memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
	memAlloc.allocationSize = memReq.size;
	if (!device->MemoryTypeFromProperties(memReq.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAlloc.memoryTypeIndex)) return false;
	result = vkAllocateMemory(device->GetDevice(), &memAlloc, VK_NULL_HANDLE, &textureMemory);
	if (result != VK_SUCCESS) return false;
	result = vkBindImageMemory(device->GetDevice(), textureImage, textureMemory, 0);
	if (result != VK_SUCCESS) return false;

	// Record and submit: UNDEFINED -> TRANSFER_DST, copy all regions,
	// TRANSFER_DST -> SHADER_READ_ONLY. Execute(..., true) waits for
	// completion, so the staging resources can be freed right after.
	VkImageSubresourceRange range{};
	range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	range.baseMipLevel = 0;
	range.levelCount = mipMapLevels;
	range.layerCount = 6;
	cmdBuffer->BeginRecording();
	VulkanTools::SetImageLayout(textureImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &range, cmdBuffer, device, false);
	vkCmdCopyBufferToImage(cmdBuffer->GetCommandBuffer(), stagingBuffer, textureImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (uint32_t)bufferCopyRegions.size(), bufferCopyRegions.data());
	VulkanTools::SetImageLayout(textureImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, &range, cmdBuffer, device, false);
	cmdBuffer->EndRecording();
	cmdBuffer->Execute(device, NULL, NULL, NULL, true);

	vkFreeMemory(device->GetDevice(), stagingMemory, VK_NULL_HANDLE);
	vkDestroyBuffer(device->GetDevice(), stagingBuffer, VK_NULL_HANDLE);

	// Cube view covering all 6 layers and every mip level.
	VkImageViewCreateInfo viewCI{};
	viewCI.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
	viewCI.image = textureImage;
	viewCI.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
	viewCI.format = VK_FORMAT_R8G8B8A8_UNORM;
	viewCI.components.r = VK_COMPONENT_SWIZZLE_R;
	viewCI.components.g = VK_COMPONENT_SWIZZLE_G;
	viewCI.components.b = VK_COMPONENT_SWIZZLE_B;
	viewCI.components.a = VK_COMPONENT_SWIZZLE_A;
	viewCI.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	viewCI.subresourceRange.baseMipLevel = 0;
	viewCI.subresourceRange.baseArrayLayer = 0;
	viewCI.subresourceRange.layerCount = 6;
	viewCI.subresourceRange.levelCount = mipMapLevels;
	result = vkCreateImageView(device->GetDevice(), &viewCI, VK_NULL_HANDLE, &textureImageView);
	if (result != VK_SUCCESS) return false;
	return true;
}
void prepareVertices(bool useStagingBuffers) { struct Vertex { float position[3]; float color[3]; }; std::vector<Vertex> vertexBuffer = { { { 1.0f, 1.0f, 0.0f }, { 1.0f, 0.0f, 0.0f } }, { { -1.0f, 1.0f, 0.0f }, { 0.0f, 1.0f, 0.0f } }, { { 0.0f, -1.0f, 0.0f }, { 0.0f, 0.0f, 1.0f } } }; uint32_t vertexBufferSize = static_cast<uint32_t>(vertexBuffer.size()) * sizeof(Vertex); std::vector<uint32_t> indexBuffer = { 0, 1, 2 }; indices.count = static_cast<uint32_t>(indexBuffer.size()); uint32_t indexBufferSize = indices.count * sizeof(uint32_t); VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; VkMemoryRequirements memReqs; void *data; if (useStagingBuffers) { struct StagingBuffer { VkDeviceMemory memory; VkBuffer buffer; }; struct { StagingBuffer vertices; StagingBuffer indices; } stagingBuffers; VkBufferCreateInfo vertexBufferInfo = {}; vertexBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; vertexBufferInfo.size = vertexBufferSize; vertexBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, nullptr, &stagingBuffers.vertices.buffer)); vkGetBufferMemoryRequirements(device, stagingBuffers.vertices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffers.vertices.memory)); VK_CHECK_RESULT(vkMapMemory(device, stagingBuffers.vertices.memory, 0, memAlloc.allocationSize, 0, &data)); memcpy(data, vertexBuffer.data(), vertexBufferSize); vkUnmapMemory(device, stagingBuffers.vertices.memory); VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffers.vertices.buffer, stagingBuffers.vertices.memory, 0)); vertexBufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, 
nullptr, &vertices.buffer)); vkGetBufferMemoryRequirements(device, vertices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &vertices.memory)); VK_CHECK_RESULT(vkBindBufferMemory(device, vertices.buffer, vertices.memory, 0)); VkBufferCreateInfo indexbufferInfo = {}; indexbufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; indexbufferInfo.size = indexBufferSize; indexbufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &stagingBuffers.indices.buffer)); vkGetBufferMemoryRequirements(device, stagingBuffers.indices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffers.indices.memory)); VK_CHECK_RESULT(vkMapMemory(device, stagingBuffers.indices.memory, 0, indexBufferSize, 0, &data)); memcpy(data, indexBuffer.data(), indexBufferSize); vkUnmapMemory(device, stagingBuffers.indices.memory); VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffers.indices.buffer, stagingBuffers.indices.memory, 0)); indexbufferInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &indices.buffer)); vkGetBufferMemoryRequirements(device, indices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &indices.memory)); VK_CHECK_RESULT(vkBindBufferMemory(device, indices.buffer, indices.memory, 0)); VkCommandBufferBeginInfo cmdBufferBeginInfo = {}; 
cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmdBufferBeginInfo.pNext = nullptr; VkCommandBuffer copyCmd = getCommandBuffer(true); VkBufferCopy copyRegion = {}; copyRegion.size = vertexBufferSize; vkCmdCopyBuffer(copyCmd, stagingBuffers.vertices.buffer, vertices.buffer, 1, ©Region); copyRegion.size = indexBufferSize; vkCmdCopyBuffer(copyCmd, stagingBuffers.indices.buffer, indices.buffer, 1, ©Region); flushCommandBuffer(copyCmd); vkDestroyBuffer(device, stagingBuffers.vertices.buffer, nullptr); vkFreeMemory(device, stagingBuffers.vertices.memory, nullptr); vkDestroyBuffer(device, stagingBuffers.indices.buffer, nullptr); vkFreeMemory(device, stagingBuffers.indices.memory, nullptr); } else { VkBufferCreateInfo vertexBufferInfo = {}; vertexBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; vertexBufferInfo.size = vertexBufferSize; vertexBufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, nullptr, &vertices.buffer)); vkGetBufferMemoryRequirements(device, vertices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &vertices.memory)); VK_CHECK_RESULT(vkMapMemory(device, vertices.memory, 0, memAlloc.allocationSize, 0, &data)); memcpy(data, vertexBuffer.data(), vertexBufferSize); vkUnmapMemory(device, vertices.memory); VK_CHECK_RESULT(vkBindBufferMemory(device, vertices.buffer, vertices.memory, 0)); VkBufferCreateInfo indexbufferInfo = {}; indexbufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; indexbufferInfo.size = indexBufferSize; indexbufferInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &indices.buffer)); vkGetBufferMemoryRequirements(device, indices.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = 
getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &indices.memory)); VK_CHECK_RESULT(vkMapMemory(device, indices.memory, 0, indexBufferSize, 0, &data)); memcpy(data, indexBuffer.data(), indexBufferSize); vkUnmapMemory(device, indices.memory); VK_CHECK_RESULT(vkBindBufferMemory(device, indices.buffer, indices.memory, 0)); } vertices.inputBinding.binding = VERTEX_BUFFER_BIND_ID; vertices.inputBinding.stride = sizeof(Vertex); vertices.inputBinding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; vertices.inputAttributes.resize(2); vertices.inputAttributes[0].binding = VERTEX_BUFFER_BIND_ID; vertices.inputAttributes[0].location = 0; vertices.inputAttributes[0].format = VK_FORMAT_R32G32B32_SFLOAT; vertices.inputAttributes[0].offset = offsetof(Vertex, position); vertices.inputAttributes[1].binding = VERTEX_BUFFER_BIND_ID; vertices.inputAttributes[1].location = 1; vertices.inputAttributes[1].format = VK_FORMAT_R32G32B32_SFLOAT; vertices.inputAttributes[1].offset = offsetof(Vertex, color); vertices.inputState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertices.inputState.pNext = nullptr; vertices.inputState.flags = VK_FLAGS_NONE; vertices.inputState.vertexBindingDescriptionCount = 1; vertices.inputState.pVertexBindingDescriptions = &vertices.inputBinding; vertices.inputState.vertexAttributeDescriptionCount = static_cast<uint32_t>(vertices.inputAttributes.size()); vertices.inputState.pVertexAttributeDescriptions = vertices.inputAttributes.data(); }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Uniform Buffer Sample"; init_global_layer_properties(info); init_instance(info, sample_title); init_enumerate_device(info); init_queue_family_index(info); init_device(info); init_window_size(info, 50, 50); info.Projection = glm::perspective(glm::radians(45.0f), 1.0f, 0.1f, 100.0f); info.View = glm::lookAt( glm::vec3(0, 3, 10), // Camera is at (0,3,10), in World Space glm::vec3(0, 0, 0), // and looks at the origin glm::vec3(0, -1, 0) // Head is up (set to 0,-1,0 to look upside-down) ); info.Model = glm::mat4(1.0f); // Vulkan clip space has inverted Y and half Z. info.Clip = glm::mat4(1.0f, 0.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 1.0f); info.MVP = info.Clip * info.Projection * info.View * info.Model; /* VULKAN_KEY_START */ VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = sizeof(info.MVP); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &info.uniform_data.buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, info.uniform_data.buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &alloc_info.memoryTypeIndex); assert(pass && "No mappable, coherent memory"); res = vkAllocateMemory(info.device, &alloc_info, NULL, &(info.uniform_data.mem)); assert(res == 
VK_SUCCESS); uint8_t *pData; res = vkMapMemory(info.device, info.uniform_data.mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &info.MVP, sizeof(info.MVP)); vkUnmapMemory(info.device, info.uniform_data.mem); res = vkBindBufferMemory(info.device, info.uniform_data.buf, info.uniform_data.mem, 0); assert(res == VK_SUCCESS); info.uniform_data.buffer_info.buffer = info.uniform_data.buf; info.uniform_data.buffer_info.offset = 0; info.uniform_data.buffer_info.range = sizeof(info.MVP); /* VULKAN_KEY_END */ vkDestroyBuffer(info.device, info.uniform_data.buf, NULL); vkFreeMemory(info.device, info.uniform_data.mem, NULL); destroy_device(info); destroy_instance(info); return 0; }
// This is the main rendering function that you have to implement and provide to ImGui (via setting up 'RenderDrawListsFn' in the ImGuiIO structure) void ImGui_ImplGlfwVulkan_RenderDrawLists(ImDrawData* draw_data) { VkResult err; ImGuiIO& io = ImGui::GetIO(); // Create the Vertex Buffer: size_t vertex_size = draw_data->TotalVtxCount * sizeof(ImDrawVert); if (!g_VertexBuffer[g_FrameIndex] || g_VertexBufferSize[g_FrameIndex] < vertex_size) { if (g_VertexBuffer[g_FrameIndex]) vkDestroyBuffer(g_Device, g_VertexBuffer[g_FrameIndex], g_Allocator); if (g_VertexBufferMemory[g_FrameIndex]) vkFreeMemory(g_Device, g_VertexBufferMemory[g_FrameIndex], g_Allocator); size_t vertex_buffer_size = ((vertex_size-1) / g_BufferMemoryAlignment+1) * g_BufferMemoryAlignment; VkBufferCreateInfo buffer_info = {}; buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_info.size = vertex_buffer_size; buffer_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; err = vkCreateBuffer(g_Device, &buffer_info, g_Allocator, &g_VertexBuffer[g_FrameIndex]); ImGui_ImplGlfwVulkan_VkResult(err); VkMemoryRequirements req; vkGetBufferMemoryRequirements(g_Device, g_VertexBuffer[g_FrameIndex], &req); g_BufferMemoryAlignment = (g_BufferMemoryAlignment > req.alignment) ? 
g_BufferMemoryAlignment : req.alignment; VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = req.size; alloc_info.memoryTypeIndex = ImGui_ImplGlfwVulkan_MemoryType(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits); err = vkAllocateMemory(g_Device, &alloc_info, g_Allocator, &g_VertexBufferMemory[g_FrameIndex]); ImGui_ImplGlfwVulkan_VkResult(err); err = vkBindBufferMemory(g_Device, g_VertexBuffer[g_FrameIndex], g_VertexBufferMemory[g_FrameIndex], 0); ImGui_ImplGlfwVulkan_VkResult(err); g_VertexBufferSize[g_FrameIndex] = vertex_buffer_size; } // Create the Index Buffer: size_t index_size = draw_data->TotalIdxCount * sizeof(ImDrawIdx); if (!g_IndexBuffer[g_FrameIndex] || g_IndexBufferSize[g_FrameIndex] < index_size) { if (g_IndexBuffer[g_FrameIndex]) vkDestroyBuffer(g_Device, g_IndexBuffer[g_FrameIndex], g_Allocator); if (g_IndexBufferMemory[g_FrameIndex]) vkFreeMemory(g_Device, g_IndexBufferMemory[g_FrameIndex], g_Allocator); size_t index_buffer_size = ((index_size-1) / g_BufferMemoryAlignment+1) * g_BufferMemoryAlignment; VkBufferCreateInfo buffer_info = {}; buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_info.size = index_buffer_size; buffer_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; err = vkCreateBuffer(g_Device, &buffer_info, g_Allocator, &g_IndexBuffer[g_FrameIndex]); ImGui_ImplGlfwVulkan_VkResult(err); VkMemoryRequirements req; vkGetBufferMemoryRequirements(g_Device, g_IndexBuffer[g_FrameIndex], &req); g_BufferMemoryAlignment = (g_BufferMemoryAlignment > req.alignment) ? 
g_BufferMemoryAlignment : req.alignment; VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = req.size; alloc_info.memoryTypeIndex = ImGui_ImplGlfwVulkan_MemoryType(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, req.memoryTypeBits); err = vkAllocateMemory(g_Device, &alloc_info, g_Allocator, &g_IndexBufferMemory[g_FrameIndex]); ImGui_ImplGlfwVulkan_VkResult(err); err = vkBindBufferMemory(g_Device, g_IndexBuffer[g_FrameIndex], g_IndexBufferMemory[g_FrameIndex], 0); ImGui_ImplGlfwVulkan_VkResult(err); g_IndexBufferSize[g_FrameIndex] = index_buffer_size; } // Upload Vertex and index Data: { ImDrawVert* vtx_dst; ImDrawIdx* idx_dst; err = vkMapMemory(g_Device, g_VertexBufferMemory[g_FrameIndex], 0, vertex_size, 0, (void**)(&vtx_dst)); ImGui_ImplGlfwVulkan_VkResult(err); err = vkMapMemory(g_Device, g_IndexBufferMemory[g_FrameIndex], 0, index_size, 0, (void**)(&idx_dst)); ImGui_ImplGlfwVulkan_VkResult(err); for (int n = 0; n < draw_data->CmdListsCount; n++) { const ImDrawList* cmd_list = draw_data->CmdLists[n]; memcpy(vtx_dst, cmd_list->VtxBuffer.Data, cmd_list->VtxBuffer.Size * sizeof(ImDrawVert)); memcpy(idx_dst, cmd_list->IdxBuffer.Data, cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx)); vtx_dst += cmd_list->VtxBuffer.Size; idx_dst += cmd_list->IdxBuffer.Size; } VkMappedMemoryRange range[2] = {}; range[0].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; range[0].memory = g_VertexBufferMemory[g_FrameIndex]; range[0].size = vertex_size; range[1].sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; range[1].memory = g_IndexBufferMemory[g_FrameIndex]; range[1].size = index_size; err = vkFlushMappedMemoryRanges(g_Device, 2, range); ImGui_ImplGlfwVulkan_VkResult(err); vkUnmapMemory(g_Device, g_VertexBufferMemory[g_FrameIndex]); vkUnmapMemory(g_Device, g_IndexBufferMemory[g_FrameIndex]); } // Bind pipeline and descriptor sets: { vkCmdBindPipeline(g_CommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, g_Pipeline); VkDescriptorSet 
desc_set[1] = {g_DescriptorSet}; vkCmdBindDescriptorSets(g_CommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, g_PipelineLayout, 0, 1, desc_set, 0, NULL); } // Bind Vertex And Index Buffer: { VkBuffer vertex_buffers[1] = {g_VertexBuffer[g_FrameIndex]}; VkDeviceSize vertex_offset[1] = {0}; vkCmdBindVertexBuffers(g_CommandBuffer, 0, 1, vertex_buffers, vertex_offset); vkCmdBindIndexBuffer(g_CommandBuffer, g_IndexBuffer[g_FrameIndex], 0, VK_INDEX_TYPE_UINT16); } // Setup viewport: { VkViewport viewport; viewport.x = 0; viewport.y = 0; viewport.width = ImGui::GetIO().DisplaySize.x; viewport.height = ImGui::GetIO().DisplaySize.y; viewport.minDepth = 0.0f; viewport.maxDepth = 1.0f; vkCmdSetViewport(g_CommandBuffer, 0, 1, &viewport); } // Setup scale and translation: { float scale[2]; scale[0] = 2.0f/io.DisplaySize.x; scale[1] = 2.0f/io.DisplaySize.y; float translate[2]; translate[0] = -1.0f; translate[1] = -1.0f; vkCmdPushConstants(g_CommandBuffer, g_PipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, sizeof(float) * 0, sizeof(float) * 2, scale); vkCmdPushConstants(g_CommandBuffer, g_PipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, sizeof(float) * 2, sizeof(float) * 2, translate); } // Render the command lists: int vtx_offset = 0; int idx_offset = 0; for (int n = 0; n < draw_data->CmdListsCount; n++) { const ImDrawList* cmd_list = draw_data->CmdLists[n]; for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++) { const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i]; if (pcmd->UserCallback) { pcmd->UserCallback(cmd_list, pcmd); } else { VkRect2D scissor; scissor.offset.x = (int32_t)(pcmd->ClipRect.x); scissor.offset.y = (int32_t)(pcmd->ClipRect.y); scissor.extent.width = (uint32_t)(pcmd->ClipRect.z - pcmd->ClipRect.x); scissor.extent.height = (uint32_t)(pcmd->ClipRect.w - pcmd->ClipRect.y + 1); // TODO: + 1?????? 
vkCmdSetScissor(g_CommandBuffer, 0, 1, &scissor); vkCmdDrawIndexed(g_CommandBuffer, pcmd->ElemCount, 1, idx_offset, vtx_offset, 0); } idx_offset += pcmd->ElemCount; } vtx_offset += cmd_list->VtxBuffer.Size; } }
int main(void) { VkInstance instance; { const char debug_ext[] = "VK_EXT_debug_report"; const char* extensions[] = {debug_ext,}; const char validation_layer[] = "VK_LAYER_LUNARG_standard_validation"; const char* layers[] = {validation_layer,}; VkInstanceCreateInfo create_info = { .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, .pNext = NULL, .flags = 0, .pApplicationInfo = NULL, .enabledLayerCount = NELEMS(layers), .ppEnabledLayerNames = layers, .enabledExtensionCount = NELEMS(extensions), .ppEnabledExtensionNames = extensions, }; assert(vkCreateInstance(&create_info, NULL, &instance) == VK_SUCCESS); } VkDebugReportCallbackEXT debug_callback; { VkDebugReportCallbackCreateInfoEXT create_info = { .sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT, .pNext = NULL, .flags = (VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT), .pfnCallback = &debugReportCallback, .pUserData = NULL, }; PFN_vkCreateDebugReportCallbackEXT createDebugReportCallback = (PFN_vkCreateDebugReportCallbackEXT) vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT"); assert(createDebugReportCallback); assert(createDebugReportCallback(instance, &create_info, NULL, &debug_callback) == VK_SUCCESS); } VkPhysicalDevice phy_device; { uint32_t num_devices; assert(vkEnumeratePhysicalDevices(instance, &num_devices, NULL) == VK_SUCCESS); assert(num_devices >= 1); VkPhysicalDevice * phy_devices = malloc(sizeof(*phy_devices) * num_devices); assert(vkEnumeratePhysicalDevices(instance, &num_devices, phy_devices) == VK_SUCCESS); phy_device = phy_devices[0]; free(phy_devices); } VkPhysicalDeviceMemoryProperties memory_properties; vkGetPhysicalDeviceMemoryProperties(phy_device, &memory_properties); VkDevice device; { float queue_priorities[] = {1.0}; const char validation_layer[] = "VK_LAYER_LUNARG_standard_validation"; const char* layers[] = {validation_layer,}; uint32_t nqueues; matchingQueues(phy_device, VK_QUEUE_GRAPHICS_BIT, &nqueues, 
NULL); assert(nqueues > 0); uint32_t * queue_family_idxs = malloc(sizeof(*queue_family_idxs) * nqueues); matchingQueues(phy_device, VK_QUEUE_GRAPHICS_BIT, &nqueues, queue_family_idxs); VkDeviceQueueCreateInfo queue_info = { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = NULL, .flags = 0, .queueFamilyIndex = queue_family_idxs[0], .queueCount = 1, .pQueuePriorities = queue_priorities, }; free(queue_family_idxs); VkPhysicalDeviceFeatures features = { .geometryShader = VK_TRUE, .fillModeNonSolid = VK_TRUE, }; VkDeviceCreateInfo create_info = { .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, .pNext = NULL, .flags = 0, .queueCreateInfoCount = 1, .pQueueCreateInfos = &queue_info, .enabledLayerCount = NELEMS(layers), .ppEnabledLayerNames = layers, .enabledExtensionCount = 0, .ppEnabledExtensionNames = NULL, .pEnabledFeatures = &features, }; assert(vkCreateDevice(phy_device, &create_info, NULL, &device) == VK_SUCCESS); } VkQueue queue; vkGetDeviceQueue(device, 0, 0, &queue); VkCommandPool cmd_pool; { VkCommandPoolCreateInfo create_info = { .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, .pNext = NULL, .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, .queueFamilyIndex = 0, }; assert(vkCreateCommandPool(device, &create_info, NULL, &cmd_pool) == VK_SUCCESS); } VkRenderPass render_pass; { VkAttachmentDescription attachments[] = {{ .flags = 0, .format = VK_FORMAT_R8G8B8A8_UNORM, .samples = VK_SAMPLE_COUNT_8_BIT, .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, .finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }, { .flags = 0, .format = VK_FORMAT_D16_UNORM, .samples = VK_SAMPLE_COUNT_8_BIT, .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, .stencilStoreOp = 
VK_ATTACHMENT_STORE_OP_DONT_CARE, .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, }, { .flags = 0, .format = VK_FORMAT_R8G8B8A8_UNORM, .samples = VK_SAMPLE_COUNT_1_BIT, .loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, .storeOp = VK_ATTACHMENT_STORE_OP_STORE, .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, .finalLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, }}; VkAttachmentReference attachment_refs[NELEMS(attachments)] = {{ .attachment = 0, .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }, { .attachment = 1, .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, }, { .attachment = 2, .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }}; VkSubpassDescription subpasses[1] = {{ .flags = 0, .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS, .inputAttachmentCount = 0, .pInputAttachments = NULL, .colorAttachmentCount = 1, .pColorAttachments = &attachment_refs[0], .pResolveAttachments = &attachment_refs[2], .pDepthStencilAttachment = &attachment_refs[1], .preserveAttachmentCount = 0, .pPreserveAttachments = NULL, }}; VkSubpassDependency dependencies[] = {{ .srcSubpass = 0, .dstSubpass = VK_SUBPASS_EXTERNAL, .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, .dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT, .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, .dependencyFlags = 0, }}; VkRenderPassCreateInfo create_info = { .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, .pNext = NULL, .flags = 0, .attachmentCount = NELEMS(attachments), .pAttachments = attachments, .subpassCount = NELEMS(subpasses), .pSubpasses = subpasses, .dependencyCount = NELEMS(dependencies), .pDependencies = dependencies, }; assert(vkCreateRenderPass(device, &create_info, NULL, &render_pass) == VK_SUCCESS); } VkImage images[3]; VkDeviceMemory image_memories[NELEMS(images)]; 
VkImageView views[NELEMS(images)]; createFrameImage(memory_properties, device, render_size, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_8_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_ASPECT_COLOR_BIT, &images[0], &image_memories[0], &views[0]); createFrameImage(memory_properties, device, render_size, VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_8_BIT, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_ASPECT_DEPTH_BIT, &images[1], &image_memories[1], &views[1]); createFrameImage(memory_properties, device, render_size, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_ASPECT_COLOR_BIT, &images[2], &image_memories[2], &views[2]); VkBuffer verts_buffer; VkDeviceMemory verts_memory; createBuffer(memory_properties, device, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, sizeof(verts), verts, &verts_buffer, &verts_memory); VkBuffer index_buffer; VkDeviceMemory index_memory; createBuffer(memory_properties, device, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, sizeof(indices), indices, &index_buffer, &index_memory); VkBuffer image_buffer; VkDeviceMemory image_buffer_memory; createBuffer(memory_properties, device, VK_BUFFER_USAGE_TRANSFER_DST_BIT, render_size.height * render_size.width * 4, NULL, &image_buffer, &image_buffer_memory); VkFramebuffer framebuffer; createFramebuffer(device, render_size, 3, views, render_pass, &framebuffer); VkShaderModule shaders[5]; { char* filenames[NELEMS(shaders)] = {"cube.vert.spv", "cube.geom.spv", "cube.frag.spv", "wireframe.geom.spv", "color.frag.spv"}; for (size_t i = 0; i < NELEMS(shaders); i++){ size_t code_size; uint32_t * code; assert((code_size = loadModule(filenames[i], &code)) != 0); VkShaderModuleCreateInfo create_info = { .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, .pNext = NULL, .flags = 0, .codeSize = code_size, .pCode = code, }; assert(vkCreateShaderModule(device, &create_info, NULL, &shaders[i]) == VK_SUCCESS); free(code); } } VkPipelineLayout pipeline_layout; { 
VkPushConstantRange push_range = { .stageFlags = VK_SHADER_STAGE_VERTEX_BIT, .offset = 0, .size = 4, }; VkPipelineLayoutCreateInfo create_info = { .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, .pNext = NULL, .flags = 0, .setLayoutCount = 0, .pSetLayouts = NULL, .pushConstantRangeCount = 1, .pPushConstantRanges = &push_range, }; assert(vkCreatePipelineLayout(device, &create_info, NULL, &pipeline_layout) == VK_SUCCESS); } VkPipeline pipelines[2]; { VkPipelineShaderStageCreateInfo stages[3] = {{ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .pNext = NULL, .flags = 0, .stage = VK_SHADER_STAGE_VERTEX_BIT, .module = shaders[0], .pName = "main", .pSpecializationInfo = NULL, },{ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .pNext = NULL, .flags = 0, .stage = VK_SHADER_STAGE_GEOMETRY_BIT, .module = shaders[1], .pName = "main", .pSpecializationInfo = NULL, },{ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .pNext = NULL, .flags = 0, .stage = VK_SHADER_STAGE_FRAGMENT_BIT, .module = shaders[2], .pName = "main", .pSpecializationInfo = NULL, }}; VkVertexInputBindingDescription vtx_binding = { .binding = 0, .stride = sizeof(struct Vertex), .inputRate = VK_VERTEX_INPUT_RATE_VERTEX, }; VkVertexInputAttributeDescription vtx_attr = { .location = 0, .binding = 0, .format = VK_FORMAT_R32G32B32_SFLOAT, .offset = offsetof(struct Vertex, pos), }; VkPipelineVertexInputStateCreateInfo vtx_state = { .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, .pNext = NULL, .flags = 0, .vertexBindingDescriptionCount = 1, .pVertexBindingDescriptions = &vtx_binding, .vertexAttributeDescriptionCount = 1, .pVertexAttributeDescriptions = &vtx_attr, }; VkPipelineInputAssemblyStateCreateInfo ia_state = { .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, .pNext = NULL, .flags = 0, .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, .primitiveRestartEnable = VK_TRUE, }; VkViewport viewport = { .x = 0, .y = 0, .width = 
render_size.width, .height = render_size.height, .minDepth = 0.0, .maxDepth = 1.0, }; VkRect2D scissor= { .offset = {.x = 0, .y = 0,}, .extent = render_size, }; VkPipelineViewportStateCreateInfo viewport_state = { .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, .pNext = NULL, .flags = 0, .viewportCount = 1, .pViewports = &viewport, .scissorCount = 1, .pScissors = &scissor, }; VkPipelineRasterizationStateCreateInfo rasterization_state = { .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, .pNext = NULL, .flags = 0, .depthClampEnable = VK_FALSE, .rasterizerDiscardEnable = VK_FALSE, .polygonMode = VK_POLYGON_MODE_FILL, .cullMode = VK_CULL_MODE_NONE, .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE, .depthBiasEnable = VK_FALSE, .lineWidth = 1.0, }; VkPipelineMultisampleStateCreateInfo multisample_state = { .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, .pNext = NULL, .flags = 0, .rasterizationSamples = VK_SAMPLE_COUNT_8_BIT, .sampleShadingEnable = VK_FALSE, .minSampleShading = 0.0, .pSampleMask = NULL, .alphaToCoverageEnable = VK_FALSE, .alphaToOneEnable = VK_FALSE, }; VkPipelineDepthStencilStateCreateInfo depth_stencil_state = { .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, .pNext = NULL, .flags = 0, .depthTestEnable = VK_TRUE, .depthWriteEnable = VK_TRUE, .depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL, .depthBoundsTestEnable = VK_FALSE, .stencilTestEnable = VK_FALSE, .front = {}, .back = {}, .minDepthBounds = 0.0, .maxDepthBounds = 1.0, }; VkPipelineColorBlendAttachmentState color_blend_attachment = { .blendEnable = VK_FALSE, .colorWriteMask = ( VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT), }; VkPipelineColorBlendStateCreateInfo color_blend_state = { .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, .pNext = NULL, .flags = 0, .logicOpEnable = VK_FALSE, //.logicOp = 0, .attachmentCount = 1, .pAttachments = 
&color_blend_attachment, .blendConstants = {}, }; VkGraphicsPipelineCreateInfo create_info = { .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, .pNext = NULL, .flags = 0, .stageCount = NELEMS(stages), .pStages = stages, .pVertexInputState = &vtx_state, .pInputAssemblyState = &ia_state, .pTessellationState = NULL, .pViewportState = &viewport_state, .pRasterizationState = &rasterization_state, .pMultisampleState = &multisample_state, .pDepthStencilState = &depth_stencil_state, .pColorBlendState = &color_blend_state, .pDynamicState = NULL, .layout = pipeline_layout, .renderPass = render_pass, .subpass = 0, .basePipelineHandle = VK_NULL_HANDLE, .basePipelineIndex = 0, }; assert(vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &create_info, NULL, &pipelines[0]) == VK_SUCCESS); stages[1].module = shaders[3]; stages[2].module = shaders[4]; rasterization_state.polygonMode = VK_POLYGON_MODE_LINE; assert(vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &create_info, NULL, &pipelines[1]) == VK_SUCCESS); } VkCommandBuffer draw_buffers[2]; { VkCommandBufferAllocateInfo allocate_info = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, .pNext = NULL, .commandPool = cmd_pool, .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, .commandBufferCount = NELEMS(draw_buffers), }; assert(vkAllocateCommandBuffers(device, &allocate_info, draw_buffers) == VK_SUCCESS); } { VkCommandBufferBeginInfo begin_info = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, .pNext = NULL, .flags = 0, .pInheritanceInfo = NULL, }; VkClearValue clear_values[] = {{ .color.float32 = {0.0, 0.0, 0.0, 1.0}, }, { .depthStencil = {.depth = 1.0}, }}; VkRenderPassBeginInfo renderpass_begin_info = { .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, .pNext = NULL, .renderPass = render_pass, .framebuffer = framebuffer, .renderArea = { .offset = {.x = 0, .y = 0}, .extent = render_size, }, .clearValueCount = NELEMS(clear_values), .pClearValues = clear_values, }; for (size_t i = 0; i < 
NELEMS(draw_buffers); i++){ assert(vkBeginCommandBuffer(draw_buffers[i], &begin_info) == VK_SUCCESS); uint32_t persp = i == 0; vkCmdPushConstants(draw_buffers[i], pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(persp), &persp); vkCmdBeginRenderPass(draw_buffers[i], &renderpass_begin_info, VK_SUBPASS_CONTENTS_INLINE); VkDeviceSize offset = 0; vkCmdBindVertexBuffers(draw_buffers[i], 0, 1, &verts_buffer, &offset); vkCmdBindIndexBuffer(draw_buffers[i], index_buffer, 0, VK_INDEX_TYPE_UINT32); for (size_t j = 0; j < NELEMS(pipelines); j++) { vkCmdBindPipeline(draw_buffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelines[j]); vkCmdDrawIndexed(draw_buffers[i], 20, 27, 0, 0, 0); } vkCmdEndRenderPass(draw_buffers[i]); } VkBufferImageCopy copy = { .bufferOffset = 0, .bufferRowLength = 0, // Tightly packed .bufferImageHeight = 0, // Tightly packed .imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1}, .imageOffset = {0, 0, 0}, .imageExtent = {.width = render_size.width, .height = render_size.height, .depth = 1}, }; VkBufferMemoryBarrier transfer_barrier = { .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, .pNext = 0, .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, .dstAccessMask = VK_ACCESS_HOST_READ_BIT, .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .buffer = image_buffer, .offset = 0, .size = VK_WHOLE_SIZE, }; for (size_t i = 0; i < NELEMS(draw_buffers); i++){ vkCmdCopyImageToBuffer(draw_buffers[i], images[2], VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image_buffer, 1, ©); vkCmdPipelineBarrier(draw_buffers[i], VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, NULL, 1, &transfer_barrier, 0, NULL); assert(vkEndCommandBuffer(draw_buffers[i]) == VK_SUCCESS); } } VkFence fence; { VkFenceCreateInfo create_info = { .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = 0, .flags = 0, }; assert(vkCreateFence(device, &create_info, NULL, &fence) == VK_SUCCESS); } { char * filenames[] = {"cube_persp.tif", 
"cube_ortho.tif"}; char * image_data; assert(vkMapMemory(device, image_buffer_memory, 0, VK_WHOLE_SIZE, 0, (void **) &image_data) == VK_SUCCESS); VkMappedMemoryRange image_flush = { .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, .pNext = NULL, .memory = image_buffer_memory, .offset = 0, .size = VK_WHOLE_SIZE, }; VkSubmitInfo submit_info = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pNext = NULL, .waitSemaphoreCount = 0, .pWaitSemaphores = NULL, .pWaitDstStageMask = NULL, .commandBufferCount = 1, .pCommandBuffers = NULL, .signalSemaphoreCount = 0, .pSignalSemaphores = NULL, }; for (size_t i = 0; i < NELEMS(filenames); i++){ submit_info.pCommandBuffers = &draw_buffers[i]; assert(vkResetFences(device, 1, &fence) == VK_SUCCESS); assert(vkQueueSubmit(queue, 1, &submit_info, fence) == VK_SUCCESS); assert(vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX) == VK_SUCCESS); assert(vkInvalidateMappedMemoryRanges(device, 1, &image_flush) == VK_SUCCESS); assert(writeTiff(filenames[i], image_data, render_size, nchannels) == 0); } vkUnmapMemory(device, image_buffer_memory); } assert(vkQueueWaitIdle(queue) == VK_SUCCESS); vkDestroyFence(device, fence, NULL); vkDestroyFramebuffer(device, framebuffer, NULL); for (size_t i = 0; i < NELEMS(images); i++){ vkDestroyImage(device, images[i], NULL); vkDestroyImageView(device, views[i], NULL); vkFreeMemory(device, image_memories[i], NULL); } vkDestroyBuffer(device, image_buffer, NULL); vkFreeMemory(device, image_buffer_memory, NULL); vkDestroyBuffer(device, verts_buffer, NULL); vkFreeMemory(device, verts_memory, NULL); vkDestroyBuffer(device, index_buffer, NULL); vkFreeMemory(device, index_memory, NULL); for (size_t i = 0; i < NELEMS(pipelines); i++){ vkDestroyPipeline(device, pipelines[i], NULL); } vkDestroyPipelineLayout(device, pipeline_layout, NULL); for(size_t i = 0; i < NELEMS(shaders); i++) vkDestroyShaderModule(device, shaders[i], NULL); vkDestroyRenderPass(device, render_pass, NULL); vkFreeCommandBuffers(device, cmd_pool, 
NELEMS(draw_buffers), draw_buffers); vkDestroyCommandPool(device, cmd_pool, NULL); vkDestroyDevice(device, NULL); { PFN_vkDestroyDebugReportCallbackEXT destroyDebugReportCallback = (PFN_vkDestroyDebugReportCallbackEXT) vkGetInstanceProcAddr(instance, "vkDestroyDebugReportCallbackEXT"); assert(destroyDebugReportCallback); destroyDebugReportCallback(instance, debug_callback, NULL); } vkDestroyInstance(instance, NULL); return 0; }
// Installs `device` as the globally active render device and re-derives all
// per-driver workaround flags and counters from scratch.
// NOTE: intentionally left code-identical — the vendor switch relies on an
// assignment inside its condition and a deliberate AMD->RADV fallthrough.
void set_current_renderer(const vk::render_device &device)
{
	// Reset all global driver state before probing the new device.
	g_current_renderer = &device;
	g_cb_no_interrupt_flag.store(false);
	g_drv_no_primitive_restart_flag = false;
	g_drv_sanitize_fp_values = false;
	g_drv_disable_fence_reset = false;
	g_num_processed_frames = 0;
	g_num_total_frames = 0;
	g_driver_vendor = driver_vendor::unknown;
	g_heap_compatible_buffer_types = 0;

	const auto gpu_name = g_current_renderer->gpu().get_name();
	// The switch condition also stores the detected vendor into g_driver_vendor,
	// which the RADV case below reads to distinguish the proprietary driver.
	switch (g_driver_vendor = g_current_renderer->gpu().get_driver_vendor())
	{
	case driver_vendor::AMD:
		// Radeon proprietary driver does not properly handle fence reset and can segfault during vkResetFences
		// Disable fence reset for proprietary driver and delete+initialize a new fence instead
		g_drv_disable_fence_reset = true;
		// Fall through
	case driver_vendor::RADV:
		// Radeon fails to properly handle degenerate primitives if primitive restart is enabled
		// One has to choose between using degenerate primitives or primitive restart to break up lists but not both
		// Polaris and newer will crash with ERROR_DEVICE_LOST
		// Older GCN will work okay most of the time but also occasionally draws garbage without reason (proprietary driver only)
		if (g_driver_vendor == driver_vendor::AMD ||
			gpu_name.find("VEGA") != std::string::npos ||
			gpu_name.find("POLARIS") != std::string::npos)
		{
			// User config can force primitive restart back on despite the risk.
			g_drv_no_primitive_restart_flag = !g_cfg.video.vk.force_primitive_restart;
		}
		break;
	case driver_vendor::NVIDIA:
		// Nvidia cards are easily susceptible to NaN poisoning
		g_drv_sanitize_fp_values = true;
		break;
	case driver_vendor::INTEL:
		// Fall through: Intel is currently handled like any unknown vendor.
	default:
		LOG_WARNING(RSX, "Unsupported device: %s", gpu_name);
	}

	LOG_NOTICE(RSX, "Vulkan: Renderer initialized on device '%s'", gpu_name);

	{
		// Buffer memory tests, only useful for portability on macOS
		// Probe which buffer usages can live in host-visible+coherent memory by
		// creating a throwaway 4 KiB buffer per usage and checking its
		// memory-type requirements; compatible usages accumulate into
		// g_heap_compatible_buffer_types as a bitmask.
		VkBufferUsageFlags types[] =
		{
			VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
			VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
			VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT,
			VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
			VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
			VK_BUFFER_USAGE_VERTEX_BUFFER_BIT
		};

		VkFlags memory_flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

		VkBuffer tmp;
		VkMemoryRequirements memory_reqs;

		VkBufferCreateInfo info = {};
		info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
		info.size = 4096;
		info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
		info.flags = 0;

		for (const auto &usage : types)
		{
			info.usage = usage;
			CHECK_RESULT(vkCreateBuffer(*g_current_renderer, &info, nullptr, &tmp));

			vkGetBufferMemoryRequirements(*g_current_renderer, tmp, &memory_reqs);
			if (g_current_renderer->get_compatible_memory_type(memory_reqs.memoryTypeBits, memory_flags, nullptr))
			{
				g_heap_compatible_buffer_types |= usage;
			}

			// The probe buffer is never bound to memory; destroy it immediately.
			vkDestroyBuffer(*g_current_renderer, tmp, nullptr);
		}
	}
}
/**
 * Builds the vertex/index mesh for one gear and uploads it to the GPU.
 *
 * The geometry generation is the classic glxgears algorithm: each tooth is
 * built from four angular steps (da) and contributes a front face, front
 * tooth side, back face, back tooth side, the four outward tooth surfaces
 * and a slice of the inner cylinder. The finished mesh is then uploaded
 * either via staging buffers into device-local memory (useStaging == true)
 * or directly into host-visible buffers.
 *
 * Fix: the two vkCmdCopyBuffer calls previously passed the mojibake token
 * `©Region` (a mangled `&copyRegion`), which does not compile; the address
 * of the local VkBufferCopy is now passed correctly. The unused local `j`
 * was also removed.
 *
 * @param gearinfo Radii, width, tooth count/depth, color, position and
 *                 rotation parameters for the gear (read only; relevant
 *                 fields are copied into members).
 * @param queue    Queue used to flush the staging-copy command buffer.
 */
void VulkanGear::generate(GearInfo *gearinfo, VkQueue queue)
{
    this->color = gearinfo->color;
    this->pos = gearinfo->pos;
    this->rotOffset = gearinfo->rotOffset;
    this->rotSpeed = gearinfo->rotSpeed;

    std::vector<Vertex> vBuffer;
    std::vector<uint32_t> iBuffer;

    int i;
    float r0, r1, r2;
    float ta, da;
    float u1, v1, u2, v2, len;
    float cos_ta, cos_ta_1da, cos_ta_2da, cos_ta_3da, cos_ta_4da;
    float sin_ta, sin_ta_1da, sin_ta_2da, sin_ta_3da, sin_ta_4da;
    int32_t ix0, ix1, ix2, ix3, ix4, ix5;

    // r0 = inner hub radius, r1 = tooth root radius, r2 = tooth tip radius.
    r0 = gearinfo->innerRadius;
    r1 = gearinfo->outerRadius - gearinfo->toothDepth / 2.0;
    r2 = gearinfo->outerRadius + gearinfo->toothDepth / 2.0;
    // Each tooth occupies four angular steps of size da.
    da = 2.0 * M_PI / gearinfo->numTeeth / 4.0;

    glm::vec3 normal;

    for (i = 0; i < gearinfo->numTeeth; i++)
    {
        // ta = starting angle of this tooth; precompute sin/cos at the
        // four step offsets used by the face/side/tooth geometry below.
        ta = i * 2.0 * M_PI / gearinfo->numTeeth;

        cos_ta = cos(ta);
        cos_ta_1da = cos(ta + da);
        cos_ta_2da = cos(ta + 2 * da);
        cos_ta_3da = cos(ta + 3 * da);
        cos_ta_4da = cos(ta + 4 * da);
        sin_ta = sin(ta);
        sin_ta_1da = sin(ta + da);
        sin_ta_2da = sin(ta + 2 * da);
        sin_ta_3da = sin(ta + 3 * da);
        sin_ta_4da = sin(ta + 4 * da);

        // Normalized direction along the leading tooth flank (u1,v1) and the
        // (unnormalized) trailing flank direction (u2,v2); used for the
        // outward tooth-surface normals.
        u1 = r2 * cos_ta_1da - r1 * cos_ta;
        v1 = r2 * sin_ta_1da - r1 * sin_ta;
        len = sqrt(u1 * u1 + v1 * v1);
        u1 /= len;
        v1 /= len;
        u2 = r1 * cos_ta_3da - r2 * cos_ta_2da;
        v2 = r1 * sin_ta_3da - r2 * sin_ta_2da;

        // front face
        normal = glm::vec3(0.0, 0.0, 1.0);
        ix0 = newVertex(&vBuffer, r0 * cos_ta, r0 * sin_ta, gearinfo->width * 0.5, normal);
        ix1 = newVertex(&vBuffer, r1 * cos_ta, r1 * sin_ta, gearinfo->width * 0.5, normal);
        ix2 = newVertex(&vBuffer, r0 * cos_ta, r0 * sin_ta, gearinfo->width * 0.5, normal);
        ix3 = newVertex(&vBuffer, r1 * cos_ta_3da, r1 * sin_ta_3da, gearinfo->width * 0.5, normal);
        ix4 = newVertex(&vBuffer, r0 * cos_ta_4da, r0 * sin_ta_4da, gearinfo->width * 0.5, normal);
        ix5 = newVertex(&vBuffer, r1 * cos_ta_4da, r1 * sin_ta_4da, gearinfo->width * 0.5, normal);
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);
        newFace(&iBuffer, ix2, ix3, ix4);
        newFace(&iBuffer, ix3, ix5, ix4);

        // front sides of teeth
        normal = glm::vec3(0.0, 0.0, 1.0);
        ix0 = newVertex(&vBuffer, r1 * cos_ta, r1 * sin_ta, gearinfo->width * 0.5, normal);
        ix1 = newVertex(&vBuffer, r2 * cos_ta_1da, r2 * sin_ta_1da, gearinfo->width * 0.5, normal);
        ix2 = newVertex(&vBuffer, r1 * cos_ta_3da, r1 * sin_ta_3da, gearinfo->width * 0.5, normal);
        ix3 = newVertex(&vBuffer, r2 * cos_ta_2da, r2 * sin_ta_2da, gearinfo->width * 0.5, normal);
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);

        // back face
        normal = glm::vec3(0.0, 0.0, -1.0);
        ix0 = newVertex(&vBuffer, r1 * cos_ta, r1 * sin_ta, -gearinfo->width * 0.5, normal);
        ix1 = newVertex(&vBuffer, r0 * cos_ta, r0 * sin_ta, -gearinfo->width * 0.5, normal);
        ix2 = newVertex(&vBuffer, r1 * cos_ta_3da, r1 * sin_ta_3da, -gearinfo->width * 0.5, normal);
        ix3 = newVertex(&vBuffer, r0 * cos_ta, r0 * sin_ta, -gearinfo->width * 0.5, normal);
        ix4 = newVertex(&vBuffer, r1 * cos_ta_4da, r1 * sin_ta_4da, -gearinfo->width * 0.5, normal);
        ix5 = newVertex(&vBuffer, r0 * cos_ta_4da, r0 * sin_ta_4da, -gearinfo->width * 0.5, normal);
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);
        newFace(&iBuffer, ix2, ix3, ix4);
        newFace(&iBuffer, ix3, ix5, ix4);

        // back sides of teeth
        normal = glm::vec3(0.0, 0.0, -1.0);
        ix0 = newVertex(&vBuffer, r1 * cos_ta_3da, r1 * sin_ta_3da, -gearinfo->width * 0.5, normal);
        ix1 = newVertex(&vBuffer, r2 * cos_ta_2da, r2 * sin_ta_2da, -gearinfo->width * 0.5, normal);
        ix2 = newVertex(&vBuffer, r1 * cos_ta, r1 * sin_ta, -gearinfo->width * 0.5, normal);
        ix3 = newVertex(&vBuffer, r2 * cos_ta_1da, r2 * sin_ta_1da, -gearinfo->width * 0.5, normal);
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);

        // draw outward faces of teeth
        normal = glm::vec3(v1, -u1, 0.0);
        ix0 = newVertex(&vBuffer, r1 * cos_ta, r1 * sin_ta, gearinfo->width * 0.5, normal);
        ix1 = newVertex(&vBuffer, r1 * cos_ta, r1 * sin_ta, -gearinfo->width * 0.5, normal);
        ix2 = newVertex(&vBuffer, r2 * cos_ta_1da, r2 * sin_ta_1da, gearinfo->width * 0.5, normal);
        ix3 = newVertex(&vBuffer, r2 * cos_ta_1da, r2 * sin_ta_1da, -gearinfo->width * 0.5, normal);
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);

        normal = glm::vec3(cos_ta, sin_ta, 0.0);
        ix0 = newVertex(&vBuffer, r2 * cos_ta_1da, r2 * sin_ta_1da, gearinfo->width * 0.5, normal);
        ix1 = newVertex(&vBuffer, r2 * cos_ta_1da, r2 * sin_ta_1da, -gearinfo->width * 0.5, normal);
        ix2 = newVertex(&vBuffer, r2 * cos_ta_2da, r2 * sin_ta_2da, gearinfo->width * 0.5, normal);
        ix3 = newVertex(&vBuffer, r2 * cos_ta_2da, r2 * sin_ta_2da, -gearinfo->width * 0.5, normal);
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);

        normal = glm::vec3(v2, -u2, 0.0);
        ix0 = newVertex(&vBuffer, r2 * cos_ta_2da, r2 * sin_ta_2da, gearinfo->width * 0.5, normal);
        ix1 = newVertex(&vBuffer, r2 * cos_ta_2da, r2 * sin_ta_2da, -gearinfo->width * 0.5, normal);
        ix2 = newVertex(&vBuffer, r1 * cos_ta_3da, r1 * sin_ta_3da, gearinfo->width * 0.5, normal);
        ix3 = newVertex(&vBuffer, r1 * cos_ta_3da, r1 * sin_ta_3da, -gearinfo->width * 0.5, normal);
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);

        normal = glm::vec3(cos_ta, sin_ta, 0.0);
        ix0 = newVertex(&vBuffer, r1 * cos_ta_3da, r1 * sin_ta_3da, gearinfo->width * 0.5, normal);
        ix1 = newVertex(&vBuffer, r1 * cos_ta_3da, r1 * sin_ta_3da, -gearinfo->width * 0.5, normal);
        ix2 = newVertex(&vBuffer, r1 * cos_ta_4da, r1 * sin_ta_4da, gearinfo->width * 0.5, normal);
        ix3 = newVertex(&vBuffer, r1 * cos_ta_4da, r1 * sin_ta_4da, -gearinfo->width * 0.5, normal);
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);

        // draw inside radius cylinder (normals point toward the axis)
        ix0 = newVertex(&vBuffer, r0 * cos_ta, r0 * sin_ta, -gearinfo->width * 0.5, glm::vec3(-cos_ta, -sin_ta, 0.0));
        ix1 = newVertex(&vBuffer, r0 * cos_ta, r0 * sin_ta, gearinfo->width * 0.5, glm::vec3(-cos_ta, -sin_ta, 0.0));
        ix2 = newVertex(&vBuffer, r0 * cos_ta_4da, r0 * sin_ta_4da, -gearinfo->width * 0.5, glm::vec3(-cos_ta_4da, -sin_ta_4da, 0.0));
        ix3 = newVertex(&vBuffer, r0 * cos_ta_4da, r0 * sin_ta_4da, gearinfo->width * 0.5, glm::vec3(-cos_ta_4da, -sin_ta_4da, 0.0));
        newFace(&iBuffer, ix0, ix1, ix2);
        newFace(&iBuffer, ix1, ix3, ix2);
    }

    int vertexBufferSize = vBuffer.size() * sizeof(Vertex);
    int indexBufferSize = iBuffer.size() * sizeof(uint32_t);

    bool useStaging = true;

    if (useStaging)
    {
        struct {
            VkBuffer buffer;
            VkDeviceMemory memory;
        } vertexStaging, indexStaging;

        // Create host-visible staging buffers filled with the mesh data
        // Vertex data
        exampleBase->createBuffer(
            VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
            vertexBufferSize,
            vBuffer.data(),
            &vertexStaging.buffer,
            &vertexStaging.memory);
        // Index data
        exampleBase->createBuffer(
            VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
            indexBufferSize,
            iBuffer.data(),
            &indexStaging.buffer,
            &indexStaging.memory);

        // Create empty device-local target buffers
        // Vertex buffer
        exampleBase->createBuffer(
            VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
            VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
            vertexBufferSize,
            nullptr,
            &vertexBuffer.buf,
            &vertexBuffer.mem);
        // Index buffer
        exampleBase->createBuffer(
            VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
            VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
            indexBufferSize,
            nullptr,
            &indexBuffer.buf,
            &indexBuffer.mem);

        // Record and flush a one-shot copy from staging into device-local memory
        VkCommandBuffer copyCmd = exampleBase->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);

        VkBufferCopy copyRegion = {};

        copyRegion.size = vertexBufferSize;
        vkCmdCopyBuffer(
            copyCmd,
            vertexStaging.buffer,
            vertexBuffer.buf,
            1,
            &copyRegion);

        copyRegion.size = indexBufferSize;
        vkCmdCopyBuffer(
            copyCmd,
            indexStaging.buffer,
            indexBuffer.buf,
            1,
            &copyRegion);

        exampleBase->flushCommandBuffer(copyCmd, queue, true);

        // Staging resources are no longer needed once the copy has completed
        vkDestroyBuffer(device, vertexStaging.buffer, nullptr);
        vkFreeMemory(device, vertexStaging.memory, nullptr);
        vkDestroyBuffer(device, indexStaging.buffer, nullptr);
        vkFreeMemory(device, indexStaging.memory, nullptr);
    }
    else
    {
        // No staging: upload straight into host-visible buffers
        // Vertex buffer
        exampleBase->createBuffer(
            VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
            vertexBufferSize,
            vBuffer.data(),
            &vertexBuffer.buf,
            &vertexBuffer.mem);
        // Index buffer
        exampleBase->createBuffer(
            VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
            indexBufferSize,
            iBuffer.data(),
            &indexBuffer.buf,
            &indexBuffer.mem);
    }

    indexBuffer.count = iBuffer.size();

    prepareUniformBuffer();
}
// Loads a 2D texture from file and generates its full mip chain at runtime
// using vkCmdBlitImage, then creates three samplers (no mips / mips /
// mips + anisotropy) and an image view covering all mip levels.
//
// Parameters:
//   fileName          - path of the (gli-loadable, e.g. KTX/DDS) texture file
//   format            - Vulkan format of the texture data
//   forceLinearTiling - accepted but not used by this implementation
//                       (optimal tiling is always used) — NOTE(review):
//                       confirm whether linear-tiling support was intended.
void loadTexture(std::string fileName, VkFormat format, bool forceLinearTiling)
{
#if defined(__ANDROID__)
    // Textures are stored inside the apk on Android (compressed)
    // So they need to be loaded via the asset manager
    AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, fileName.c_str(), AASSET_MODE_STREAMING);
    assert(asset);
    size_t size = AAsset_getLength(asset);
    assert(size > 0);
    void *textureData = malloc(size);
    AAsset_read(asset, textureData, size);
    AAsset_close(asset);
    gli::texture2d tex2D(gli::load((const char*)textureData, size));
#else
    gli::texture2d tex2D(gli::load(fileName));
#endif
    assert(!tex2D.empty());

    VkFormatProperties formatProperties;

    texture.width = static_cast<uint32_t>(tex2D[0].extent().x);
    texture.height = static_cast<uint32_t>(tex2D[0].extent().y);

    // Number of mip levels for a full chain:
    // numLevels = floor(log2(max(width, height))) + 1 (see spec)
    texture.mipLevels = floor(log2(std::max(texture.width, texture.height))) + 1;

    // Get device properties for the requested texture format.
    // Runtime mip generation blits the image onto itself, so the format must
    // support being both a blit source and a blit destination.
    vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProperties);
    assert(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT);
    assert(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_DST_BIT);

    VkMemoryAllocateInfo memAllocInfo = vkTools::initializers::memoryAllocateInfo();
    VkMemoryRequirements memReqs = {};

    // Create a host-visible staging buffer that contains the raw image data
    VkBuffer stagingBuffer;
    VkDeviceMemory stagingMemory;

    VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo();
    bufferCreateInfo.size = tex2D.size();
    // This buffer is used as a transfer source for the buffer copy
    bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer));
    vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs);
    memAllocInfo.allocationSize = memReqs.size;
    memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &stagingMemory));
    VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0));

    // Copy texture data into the staging buffer
    uint8_t *data;
    VK_CHECK_RESULT(vkMapMemory(device, stagingMemory, 0, memReqs.size, 0, (void **)&data));
    memcpy(data, tex2D.data(), tex2D.size());
    vkUnmapMemory(device, stagingMemory);

    // Create the optimal-tiled target image with room for the full mip chain.
    // TRANSFER_SRC is required because mips are generated by blitting from
    // the image onto itself.
    VkImageCreateInfo imageCreateInfo = vkTools::initializers::imageCreateInfo();
    imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imageCreateInfo.format = format;
    imageCreateInfo.mipLevels = texture.mipLevels;
    imageCreateInfo.arrayLayers = 1;
    imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageCreateInfo.extent = { texture.width, texture.height, 1 };
    // Note: this overwrites the SAMPLED_BIT-only usage assigned above
    imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &texture.image));
    vkGetImageMemoryRequirements(device, texture.image, &memReqs);
    memAllocInfo.allocationSize = memReqs.size;
    memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &texture.deviceMemory));
    VK_CHECK_RESULT(vkBindImageMemory(device, texture.image, texture.deviceMemory, 0));

    VkCommandBuffer copyCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);

    // Range covering only the base mip level (level 0, one layer)
    VkImageSubresourceRange subresourceRange = {};
    subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subresourceRange.levelCount = 1;
    subresourceRange.layerCount = 1;

    // Optimal image will be used as destination for the copy, so we must transfer from our initial undefined image layout to the transfer destination layout
    vkTools::setImageLayout(copyCmd, texture.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange);

    // Copy the first mip of the chain, remaining mips will be generated
    VkBufferImageCopy bufferCopyRegion = {};
    bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    bufferCopyRegion.imageSubresource.mipLevel = 0;
    bufferCopyRegion.imageSubresource.baseArrayLayer = 0;
    bufferCopyRegion.imageSubresource.layerCount = 1;
    bufferCopyRegion.imageExtent.width = texture.width;
    bufferCopyRegion.imageExtent.height = texture.height;
    bufferCopyRegion.imageExtent.depth = 1;

    vkCmdCopyBufferToImage(copyCmd, stagingBuffer, texture.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &bufferCopyRegion);

    // Transition first mip level to transfer source for read during blit.
    // texture.imageLayout records the final (post-mip-gen) layout used later.
    texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    vkTools::setImageLayout(
        copyCmd,
        texture.image,
        VK_IMAGE_ASPECT_COLOR_BIT,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        subresourceRange);

    VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true);

    // Clean up staging resources
    vkFreeMemory(device, stagingMemory, nullptr);
    vkDestroyBuffer(device, stagingBuffer, nullptr);

    // Generate the mip chain
    // ---------------------------------------------------------------
    // We copy down the whole mip chain doing a blit from mip-1 to mip
    // An alternative way would be to always blit from the first mip level and sample that one down
    VkCommandBuffer blitCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);

    // Copy down mips from n-1 to n
    for (int32_t i = 1; i < texture.mipLevels; i++)
    {
        VkImageBlit imageBlit{};

        // Source: previous mip level at its full extent
        imageBlit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        imageBlit.srcSubresource.layerCount = 1;
        imageBlit.srcSubresource.mipLevel = i-1;
        imageBlit.srcOffsets[1].x = int32_t(texture.width >> (i - 1));
        imageBlit.srcOffsets[1].y = int32_t(texture.height >> (i - 1));
        imageBlit.srcOffsets[1].z = 1;

        // Destination: current mip level at half the source extent
        imageBlit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        imageBlit.dstSubresource.layerCount = 1;
        imageBlit.dstSubresource.mipLevel = i;
        imageBlit.dstOffsets[1].x = int32_t(texture.width >> i);
        imageBlit.dstOffsets[1].y = int32_t(texture.height >> i);
        imageBlit.dstOffsets[1].z = 1;

        // Range covering only the mip level being written this iteration
        VkImageSubresourceRange mipSubRange = {};
        mipSubRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        mipSubRange.baseMipLevel = i;
        mipSubRange.levelCount = 1;
        mipSubRange.layerCount = 1;

        // Transiton current mip level to transfer dest
        // NOTE(review): the VK_PIPELINE_STAGE_HOST_BIT argument here (and the
        // mirrored pair on the barrier below) looks suspicious for an
        // intra-command-buffer transfer dependency — TRANSFER->TRANSFER would
        // be expected. Confirm against vkTools::setImageLayout's parameter
        // order before changing.
        vkTools::setImageLayout(
            blitCmd,
            texture.image,
            VK_IMAGE_ASPECT_COLOR_BIT,
            VK_IMAGE_LAYOUT_UNDEFINED,
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            mipSubRange,
            VK_PIPELINE_STAGE_TRANSFER_BIT,
            VK_PIPELINE_STAGE_HOST_BIT);

        // Blit from previous level
        vkCmdBlitImage(
            blitCmd,
            texture.image,
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
            texture.image,
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            1,
            &imageBlit,
            VK_FILTER_LINEAR);

        // Transiton current mip level to transfer source for read in next iteration
        vkTools::setImageLayout(
            blitCmd,
            texture.image,
            VK_IMAGE_ASPECT_COLOR_BIT,
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
            mipSubRange,
            VK_PIPELINE_STAGE_HOST_BIT,
            VK_PIPELINE_STAGE_TRANSFER_BIT);
    }

    // After the loop, all mip layers are in TRANSFER_SRC layout, so transition all to SHADER_READ
    subresourceRange.levelCount = texture.mipLevels;
    vkTools::setImageLayout(
        blitCmd,
        texture.image,
        VK_IMAGE_ASPECT_COLOR_BIT,
        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        texture.imageLayout,
        subresourceRange);

    VulkanExampleBase::flushCommandBuffer(blitCmd, queue, true);
    // ---------------------------------------------------------------

    // Create samplers: [0] no mip mapping, [1] full mip chain,
    // [2] full mip chain + anisotropy (if the device supports it)
    samplers.resize(3);
    VkSamplerCreateInfo sampler = vkTools::initializers::samplerCreateInfo();
    sampler.magFilter = VK_FILTER_LINEAR;
    sampler.minFilter = VK_FILTER_LINEAR;
    sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
    sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
    sampler.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
    sampler.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
    sampler.mipLodBias = 0.0f;
    sampler.compareOp = VK_COMPARE_OP_NEVER;
    sampler.minLod = 0.0f;
    sampler.maxLod = 0.0f;
    sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
    sampler.maxAnisotropy = 1.0;
    sampler.anisotropyEnable = VK_FALSE;

    // Without mip mapping
    VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &samplers[0]));

    // With mip mapping
    sampler.maxLod = (float)texture.mipLevels;
    VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &samplers[1]));

    // With mip mapping and anisotropic filtering (falls back to the previous
    // settings when the feature is unavailable)
    if (vulkanDevice->features.samplerAnisotropy)
    {
        sampler.maxAnisotropy = vulkanDevice->properties.limits.maxSamplerAnisotropy;
        sampler.anisotropyEnable = VK_TRUE;
    }
    VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &samplers[2]));

    // Create image view spanning the entire mip chain
    VkImageViewCreateInfo view = vkTools::initializers::imageViewCreateInfo();
    view.image = texture.image;
    view.viewType = VK_IMAGE_VIEW_TYPE_2D;
    view.format = format;
    view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
    view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    view.subresourceRange.baseMipLevel = 0;
    view.subresourceRange.baseArrayLayer = 0;
    view.subresourceRange.layerCount = 1;
    view.subresourceRange.levelCount = texture.mipLevels;
    VK_CHECK_RESULT(vkCreateImageView(device, &view, nullptr, &texture.view));
}
// Uploads mip/layer pixel data into a freshly created Vulkan image.
//
// Flow: stage all pixel data in a host-visible buffer, create a device-local
// image, record a one-shot command buffer that transitions the image to
// TRANSFER_DST, copies every region from the staging buffer, then
// transitions to TRANSFER_SRC (for subsequent mip generation). The queue is
// drained synchronously before staging resources are freed. If mipLevels > 1
// the remaining levels are generated by createMipLevels(), and finally the
// image view is created.
//
// Parameters:
//   texSize           - total byte size of the staging buffer
//   pixels            - one pixel pointer per layer/face (size() is used as
//                       the image's array-layer count)
//   bitmapInfos       - per-layer width/height/size metadata
//   bufferCopyRegions - pre-computed buffer->image copy regions
//   target            - view type for the created image view (2D, cube, ...)
//   internalFormat    - Vulkan format of the pixel data
//   mipLevels         - number of mip levels to allocate in the image
//   flags             - extra VkImageCreateFlags (e.g. cube-compatible)
void vkImageBase::updateMipVkImage(uint64_t texSize, std::vector<void *> &pixels, std::vector<ImageInfo> &bitmapInfos, std::vector<VkBufferImageCopy> &bufferCopyRegions, VkImageViewType target, VkFormat internalFormat, int mipLevels, VkImageCreateFlags flags)
{
    VkResult err;
    bool pass;

    VulkanRenderer *vk_renderer = static_cast<VulkanRenderer *>(Renderer::getInstance());
    VkDevice device = vk_renderer->getDevice();

    VkFormatProperties formatProperties;
    vkGetPhysicalDeviceFormatProperties(vk_renderer->getPhysicalDevice(), internalFormat, &formatProperties);

    // Host-visible staging buffer that will hold all source pixel data
    VkBuffer texBuffer;
    VkDeviceMemory texMemory;

    VkMemoryAllocateInfo memoryAllocateInfo = {};
    memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memoryAllocateInfo.pNext = NULL;
    memoryAllocateInfo.allocationSize = 0;
    memoryAllocateInfo.memoryTypeIndex = 0;

    err = vkCreateBuffer(device, gvr::BufferCreateInfo(texSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT), nullptr, &texBuffer);
    GVR_VK_CHECK(!err);

    // Obtain the requirements on memory for this buffer
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(device, texBuffer, &mem_reqs);
    // NOTE(review): this assert re-checks the err from vkCreateBuffer above —
    // vkGetBufferMemoryRequirements returns void, so `err` is stale here.
    assert(!err);

    memoryAllocateInfo.allocationSize = mem_reqs.size;
    pass = vk_renderer->GetMemoryTypeFromProperties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memoryAllocateInfo.memoryTypeIndex);
    assert(pass);

    // `size` is a member recording the staging allocation size
    size = mem_reqs.size;
    // NOTE(review): this allocation's err is not checked before the map below
    // (the map's assert would catch a failure indirectly) — consider asserting.
    err = vkAllocateMemory(device, gvr::MemoryAllocateInfo(mem_reqs.size, memoryAllocateInfo.memoryTypeIndex), NULL, &texMemory);

    // Copy each layer's pixels to its region's offset in the staging memory.
    // (Mapping before vkBindBufferMemory is fine: mapping applies to the
    // memory object, not the buffer.)
    unsigned char *texData;
    err = vkMapMemory(device, texMemory, 0, memoryAllocateInfo.allocationSize, 0, (void **) &texData);
    assert(!err);
    int i = 0;
    for (auto &buffer_copy_region: bufferCopyRegions) {
        memcpy(texData + buffer_copy_region.bufferOffset, pixels[i], bitmapInfos[i].size);
        i++;
    }
    vkUnmapMemory(device, texMemory);

    // Bind our buffer to the memory
    err = vkBindBufferMemory(device, texBuffer, texMemory, 0);
    assert(!err);

    // Create the device-local target image; one array layer per entry in
    // `pixels`, TRANSFER_SRC usage so mips can later be blitted from it.
    err = vkCreateImage(device, gvr::ImageCreateInfo(VK_IMAGE_TYPE_2D,
                                                     internalFormat, bitmapInfos[0].width, bitmapInfos[0].height, 1, mipLevels, pixels.size(),
                                                     VK_IMAGE_TILING_OPTIMAL,
                                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, flags,
                                                     getVKSampleBit(mSampleCount), VK_IMAGE_LAYOUT_UNDEFINED), NULL, &imageHandle);
    assert(!err);

    vkGetImageMemoryRequirements(device, imageHandle, &mem_reqs);
    memoryAllocateInfo.allocationSize = mem_reqs.size;
    pass = vk_renderer->GetMemoryTypeFromProperties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memoryAllocateInfo.memoryTypeIndex);
    assert(pass);

    /* allocate memory */
    err = vkAllocateMemory(device, &memoryAllocateInfo, NULL, &device_memory);
    assert(!err);

    /* bind memory */
    err = vkBindImageMemory(device, imageHandle, device_memory, 0);
    assert(!err);

    // Reset the setup command buffer
    VkCommandBuffer textureCmdBuffer;
    vk_renderer->initCmdBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, textureCmdBuffer);
    vkResetCommandBuffer(textureCmdBuffer, 0);

    // Inheritance info is required by the begin-info struct even for a
    // primary command buffer; all fields are left inert.
    VkCommandBufferInheritanceInfo commandBufferInheritanceInfo = {};
    commandBufferInheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    commandBufferInheritanceInfo.pNext = NULL;
    commandBufferInheritanceInfo.renderPass = VK_NULL_HANDLE;
    commandBufferInheritanceInfo.subpass = 0;
    commandBufferInheritanceInfo.framebuffer = VK_NULL_HANDLE;
    commandBufferInheritanceInfo.occlusionQueryEnable = VK_FALSE;
    commandBufferInheritanceInfo.queryFlags = 0;
    commandBufferInheritanceInfo.pipelineStatistics = 0;

    VkCommandBufferBeginInfo setupCmdsBeginInfo;
    setupCmdsBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    setupCmdsBeginInfo.pNext = NULL;
    setupCmdsBeginInfo.flags = 0;
    setupCmdsBeginInfo.pInheritanceInfo = &commandBufferInheritanceInfo;

    // Begin recording to the command buffer.
    vkBeginCommandBuffer(textureCmdBuffer, &setupCmdsBeginInfo);

    // Barrier template covering mip 0 of every array layer; reused (and its
    // subresourceRange passed) by both setImageLayout calls below.
    VkImageMemoryBarrier imageMemoryBarrier = {};
    imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    imageMemoryBarrier.pNext = NULL;
    imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    imageMemoryBarrier.subresourceRange.baseMipLevel = 0;
    imageMemoryBarrier.subresourceRange.levelCount = 1;
    imageMemoryBarrier.subresourceRange.baseArrayLayer = 0;
    imageMemoryBarrier.subresourceRange.layerCount = pixels.size();
    imageMemoryBarrier.srcAccessMask = 0;
    imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;

    // Optimal image will be used as destination for the copy, so we must transfer from our initial undefined image layout to the transfer destination layout
    setImageLayout(imageMemoryBarrier, textureCmdBuffer, imageHandle, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, imageMemoryBarrier.subresourceRange);

    // Copy every staged region (one per layer) into the image
    vkCmdCopyBufferToImage(
            textureCmdBuffer,
            texBuffer,
            imageHandle,
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            static_cast<uint32_t>(bufferCopyRegions.size()),
            bufferCopyRegions.data());

    // Record the final layout; the actual transition to SHADER_READ_ONLY is
    // performed later (createMipLevels / downstream barriers) — here mip 0 is
    // moved to TRANSFER_SRC so it can serve as the blit source.
    imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    setImageLayout(imageMemoryBarrier, textureCmdBuffer, imageHandle, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, imageMemoryBarrier.subresourceRange);

    // We are finished recording operations.
    vkEndCommandBuffer(textureCmdBuffer);

    VkCommandBuffer buffers[1];
    buffers[0] = textureCmdBuffer;

    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &buffers[0];
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    VkQueue queue = vk_renderer->getQueue();
    // Submit to our shared graphics queue.
    err = vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
    assert(!err);

    // Wait for the queue to become idle so the staging buffer can be freed.
    err = vkQueueWaitIdle(queue);
    assert(!err);

    vkFreeMemory(device, texMemory, nullptr);
    vkDestroyBuffer(device, texBuffer, nullptr);

    // Generate the remaining mip levels by blitting down from mip 0
    if(mipLevels > 1)
        createMipLevels(formatProperties, vk_renderer, setupCmdsBeginInfo,
                        bufferCopyRegions, mipLevels, bitmapInfos,
                        imageMemoryBarrier, submit_info, buffers, queue);

    // View spanning all mips and layers of the finished image
    err = vkCreateImageView(device, gvr::ImageViewCreateInfo(imageHandle, target, internalFormat, mipLevels,0, pixels.size(), VK_IMAGE_ASPECT_COLOR_BIT), NULL, &imageView);
    assert(!err);
}
int sample_main() { VkResult U_ASSERT_ONLY res; char sample_title[] = "MT Cmd Buffer Sample"; const bool depthPresent = false; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &info.presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.presentCompleteSemaphore, NULL, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = 0; pPipelineLayoutCreateInfo.pSetLayouts = NULL; res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); init_renderpass( info, depthPresent, false); // Can't clear in renderpass load because we re-use pipeline init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); /* The binding and attributes should be the same for all 
3 vertex buffers, * so init here */ info.vi_binding.binding = 0; info.vi_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; info.vi_binding.stride = sizeof(triData[0]); info.vi_attribs[0].binding = 0; info.vi_attribs[0].location = 0; info.vi_attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[0].offset = 0; info.vi_attribs[1].binding = 0; info.vi_attribs[1].location = 1; info.vi_attribs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[1].offset = 16; init_pipeline_cache(info); init_pipeline(info, depthPresent); VkImageSubresourceRange srRange = {}; srRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; srRange.baseMipLevel = 0; srRange.levelCount = VK_REMAINING_MIP_LEVELS; srRange.baseArrayLayer = 0; srRange.layerCount = VK_REMAINING_ARRAY_LAYERS; VkClearColorValue clear_color[1]; clear_color[0].float32[0] = 0.2f; clear_color[0].float32[1] = 0.2f; clear_color[0].float32[2] = 0.2f; clear_color[0].float32[3] = 0.2f; /* We need to do the clear here instead of as a load op since all 3 threads * share the same pipeline / renderpass */ set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); vkCmdClearColorImage(info.cmd, info.buffers[info.current_buffer].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clear_color, 1, &srRange); set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFence clearFence; init_fence(info, clearFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &info.presentCompleteSemaphore; 
submit_info[0].pWaitDstStageMask = NULL; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, clearFence); assert(!res); do { res = vkWaitForFences(info.device, 1, &clearFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, clearFence, NULL); /* VULKAN_KEY_START */ /* Use the fourth slot in the command buffer array for the presentation */ /* barrier using the command buffer in info */ threadCmdBufs[3] = info.cmd; sample_platform_thread vk_threads[3]; for (size_t i = 0; i < 3; i++) { sample_platform_thread_create(&vk_threads[i], &per_thread_code, (void *)i); } VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = 0; cmd_buf_info.pInheritanceInfo = NULL; res = vkBeginCommandBuffer(threadCmdBufs[3], &cmd_buf_info); assert(res == VK_SUCCESS); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = 
info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(threadCmdBufs[3], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(threadCmdBufs[3]); assert(res == VK_SUCCESS); pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitSemaphores = NULL; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 4; /* 3 from threads + prePresentBarrier */ submit_info[0].pCommandBuffers = threadCmdBufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Wait for all of the threads to finish */ for (int i = 0; i < 3; i++) { sample_platform_thread_join(vk_threads[i], NULL); } VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(!res); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); execute_present_image(info); wait_seconds(1); /* VULKAN_KEY_END */ vkDestroyBuffer(info.device, vertex_buffer[0].buf, NULL); vkDestroyBuffer(info.device, vertex_buffer[1].buf, NULL); vkDestroyBuffer(info.device, vertex_buffer[2].buf, NULL); vkFreeMemory(info.device, vertex_buffer[0].mem, NULL); vkFreeMemory(info.device, vertex_buffer[1].mem, NULL); vkFreeMemory(info.device, vertex_buffer[2].mem, NULL); for (int i = 0; i < 3; i++) { vkFreeCommandBuffers(info.device, threadCmdPools[i], 1, &threadCmdBufs[i]); vkDestroyCommandPool(info.device, threadCmdPools[i], NULL); } 
vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_window(info); destroy_device(info); destroy_instance(info); return 0; }
// // Vulkan termination. // void Example::terminate(const vkts::IUpdateThreadContext& updateContext) { if (device.get()) { terminateResources(updateContext); // if (swapchain.get()) { swapchain->destroy(); } if (pipelineLayout != VK_NULL_HANDLE) { vkDestroyPipelineLayout(device->getDevice(), pipelineLayout, nullptr); pipelineLayout = VK_NULL_HANDLE; } if (pipelineCache != VK_NULL_HANDLE) { vkDestroyPipelineCache(device->getDevice(), pipelineCache, nullptr); pipelineCache = VK_NULL_HANDLE; } if (vertexShaderModule != VK_NULL_HANDLE) { vkDestroyShaderModule(device->getDevice(), vertexShaderModule, nullptr); vertexShaderModule = VK_NULL_HANDLE; } if (fragmentShaderModule != VK_NULL_HANDLE) { vkDestroyShaderModule(device->getDevice(), fragmentShaderModule, nullptr); fragmentShaderModule = VK_NULL_HANDLE; } if (vertexBuffer != VK_NULL_HANDLE) { vkDestroyBuffer(device->getDevice(), vertexBuffer, nullptr); vertexBuffer = VK_NULL_HANDLE; } if (deviceMemoryVertexBuffer != VK_NULL_HANDLE) { vkFreeMemory(device->getDevice(), deviceMemoryVertexBuffer, nullptr); deviceMemoryVertexBuffer = VK_NULL_HANDLE; } if (renderingCompleteSemaphore.get()) { renderingCompleteSemaphore->destroy(); } if (imageAcquiredSemaphore.get()) { imageAcquiredSemaphore->destroy(); } if (commandPool.get()) { commandPool->destroy(); } if (surface.get()) { surface->destroy(); } device->destroy(); } if (instance.get()) { instance->destroy(); } }
void VkeCubeTexture::loadTextureFiles(const char **inPath){ bool imagesOK = true; VKA_INFO_MSG("Loading Cube Texture.\n"); for (uint32_t i = 0; i < 6; ++i){ if (!loadTexture(inPath[i], NULL, NULL, &m_width, &m_height)){ VKA_ERROR_MSG("Error loading texture image.\n"); printf("Texture : %d not available (%s).\n", i, inPath[i]); return; } } VulkanDC::Device::Queue::Name queueName = "DEFAULT_GRAPHICS_QUEUE"; VulkanDC::Device::Queue::CommandBufferID cmdID = INIT_COMMAND_ID; VulkanDC *dc = VulkanDC::Get(); VulkanDC::Device *device = dc->getDefaultDevice(); VulkanDC::Device::Queue *queue = device->getQueue(queueName); VkCommandBuffer cmd = VK_NULL_HANDLE; queue->beginCommandBuffer(cmdID, &cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); imageCreateAndBind( &m_data.image, &m_data.memory, m_format, VK_IMAGE_TYPE_2D, m_width, m_height, 1, 6, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, (VkImageUsageFlagBits)( VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT ), VK_IMAGE_TILING_OPTIMAL); VkBuffer cubeMapBuffer; VkDeviceMemory cubeMapMem; bufferCreate(&cubeMapBuffer, m_width*m_height * 4 * 6, VK_BUFFER_USAGE_TRANSFER_SRC_BIT); bufferAlloc(&cubeMapBuffer, &cubeMapMem, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); VkDeviceSize dSize = m_width * m_height * 4; uint32_t rowPitch = m_width * 4; if (m_memory_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT){ imageSetLayoutBarrier(cmdID, queueName, m_data.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_GENERAL); for (uint32_t i = 0; i < 6; ++i){ void *data = NULL; VkDeviceSize ofst = dSize*i; VKA_CHECK_ERROR(vkMapMemory(getDefaultDevice(),cubeMapMem, ofst, dSize, 0, &data), "Could not map memory for image.\n"); if (!loadTexture(inPath[i], (uint8_t**)&data, rowPitch, &m_width, &m_height)){ VKA_ERROR_MSG("Could not load final image.\n"); } vkUnmapMemory(getDefaultDevice(), cubeMapMem); } VkBufferImageCopy biCpyRgn[6]; for (uint32_t k = 0; k < 6; ++k){ VkDeviceSize ofst = dSize*k; biCpyRgn[k].bufferOffset = 
ofst; biCpyRgn[k].bufferImageHeight = 0; biCpyRgn[k].bufferRowLength = 0; biCpyRgn[k].imageExtent.width = m_width; biCpyRgn[k].imageExtent.height = m_height; biCpyRgn[k].imageExtent.depth = 1; biCpyRgn[k].imageOffset.x = 0; biCpyRgn[k].imageOffset.y = 0; biCpyRgn[k].imageOffset.z = 0; biCpyRgn[k].imageSubresource.baseArrayLayer = k; biCpyRgn[k].imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; biCpyRgn[k].imageSubresource.layerCount = 1; biCpyRgn[k].imageSubresource.mipLevel = 0; } VkFence copyFence; VkFenceCreateInfo fenceInfo; memset(&fenceInfo, 0, sizeof(fenceInfo)); fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(device->getVKDevice(), &fenceInfo,NULL , ©Fence); vkCmdCopyBufferToImage(cmd, cubeMapBuffer, m_data.image, m_data.imageLayout, 6, biCpyRgn); queue->flushCommandBuffer(cmdID , ©Fence); vkWaitForFences(device->getVKDevice(), 1, ©Fence, VK_TRUE, 100000000000); vkDestroyBuffer(device->getVKDevice(), cubeMapBuffer, NULL); vkFreeMemory(device->getVKDevice(), cubeMapMem, NULL); } VkSamplerCreateInfo sampler; sampler.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; sampler.pNext = NULL; sampler.magFilter = VK_FILTER_NEAREST; sampler.minFilter = VK_FILTER_NEAREST; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.mipLodBias = 0.0f; sampler.maxAnisotropy = 1; sampler.compareOp = VK_COMPARE_OP_NEVER; sampler.minLod = 0.0f; sampler.maxLod = 0.0f; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; VkImageViewCreateInfo view; view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view.pNext = NULL; view.viewType = VK_IMAGE_VIEW_TYPE_CUBE; view.format = m_format; view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; view.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0 
}; view.subresourceRange.baseArrayLayer = 0; view.subresourceRange.levelCount = 1; view.subresourceRange.baseMipLevel = 0; view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; view.subresourceRange.layerCount = 1; VKA_CHECK_ERROR(vkCreateSampler(getDefaultDevice(), &sampler,NULL, &m_data.sampler), "Could not create sampler for image texture.\n"); view.image = m_data.image; VKA_CHECK_ERROR(vkCreateImageView(getDefaultDevice(), &view,NULL, &m_data.view), "Could not create image view for texture.\n"); VKA_INFO_MSG("Created CUBE Image Texture.\n"); }
bool Texture::Init(VulkanDevice * device, VulkanCommandBuffer * cmdBuffer, std::string filename) { struct MipMap { unsigned char * data; int width; int height; unsigned int size; }; VkResult result; void * pData; std::vector<MipMap> mipMaps; FILE * file = fopen(filename.c_str(), "rb"); if (file == NULL) { gLogManager->AddMessage("ERROR: Texture file not found! (" + filename + ")"); return false; } // Read original image (as mipmap level 0) MipMap originalImage; fread(&originalImage.width, sizeof(unsigned int), 1, file); fread(&originalImage.height, sizeof(unsigned int), 1, file); fread(&originalImage.size, sizeof(unsigned int), 1, file); originalImage.data = new unsigned char[originalImage.size]; fread(originalImage.data, sizeof(unsigned char), originalImage.size, file); mipMaps.push_back(originalImage); // Read mipmaps fread(&mipMapsCount, sizeof(int), 1, file); for (int i = 0; i < mipMapsCount; i++) { MipMap mipMap; fread(&mipMap.width, sizeof(int), 1, file); fread(&mipMap.height, sizeof(int), 1, file); fread(&mipMap.size, sizeof(unsigned int), 1, file); mipMap.data = new unsigned char[mipMap.size]; fread(mipMap.data, sizeof(unsigned char), mipMap.size, file); mipMaps.push_back(mipMap); } fclose(file); unsigned int totalTextureSize = 0; for (unsigned int i = 0; i < mipMaps.size(); i++) totalTextureSize += mipMaps[i].size; // Create an array of bits which stores all of the texture data std::vector<unsigned char> textureData; for (unsigned int i = 0; i < mipMaps.size(); i++) for (unsigned int j = 0; j < mipMaps[i].size; j++) textureData.push_back(mipMaps[i].data[j]); for (int i = 0; i < mipMaps.size(); i++) delete[] mipMaps[i].data; VkMemoryRequirements memReq{}; VkMemoryAllocateInfo allocInfo{}; allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCI{}; bufferCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferCI.size = totalTextureSize; bufferCI.usage = 
VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE; result = vkCreateBuffer(device->GetDevice(), &bufferCI, VK_NULL_HANDLE, &stagingBuffer); if (result != VK_SUCCESS) return false; vkGetBufferMemoryRequirements(device->GetDevice(), stagingBuffer, &memReq); allocInfo.allocationSize = memReq.size; if (!device->MemoryTypeFromProperties(memReq.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &allocInfo.memoryTypeIndex)) return false; result = vkAllocateMemory(device->GetDevice(), &allocInfo, VK_NULL_HANDLE, &stagingMemory); if (result != VK_SUCCESS) return false; result = vkBindBufferMemory(device->GetDevice(), stagingBuffer, stagingMemory, 0); if (result != VK_SUCCESS) return false; result = vkMapMemory(device->GetDevice(), stagingMemory, 0, memReq.size, 0, &pData); if (result != VK_SUCCESS) return false; memcpy(pData, textureData.data(), textureData.size()); vkUnmapMemory(device->GetDevice(), stagingMemory); std::vector<VkBufferImageCopy> bufferCopyRegions; uint32_t offset = 0; for (unsigned int level = 0; level < mipMaps.size(); level++) { VkBufferImageCopy bufferCopyRegion{}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = level; bufferCopyRegion.imageSubresource.baseArrayLayer = 0; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; bufferCopyRegion.imageExtent.width = mipMaps[level].width; bufferCopyRegion.imageExtent.height = mipMaps[level].height; offset += (uint32_t)mipMaps[level].size; bufferCopyRegions.push_back(bufferCopyRegion); } VkImageCreateInfo imageCI{}; imageCI.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; imageCI.imageType = VK_IMAGE_TYPE_2D; imageCI.format = VK_FORMAT_R8G8B8A8_UNORM; imageCI.mipLevels = (uint32_t)mipMaps.size(); imageCI.arrayLayers = 1; imageCI.samples = VK_SAMPLE_COUNT_1_BIT; imageCI.tiling = VK_IMAGE_TILING_OPTIMAL; imageCI.usage = 
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; imageCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCI.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imageCI.extent.width = mipMaps[0].width; imageCI.extent.height = mipMaps[0].height; imageCI.extent.depth = 1; result = vkCreateImage(device->GetDevice(), &imageCI, VK_NULL_HANDLE, &textureImage); if (result != VK_SUCCESS) return false; vkGetImageMemoryRequirements(device->GetDevice(), textureImage, &memReq); VkMemoryAllocateInfo memAlloc{}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.allocationSize = memReq.size; if (!device->MemoryTypeFromProperties(memReq.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAlloc.memoryTypeIndex)) return false; result = vkAllocateMemory(device->GetDevice(), &memAlloc, VK_NULL_HANDLE, &textureMemory); if (result != VK_SUCCESS) return false; result = vkBindImageMemory(device->GetDevice(), textureImage, textureMemory, 0); if (result != VK_SUCCESS) return false; VkImageSubresourceRange range{}; range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; range.baseMipLevel = 0; range.levelCount = (uint32_t)mipMaps.size(); range.layerCount = 1; cmdBuffer->BeginRecording(); VulkanTools::SetImageLayout(textureImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &range, cmdBuffer, device, false); vkCmdCopyBufferToImage(cmdBuffer->GetCommandBuffer(), stagingBuffer, textureImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (uint32_t)bufferCopyRegions.size(), bufferCopyRegions.data()); VulkanTools::SetImageLayout(textureImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, &range, cmdBuffer, device, false); cmdBuffer->EndRecording(); cmdBuffer->Execute(device, NULL, NULL, NULL, true); vkFreeMemory(device->GetDevice(), stagingMemory, VK_NULL_HANDLE); vkDestroyBuffer(device->GetDevice(), stagingBuffer, VK_NULL_HANDLE); VkImageViewCreateInfo viewCI{}; viewCI.sType = 
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; viewCI.image = textureImage; viewCI.viewType = VK_IMAGE_VIEW_TYPE_2D; viewCI.format = VK_FORMAT_R8G8B8A8_UNORM; viewCI.components.r = VK_COMPONENT_SWIZZLE_R; viewCI.components.g = VK_COMPONENT_SWIZZLE_G; viewCI.components.b = VK_COMPONENT_SWIZZLE_B; viewCI.components.a = VK_COMPONENT_SWIZZLE_A; viewCI.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; viewCI.subresourceRange.baseMipLevel = 0; viewCI.subresourceRange.baseArrayLayer = 0; viewCI.subresourceRange.layerCount = 1; viewCI.subresourceRange.levelCount = (uint32_t)mipMaps.size(); result = vkCreateImageView(device->GetDevice(), &viewCI, VK_NULL_HANDLE, &textureImageView); if (result != VK_SUCCESS) return false; return true; }
void loadCubemap(std::string filename, VkFormat format, bool forceLinearTiling) { #if defined(__ANDROID__) // Textures are stored inside the apk on Android (compressed) // So they need to be loaded via the asset manager AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING); assert(asset); size_t size = AAsset_getLength(asset); assert(size > 0); void *textureData = malloc(size); AAsset_read(asset, textureData, size); AAsset_close(asset); gli::texture_cube texCube(gli::load((const char*)textureData, size)); #else gli::texture_cube texCube(gli::load(filename)); #endif assert(!texCube.empty()); cubeMap.width = texCube.extent().x; cubeMap.height = texCube.extent().y; cubeMap.mipLevels = texCube.levels(); VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; // Create a host-visible staging buffer that contains the raw image data VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo(); bufferCreateInfo.size = texCube.size(); // This buffer is used as a transfer source for the buffer copy bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer)); // Get memory requirements for the staging buffer (alignment, memory type bits) vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs); memAllocInfo.allocationSize = memReqs.size; // Get memory type index for a host visible buffer memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &stagingMemory)); VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0)); // Copy texture data into staging buffer uint8_t *data; 
VK_CHECK_RESULT(vkMapMemory(device, stagingMemory, 0, memReqs.size, 0, (void **)&data)); memcpy(data, texCube.data(), texCube.size()); vkUnmapMemory(device, stagingMemory); // Create optimal tiled target image VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.mipLevels = cubeMap.mipLevels; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imageCreateInfo.extent = { cubeMap.width, cubeMap.height, 1 }; imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; // Cube faces count as array layers in Vulkan imageCreateInfo.arrayLayers = 6; // This flag is required for cube map images imageCreateInfo.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &cubeMap.image)); vkGetImageMemoryRequirements(device, cubeMap.image, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &cubeMap.deviceMemory)); VK_CHECK_RESULT(vkBindImageMemory(device, cubeMap.image, cubeMap.deviceMemory, 0)); VkCommandBuffer copyCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); // Setup buffer copy regions for each face including all of it's miplevels std::vector<VkBufferImageCopy> bufferCopyRegions; uint32_t offset = 0; for (uint32_t face = 0; face < 6; face++) { for (uint32_t level = 0; level < cubeMap.mipLevels; level++) { VkBufferImageCopy bufferCopyRegion = {}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = level; 
bufferCopyRegion.imageSubresource.baseArrayLayer = face; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.width = texCube[face][level].extent().x; bufferCopyRegion.imageExtent.height = texCube[face][level].extent().y; bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; bufferCopyRegions.push_back(bufferCopyRegion); // Increase offset into staging buffer for next level / face offset += texCube[face][level].size(); } } // Image barrier for optimal image (target) // Set initial layout for all array layers (faces) of the optimal (target) tiled texture VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.baseMipLevel = 0; subresourceRange.levelCount = cubeMap.mipLevels; subresourceRange.layerCount = 6; vks::tools::setImageLayout( copyCmd, cubeMap.image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy the cube map faces from the staging buffer to the optimal tiled image vkCmdCopyBufferToImage( copyCmd, stagingBuffer, cubeMap.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<uint32_t>(bufferCopyRegions.size()), bufferCopyRegions.data() ); // Change texture image layout to shader read after all faces have been copied cubeMap.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vks::tools::setImageLayout( copyCmd, cubeMap.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cubeMap.imageLayout, subresourceRange); VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true); // Create sampler VkSamplerCreateInfo sampler = vks::initializers::samplerCreateInfo(); sampler.magFilter = VK_FILTER_LINEAR; sampler.minFilter = VK_FILTER_LINEAR; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeV = sampler.addressModeU; sampler.addressModeW = sampler.addressModeU; sampler.mipLodBias = 0.0f; sampler.compareOp = VK_COMPARE_OP_NEVER; 
sampler.minLod = 0.0f; sampler.maxLod = cubeMap.mipLevels; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; sampler.maxAnisotropy = 1.0f; if (vulkanDevice->features.samplerAnisotropy) { sampler.maxAnisotropy = vulkanDevice->properties.limits.maxSamplerAnisotropy; sampler.anisotropyEnable = VK_TRUE; } VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &cubeMap.sampler)); // Create image view VkImageViewCreateInfo view = vks::initializers::imageViewCreateInfo(); // Cube map view type view.viewType = VK_IMAGE_VIEW_TYPE_CUBE; view.format = format; view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; view.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }; // 6 array layers (faces) view.subresourceRange.layerCount = 6; // Set number of mip levels view.subresourceRange.levelCount = cubeMap.mipLevels; view.image = cubeMap.image; VK_CHECK_RESULT(vkCreateImageView(device, &view, nullptr, &cubeMap.view)); // Clean up staging resources vkFreeMemory(device, stagingMemory, nullptr); vkDestroyBuffer(device, stagingBuffer, nullptr); }
/** * Create Vulkan buffers for the index and vertex buffer using a vertex layout * * @note Only does staging if a valid command buffer and transfer queue are passed * * @param meshBuffer Pointer to the mesh buffer containing buffer handles and memory * @param layout Vertex layout for the vertex buffer * @param createInfo Structure containing information for mesh creation time (center, scaling, etc.) * @param useStaging If true, buffers are staged to device local memory * @param copyCmd (Required for staging) Command buffer to put the copy commands into * @param copyQueue (Required for staging) Queue to put copys into */ void createBuffers( vkMeshLoader::MeshBuffer *meshBuffer, std::vector<vkMeshLoader::VertexLayout> layout, vkMeshLoader::MeshCreateInfo *createInfo, bool useStaging, VkCommandBuffer copyCmd, VkQueue copyQueue) { glm::vec3 scale; glm::vec2 uvscale; glm::vec3 center; if (createInfo == nullptr) { scale = glm::vec3(1.0f); uvscale = glm::vec2(1.0f); center = glm::vec3(0.0f); } else { scale = createInfo->scale; uvscale = createInfo->uvscale; center = createInfo->center; } std::vector<float> vertexBuffer; for (size_t m = 0; m < m_Entries.size(); m++) { for (size_t i = 0; i < m_Entries[m].Vertices.size(); i++) { // Push vertex data depending on layout for (auto& layoutDetail : layout) { // Position if (layoutDetail == vkMeshLoader::VERTEX_LAYOUT_POSITION) { vertexBuffer.push_back(m_Entries[m].Vertices[i].m_pos.x * scale.x + center.x); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_pos.y * scale.y + center.y); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_pos.z * scale.z + center.z); } // Normal if (layoutDetail == vkMeshLoader::VERTEX_LAYOUT_NORMAL) { vertexBuffer.push_back(m_Entries[m].Vertices[i].m_normal.x); vertexBuffer.push_back(-m_Entries[m].Vertices[i].m_normal.y); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_normal.z); } // Texture coordinates if (layoutDetail == vkMeshLoader::VERTEX_LAYOUT_UV) { 
vertexBuffer.push_back(m_Entries[m].Vertices[i].m_tex.s * uvscale.s); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_tex.t * uvscale.t); } // Color if (layoutDetail == vkMeshLoader::VERTEX_LAYOUT_COLOR) { vertexBuffer.push_back(m_Entries[m].Vertices[i].m_color.r); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_color.g); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_color.b); } // Tangent if (layoutDetail == vkMeshLoader::VERTEX_LAYOUT_TANGENT) { vertexBuffer.push_back(m_Entries[m].Vertices[i].m_tangent.x); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_tangent.y); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_tangent.z); } // Bitangent if (layoutDetail == vkMeshLoader::VERTEX_LAYOUT_BITANGENT) { vertexBuffer.push_back(m_Entries[m].Vertices[i].m_binormal.x); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_binormal.y); vertexBuffer.push_back(m_Entries[m].Vertices[i].m_binormal.z); } // Dummy layout components for padding if (layoutDetail == vkMeshLoader::VERTEX_LAYOUT_DUMMY_FLOAT) { vertexBuffer.push_back(0.0f); } if (layoutDetail == vkMeshLoader::VERTEX_LAYOUT_DUMMY_VEC4) { vertexBuffer.push_back(0.0f); vertexBuffer.push_back(0.0f); vertexBuffer.push_back(0.0f); vertexBuffer.push_back(0.0f); } } } } meshBuffer->vertices.size = vertexBuffer.size() * sizeof(float); dim.min *= scale; dim.max *= scale; dim.size *= scale; std::vector<uint32_t> indexBuffer; for (uint32_t m = 0; m < m_Entries.size(); m++) { uint32_t indexBase = static_cast<uint32_t>(indexBuffer.size()); for (uint32_t i = 0; i < m_Entries[m].Indices.size(); i++) { indexBuffer.push_back(m_Entries[m].Indices[i] + indexBase); } vkMeshLoader::MeshDescriptor descriptor{}; descriptor.indexBase = indexBase; descriptor.indexCount = static_cast<uint32_t>(m_Entries[m].Indices.size()); descriptor.vertexCount = static_cast<uint32_t>(m_Entries[m].Vertices.size()); meshBuffer->meshDescriptors.push_back(descriptor); } meshBuffer->indices.size = indexBuffer.size() * sizeof(uint32_t); 
meshBuffer->indexCount = static_cast<uint32_t>(indexBuffer.size()); // Use staging buffer to move vertex and index buffer to device local memory if (useStaging && copyQueue != VK_NULL_HANDLE && copyCmd != VK_NULL_HANDLE) { // Create staging buffers struct { VkBuffer buffer; VkDeviceMemory memory; } vertexStaging, indexStaging; // Vertex buffer vulkanDevice->createBuffer( VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, meshBuffer->vertices.size, &vertexStaging.buffer, &vertexStaging.memory, vertexBuffer.data()); // Index buffer vulkanDevice->createBuffer( VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, meshBuffer->indices.size, &indexStaging.buffer, &indexStaging.memory, indexBuffer.data()); // Create device local target buffers // Vertex buffer vulkanDevice->createBuffer( VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, meshBuffer->vertices.size, &meshBuffer->vertices.buf, &meshBuffer->vertices.mem); // Index buffer vulkanDevice->createBuffer( VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, meshBuffer->indices.size, &meshBuffer->indices.buf, &meshBuffer->indices.mem); // Copy from staging buffers VkCommandBufferBeginInfo cmdBufInfo = vkTools::initializers::commandBufferBeginInfo(); VK_CHECK_RESULT(vkBeginCommandBuffer(copyCmd, &cmdBufInfo)); VkBufferCopy copyRegion = {}; copyRegion.size = meshBuffer->vertices.size; vkCmdCopyBuffer( copyCmd, vertexStaging.buffer, meshBuffer->vertices.buf, 1, ©Region); copyRegion.size = meshBuffer->indices.size; vkCmdCopyBuffer( copyCmd, indexStaging.buffer, meshBuffer->indices.buf, 1, ©Region); VK_CHECK_RESULT(vkEndCommandBuffer(copyCmd)); VkSubmitInfo submitInfo = {}; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.commandBufferCount = 1; submitInfo.pCommandBuffers = ©Cmd; VK_CHECK_RESULT(vkQueueSubmit(copyQueue, 1, &submitInfo, VK_NULL_HANDLE)); 
VK_CHECK_RESULT(vkQueueWaitIdle(copyQueue)); vkDestroyBuffer(vulkanDevice->logicalDevice, vertexStaging.buffer, nullptr); vkFreeMemory(vulkanDevice->logicalDevice, vertexStaging.memory, nullptr); vkDestroyBuffer(vulkanDevice->logicalDevice, indexStaging.buffer, nullptr); vkFreeMemory(vulkanDevice->logicalDevice, indexStaging.memory, nullptr); } else { // Generate vertex buffer vulkanDevice->createBuffer( VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, meshBuffer->vertices.size, &meshBuffer->vertices.buf, &meshBuffer->vertices.mem, vertexBuffer.data()); // Generate index buffer vulkanDevice->createBuffer( VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, meshBuffer->indices.size, &meshBuffer->indices.buf, &meshBuffer->indices.mem, indexBuffer.data()); } }
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Draw Cube"; process_command_line_args(info, argc, argv); init_global_layer_properties(info); info.instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); #ifdef _WIN32 info.instance_extension_names.push_back( VK_KHR_WIN32_SURFACE_EXTENSION_NAME); #else info.instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); #endif info.device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, false); init_renderpass(info, DEPTH_PRESENT); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, DEPTH_PRESENT); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); init_descriptor_pool(info, false); init_descriptor_set(info, false); init_pipeline_cache(info); init_pipeline(info, DEPTH_PRESENT); /* VULKAN_KEY_START */ VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; res 
= vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, NULL, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); /* Allocate a uniform buffer that will take query results. */ VkBuffer query_result_buf; VkDeviceMemory query_result_mem; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; buf_info.size = 4 * sizeof(uint64_t); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &query_result_buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, query_result_buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &alloc_info.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &alloc_info, NULL, &query_result_mem); assert(res == VK_SUCCESS); res = vkBindBufferMemory(info.device, query_result_buf, query_result_mem, 0); assert(res == VK_SUCCESS); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_info; query_pool_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_info.pNext = NULL; query_pool_info.queryType = VK_QUERY_TYPE_OCCLUSION; query_pool_info.flags = 0; query_pool_info.queryCount = 2; query_pool_info.pipelineStatistics = 0; res = 
vkCreateQueryPool(info.device, &query_pool_info, NULL, &query_pool); assert(res == VK_SUCCESS); vkCmdResetQueryPool(info.cmd, query_pool, 0 /*startQuery*/, 2 /*queryCount*/); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); VkViewport viewport; viewport.height = (float)info.height; viewport.width = (float)info.width; viewport.minDepth = (float)0.0f; viewport.maxDepth = (float)1.0f; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport); VkRect2D scissor; scissor.extent.width = info.width; scissor.extent.height = info.height; scissor.offset.x = 0; scissor.offset.y = 0; vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor); vkCmdBeginQuery(info.cmd, query_pool, 0 /*slot*/, 0 /*flags*/); vkCmdEndQuery(info.cmd, query_pool, 0 /*slot*/); vkCmdBeginQuery(info.cmd, query_pool, 1 /*slot*/, 0 /*flags*/); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); vkCmdEndQuery(info.cmd, query_pool, 1 /*slot*/); vkCmdCopyQueryPoolResults( info.cmd, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, query_result_buf, 0 /*dstOffset*/, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); VkImageMemoryBarrier prePresentBarrier 
= {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = 0; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); res = vkQueueWaitIdle(info.queue); assert(res == VK_SUCCESS); uint64_t samples_passed[4]; 
samples_passed[0] = 0; samples_passed[1] = 0; res = vkGetQueryPoolResults( info.device, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, sizeof(samples_passed) /*dataSize*/, samples_passed, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); assert(res == VK_SUCCESS); std::cout << "vkGetQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed[1] << "\n"; /* Read back query result from buffer */ uint64_t *samples_passed_ptr; res = vkMapMemory(info.device, query_result_mem, 0, mem_reqs.size, 0, (void **)&samples_passed_ptr); assert(res == VK_SUCCESS); std::cout << "vkCmdCopyQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed_ptr[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed_ptr[1] << "\n"; vkUnmapMemory(info.device, query_result_mem); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "occlusion_query"); vkDestroyBuffer(info.device, query_result_buf, NULL); vkFreeMemory(info.device, query_result_mem, NULL); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyQueryPool(info.device, query_pool, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); 
destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
void loadTextureArray(std::string filename, VkFormat format) { #if defined(__ANDROID__) // Textures are stored inside the apk on Android (compressed) // So they need to be loaded via the asset manager AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING); assert(asset); size_t size = AAsset_getLength(asset); assert(size > 0); void *textureData = malloc(size); AAsset_read(asset, textureData, size); AAsset_close(asset); gli::texture2DArray tex2DArray(gli::load((const char*)textureData, size)); #else gli::texture2DArray tex2DArray(gli::load(filename)); #endif assert(!tex2DArray.empty()); textureArray.width = tex2DArray.dimensions().x; textureArray.height = tex2DArray.dimensions().y; layerCount = tex2DArray.layers(); VkMemoryAllocateInfo memAllocInfo = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; // Create a host-visible staging buffer that contains the raw image data VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo(); bufferCreateInfo.size = tex2DArray.size(); // This buffer is used as a transfer source for the buffer copy bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; vkTools::checkResult(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer)); // Get memory requirements for the staging buffer (alignment, memory type bits) vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs); memAllocInfo.allocationSize = memReqs.size; // Get memory type index for a host visible buffer memAllocInfo.memoryTypeIndex = getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); vkTools::checkResult(vkAllocateMemory(device, &memAllocInfo, nullptr, &stagingMemory)); vkTools::checkResult(vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0)); // Copy texture data into staging buffer uint8_t *data; 
vkTools::checkResult(vkMapMemory(device, stagingMemory, 0, memReqs.size, 0, (void **)&data)); memcpy(data, tex2DArray.data(), tex2DArray.size()); vkUnmapMemory(device, stagingMemory); // Setup buffer copy regions for array layers std::vector<VkBufferImageCopy> bufferCopyRegions; uint32_t offset = 0; // Check if all array layers have the same dimesions bool sameDims = true; for (uint32_t layer = 0; layer < layerCount; layer++) { if (tex2DArray[layer].dimensions().x != textureArray.width || tex2DArray[layer].dimensions().y != textureArray.height) { sameDims = false; break; } } // If all layers of the texture array have the same dimensions, we only need to do one copy if (sameDims) { VkBufferImageCopy bufferCopyRegion = {}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = 0; bufferCopyRegion.imageSubresource.baseArrayLayer = 0; bufferCopyRegion.imageSubresource.layerCount = layerCount; bufferCopyRegion.imageExtent.width = tex2DArray[0].dimensions().x; bufferCopyRegion.imageExtent.height = tex2DArray[0].dimensions().y; bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; bufferCopyRegions.push_back(bufferCopyRegion); } else { // If dimensions differ, copy layer by layer and pass offsets for (uint32_t layer = 0; layer < layerCount; layer++) { VkBufferImageCopy bufferCopyRegion = {}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; bufferCopyRegion.imageSubresource.mipLevel = 0; bufferCopyRegion.imageSubresource.baseArrayLayer = layer; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.width = tex2DArray[layer].dimensions().x; bufferCopyRegion.imageExtent.height = tex2DArray[layer].dimensions().y; bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; bufferCopyRegions.push_back(bufferCopyRegion); offset += tex2DArray[layer].size(); } } // Create optimal tiled target image VkImageCreateInfo 
imageCreateInfo = vkTools::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.mipLevels = 1; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; imageCreateInfo.extent = { textureArray.width, textureArray.height, 1 }; imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; imageCreateInfo.arrayLayers = layerCount; VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &textureArray.image)); vkGetImageMemoryRequirements(device, textureArray.image, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &textureArray.deviceMemory)); VK_CHECK_RESULT(vkBindImageMemory(device, textureArray.image, textureArray.deviceMemory, 0)); VkCommandBuffer copyCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); // Image barrier for optimal image (target) // Set initial layout for all array layers (faces) of the optimal (target) tiled texture VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.baseMipLevel = 0; subresourceRange.levelCount = 1; subresourceRange.layerCount = layerCount; vkTools::setImageLayout( copyCmd, textureArray.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy the cube map faces from the staging buffer to the optimal tiled image vkCmdCopyBufferToImage( copyCmd, stagingBuffer, textureArray.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, bufferCopyRegions.size(), bufferCopyRegions.data() ); 
// Change texture image layout to shader read after all faces have been copied textureArray.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vkTools::setImageLayout( copyCmd, textureArray.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, textureArray.imageLayout, subresourceRange); VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true); // Create sampler VkSamplerCreateInfo sampler = vkTools::initializers::samplerCreateInfo(); sampler.magFilter = VK_FILTER_LINEAR; sampler.minFilter = VK_FILTER_LINEAR; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeV = sampler.addressModeU; sampler.addressModeW = sampler.addressModeU; sampler.mipLodBias = 0.0f; sampler.maxAnisotropy = 8; sampler.compareOp = VK_COMPARE_OP_NEVER; sampler.minLod = 0.0f; sampler.maxLod = 0.0f; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &textureArray.sampler)); // Create image view VkImageViewCreateInfo view = vkTools::initializers::imageViewCreateInfo(); view.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY; view.format = format; view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; view.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }; view.subresourceRange.layerCount = layerCount; view.image = textureArray.image; VK_CHECK_RESULT(vkCreateImageView(device, &view, nullptr, &textureArray.view)); // Clean up staging resources vkFreeMemory(device, stagingMemory, nullptr); vkDestroyBuffer(device, stagingBuffer, nullptr); }
void CommandBufferManager::DeferBufferDestruction(VkBuffer object) { FrameResources& resources = m_frame_resources[m_current_frame]; resources.cleanup_resources.push_back( [object]() { vkDestroyBuffer(g_vulkan_context->GetDevice(), object, nullptr); }); }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Texel Buffer Sample"; float texels[] = {1.0, 0.0, 1.0}; const bool depthPresent = false; const bool vertexPresent = false; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); if (info.gpu_props.limits.maxTexelBufferElements < 4) { std::cout << "maxTexelBufferElements too small\n"; exit(-1); } VkFormatProperties props; vkGetPhysicalDeviceFormatProperties(info.gpus[0], VK_FORMAT_R32_SFLOAT, &props); if (!(props.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) { std::cout << "R32_SFLOAT format unsupported for texel buffer\n"; exit(-1); } init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; buf_info.size = sizeof(texels); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; VkBuffer texelBuf; res = vkCreateBuffer(info.device, &buf_info, NULL, &texelBuf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, texelBuf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | 
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &alloc_info.memoryTypeIndex); assert(pass && "No mappable, coherent memory"); VkDeviceMemory texelMem; res = vkAllocateMemory(info.device, &alloc_info, NULL, &texelMem); assert(res == VK_SUCCESS); uint8_t *pData; res = vkMapMemory(info.device, texelMem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &texels, sizeof(texels)); vkUnmapMemory(info.device, texelMem); res = vkBindBufferMemory(info.device, texelBuf, texelMem, 0); assert(res == VK_SUCCESS); VkBufferView texel_view; VkBufferViewCreateInfo view_info = {}; view_info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; view_info.pNext = NULL; view_info.buffer = texelBuf; view_info.format = VK_FORMAT_R32_SFLOAT; view_info.offset = 0; view_info.range = sizeof(texels); vkCreateBufferView(info.device, &view_info, NULL, &texel_view); VkDescriptorBufferInfo texel_buffer_info = {}; texel_buffer_info.buffer = texelBuf; texel_buffer_info.offset = 0; texel_buffer_info.range = sizeof(texels); // init_descriptor_and_pipeline_layouts(info, false); VkDescriptorSetLayoutBinding layout_bindings[1]; layout_bindings[0].binding = 0; layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; layout_bindings[0].descriptorCount = 1; layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_bindings[0].pImmutableSamplers = NULL; /* Next take layout bindings and use them to create a descriptor set layout */ VkDescriptorSetLayoutCreateInfo descriptor_layout = {}; descriptor_layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; descriptor_layout.pNext = NULL; descriptor_layout.bindingCount = 1; descriptor_layout.pBindings = layout_bindings; info.desc_layout.resize(NUM_DESCRIPTOR_SETS); res = vkCreateDescriptorSetLayout(info.device, &descriptor_layout, NULL, info.desc_layout.data()); assert(res == VK_SUCCESS); /* Now use the descriptor layout to create a pipeline layout */ VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = 
{}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = NUM_DESCRIPTOR_SETS; pPipelineLayoutCreateInfo.pSetLayouts = info.desc_layout.data(); res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); VkDescriptorPoolSize type_count[1]; type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; type_count[0].descriptorCount = 1; VkDescriptorPoolCreateInfo descriptor_pool = {}; descriptor_pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; descriptor_pool.pNext = NULL; descriptor_pool.maxSets = 1; descriptor_pool.poolSizeCount = 1; descriptor_pool.pPoolSizes = type_count; res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo desc_alloc_info[1]; desc_alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; desc_alloc_info[0].pNext = NULL; desc_alloc_info[0].descriptorPool = info.desc_pool; desc_alloc_info[0].descriptorSetCount = NUM_DESCRIPTOR_SETS; desc_alloc_info[0].pSetLayouts = info.desc_layout.data(); /* Allocate descriptor set with UNIFORM_BUFFER_DYNAMIC */ info.desc_set.resize(NUM_DESCRIPTOR_SETS); res = vkAllocateDescriptorSets(info.device, desc_alloc_info, info.desc_set.data()); assert(res == VK_SUCCESS); VkWriteDescriptorSet writes[1]; writes[0] = {}; writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writes[0].dstSet = info.desc_set[0]; writes[0].dstBinding = 0; writes[0].descriptorCount = 1; writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; writes[0].pBufferInfo = &texel_buffer_info; writes[0].pTexelBufferView = 
&texel_view; writes[0].dstArrayElement = 0; vkUpdateDescriptorSets(info.device, 1, writes, 0, NULL); init_pipeline_cache(info); init_pipeline(info, depthPresent, vertexPresent); /* VULKAN_KEY_START */ VkClearValue clear_values[1]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo; imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; imageAcquiredSemaphoreCreateInfo.pNext = NULL; imageAcquiredSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &info.imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 1; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; 
VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); execute_queue_cmdbuf(info, cmd_bufs, drawFence); do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, drawFence, NULL); execute_present_image(info); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "texel_buffer"); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); vkDestroyBufferView(info.device, texel_view, NULL); vkDestroyBuffer(info.device, texelBuf, NULL); vkFreeMemory(info.device, texelMem, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
void destroyUniformData(VkDevice device, vkTools::UniformData *uniformData) { vkDestroyBuffer(device, uniformData->buffer, nullptr); vkFreeMemory(device, uniformData->memory, nullptr); }
// Load a model from file using the ASSIMP model loader and generate all resources required to render the model void loadModel(std::string filename) { // Load the model from file using ASSIMP const aiScene* scene; Assimp::Importer Importer; // Flags for loading the mesh static const int assimpFlags = aiProcess_FlipWindingOrder | aiProcess_Triangulate | aiProcess_PreTransformVertices; #if defined(__ANDROID__) // Meshes are stored inside the apk on Android (compressed) // So they need to be loaded via the asset manager AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING); assert(asset); size_t size = AAsset_getLength(asset); assert(size > 0); void *meshData = malloc(size); AAsset_read(asset, meshData, size); AAsset_close(asset); scene = Importer.ReadFileFromMemory(meshData, size, assimpFlags); free(meshData); #else scene = Importer.ReadFile(filename.c_str(), assimpFlags); #endif // Generate vertex buffer from ASSIMP scene data float scale = 1.0f; std::vector<Vertex> vertexBuffer; // Iterate through all meshes in the file and extract the vertex components for (uint32_t m = 0; m < scene->mNumMeshes; m++) { for (uint32_t v = 0; v < scene->mMeshes[m]->mNumVertices; v++) { Vertex vertex; // Use glm make_* functions to convert ASSIMP vectors to glm vectors vertex.pos = glm::make_vec3(&scene->mMeshes[m]->mVertices[v].x) * scale; vertex.normal = glm::make_vec3(&scene->mMeshes[m]->mNormals[v].x); // Texture coordinates and colors may have multiple channels, we only use the first [0] one vertex.uv = glm::make_vec2(&scene->mMeshes[m]->mTextureCoords[0][v].x); // Mesh may not have vertex colors vertex.color = (scene->mMeshes[m]->HasVertexColors(0)) ? 
glm::make_vec3(&scene->mMeshes[m]->mColors[0][v].r) : glm::vec3(1.0f); // Vulkan uses a right-handed NDC (contrary to OpenGL), so simply flip Y-Axis vertex.pos.y *= -1.0f; vertexBuffer.push_back(vertex); } } size_t vertexBufferSize = vertexBuffer.size() * sizeof(Vertex); // Generate index buffer from ASSIMP scene data std::vector<uint32_t> indexBuffer; for (uint32_t m = 0; m < scene->mNumMeshes; m++) { uint32_t indexBase = static_cast<uint32_t>(indexBuffer.size()); for (uint32_t f = 0; f < scene->mMeshes[m]->mNumFaces; f++) { // We assume that all faces are triangulated for (uint32_t i = 0; i < 3; i++) { indexBuffer.push_back(scene->mMeshes[m]->mFaces[f].mIndices[i] + indexBase); } } } size_t indexBufferSize = indexBuffer.size() * sizeof(uint32_t); model.indices.count = static_cast<uint32_t>(indexBuffer.size()); // Static mesh should always be device local bool useStaging = true; if (useStaging) { struct { VkBuffer buffer; VkDeviceMemory memory; } vertexStaging, indexStaging; // Create staging buffers // Vertex data VK_CHECK_RESULT(vulkanDevice->createBuffer( VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, vertexBufferSize, &vertexStaging.buffer, &vertexStaging.memory, vertexBuffer.data())); // Index data VK_CHECK_RESULT(vulkanDevice->createBuffer( VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, indexBufferSize, &indexStaging.buffer, &indexStaging.memory, indexBuffer.data())); // Create device local buffers // Vertex buffer VK_CHECK_RESULT(vulkanDevice->createBuffer( VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, vertexBufferSize, &model.vertices.buffer, &model.vertices.memory)); // Index buffer VK_CHECK_RESULT(vulkanDevice->createBuffer( VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, indexBufferSize, &model.indices.buffer, 
&model.indices.memory)); // Copy from staging buffers VkCommandBuffer copyCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); VkBufferCopy copyRegion = {}; copyRegion.size = vertexBufferSize; vkCmdCopyBuffer( copyCmd, vertexStaging.buffer, model.vertices.buffer, 1, ©Region); copyRegion.size = indexBufferSize; vkCmdCopyBuffer( copyCmd, indexStaging.buffer, model.indices.buffer, 1, ©Region); VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true); vkDestroyBuffer(device, vertexStaging.buffer, nullptr); vkFreeMemory(device, vertexStaging.memory, nullptr); vkDestroyBuffer(device, indexStaging.buffer, nullptr); vkFreeMemory(device, indexStaging.memory, nullptr); } else { // Vertex buffer VK_CHECK_RESULT(vulkanDevice->createBuffer( VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, vertexBufferSize, &model.vertices.buffer, &model.vertices.memory, vertexBuffer.data())); // Index buffer VK_CHECK_RESULT(vulkanDevice->createBuffer( VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, indexBufferSize, &model.indices.buffer, &model.indices.memory, indexBuffer.data())); } }