void VkHelper::create2DImage(VkDevice device, VkPhysicalDevice physicalDevice, uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags reqFlags, VkMemoryPropertyFlags preferredFlags, VkImage* img, VkDeviceMemory* deviceImgMem) { VkImageCreateInfo imgCreateInfo{}; imgCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; imgCreateInfo.pNext = VK_NULL_HANDLE; imgCreateInfo.flags = 0; imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; imgCreateInfo.extent.width = width; imgCreateInfo.extent.height = height; imgCreateInfo.extent.depth = 1; imgCreateInfo.arrayLayers = 1; imgCreateInfo.mipLevels = 1; imgCreateInfo.format = format; imgCreateInfo.tiling = tiling; imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; imgCreateInfo.queueFamilyIndexCount = -1; imgCreateInfo.pQueueFamilyIndices = 0; imgCreateInfo.usage = usage; imgCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; if (vkCreateImage(device, &imgCreateInfo, nullptr, img) != VK_SUCCESS) std::runtime_error("ERROR: Failed to create image."); VkMemoryRequirements memReqs; vkGetImageMemoryRequirements(device, *img, &memReqs); VkMemoryAllocateInfo allocInfo{}; allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; allocInfo.pNext = VK_NULL_HANDLE; allocInfo.allocationSize = memReqs.size; allocInfo.memoryTypeIndex = VkHelper::findMemoryType(physicalDevice, memReqs, reqFlags, preferredFlags); if (vkAllocateMemory(device, &allocInfo, nullptr, deviceImgMem) != VK_SUCCESS) std::runtime_error("ERROR: Failed to allocate memory."); if (vkBindImageMemory(device, *img, *deviceImgMem, 0) != VK_SUCCESS) std::runtime_error("ERROR: Failed to bind image memory."); }
void prepareUniformBuffers() { // Prepare and initialize uniform buffer containing shader uniforms VkMemoryRequirements memReqs; // Vertex shader uniform buffer block VkBufferCreateInfo bufferInfo = {}; VkMemoryAllocateInfo allocInfo = {}; allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; allocInfo.pNext = NULL; allocInfo.allocationSize = 0; allocInfo.memoryTypeIndex = 0; VkResult err; bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferInfo.size = sizeof(uboVS); bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; // Create a new buffer err = vkCreateBuffer(device, &bufferInfo, nullptr, &uniformDataVS.buffer); assert(!err); // Get memory requirements including size, alignment and memory type vkGetBufferMemoryRequirements(device, uniformDataVS.buffer, &memReqs); allocInfo.allocationSize = memReqs.size; // Gets the appropriate memory type for this type of buffer allocation // Only memory types that are visible to the host getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &allocInfo.memoryTypeIndex); // Allocate memory for the uniform buffer err = vkAllocateMemory(device, &allocInfo, nullptr, &(uniformDataVS.memory)); assert(!err); // Bind memory to buffer err = vkBindBufferMemory(device, uniformDataVS.buffer, uniformDataVS.memory, 0); assert(!err); // Store information in the uniform's descriptor uniformDataVS.descriptor.buffer = uniformDataVS.buffer; uniformDataVS.descriptor.offset = 0; uniformDataVS.descriptor.range = sizeof(uboVS); updateUniformBuffers(); }
void Renderer::createImage(uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags memoryProperties, VkImage & image, VkDeviceMemory & imageMemory) { VkImageCreateInfo image_create_info{}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.extent.width = width; image_create_info.extent.height = height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.format = format; image_create_info.tiling = tiling; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = usage; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.flags = 0; ErrorCheck(vkCreateImage(_device, &image_create_info, nullptr, &image)); VkMemoryRequirements mem_reqs; vkGetImageMemoryRequirements(_device, image, &mem_reqs); VkMemoryAllocateInfo mem_alloc_info{}; mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc_info.allocationSize = mem_reqs.size; uint32_t type_filter = mem_reqs.memoryTypeBits; uint32_t memory_type; for (uint32_t i = 0; i < _gpu_memory_properties.memoryTypeCount; i++) { if ((type_filter & (1 << i)) && (_gpu_memory_properties.memoryTypes[i].propertyFlags & memoryProperties) == memoryProperties) { memory_type = i; break; } } mem_alloc_info.memoryTypeIndex = memory_type; ErrorCheck(vkAllocateMemory(_device, &mem_alloc_info, nullptr, &imageMemory)); vkBindImageMemory(_device, image, imageMemory, 0); }
void VulkanGear::prepareUniformBuffer() { // Vertex shader uniform buffer block VkMemoryAllocateInfo allocInfo = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; VkBufferCreateInfo bufferInfo = vkTools::initializers::bufferCreateInfo( VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(ubo)); VK_CHECK_RESULT(vkCreateBuffer(device, &bufferInfo, nullptr, &uniformData.buffer)); vkGetBufferMemoryRequirements(device, uniformData.buffer, &memReqs); allocInfo.allocationSize = memReqs.size; allocInfo.memoryTypeIndex = exampleBase->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &allocInfo, nullptr, &uniformData.memory)); VK_CHECK_RESULT(vkBindBufferMemory(device, uniformData.buffer, uniformData.memory, 0)); uniformData.descriptor.buffer = uniformData.buffer; uniformData.descriptor.offset = 0; uniformData.descriptor.range = sizeof(ubo); uniformData.allocSize = allocInfo.allocationSize; }
// Create a buffer with backing device memory and, if an initial data pointer
// is supplied, upload that data through a transient mapping.
// Returns true on success (failures are handled inside vkTools::checkResult).
VkBool32 VulkanExampleBase::createBuffer(VkBufferUsageFlags usageFlags, VkMemoryPropertyFlags memoryPropertyFlags, VkDeviceSize size, void * data, VkBuffer * buffer, VkDeviceMemory * memory)
{
	// Buffer handle
	VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo(usageFlags, size);
	vkTools::checkResult(vkCreateBuffer(device, &bufferCreateInfo, nullptr, buffer));

	// Backing memory, sized and typed from the buffer's requirements
	VkMemoryRequirements memReqs;
	vkGetBufferMemoryRequirements(device, *buffer, &memReqs);
	VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo();
	memAlloc.allocationSize = memReqs.size;
	getMemoryType(memReqs.memoryTypeBits, memoryPropertyFlags, &memAlloc.memoryTypeIndex);
	vkTools::checkResult(vkAllocateMemory(device, &memAlloc, nullptr, memory));

	// Optional initial upload: map, copy, unmap
	if (data != nullptr)
	{
		void *mapped;
		vkTools::checkResult(vkMapMemory(device, *memory, 0, size, 0, &mapped));
		memcpy(mapped, data, size);
		vkUnmapMemory(device, *memory);
	}

	// Attach the allocation to the buffer
	vkTools::checkResult(vkBindBufferMemory(device, *buffer, *memory, 0));
	return true;
}
/*
===============
GL_CreateHeap

Allocates a device-memory heap of the given size from the given memory type
and initializes its free list with a single node spanning the whole heap.
===============
*/
glheap_t * GL_CreateHeap(VkDeviceSize size, uint32_t memory_type_index)
{
	glheap_t * heap;
	VkMemoryAllocateInfo memory_allocate_info;
	VkResult err;

	/* BUGFIX: both malloc results were previously used unchecked */
	heap = malloc(sizeof(glheap_t));
	if (!heap)
		Sys_Error("GL_CreateHeap: out of memory");

	memset(&memory_allocate_info, 0, sizeof(memory_allocate_info));
	memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
	memory_allocate_info.allocationSize = size;
	memory_allocate_info.memoryTypeIndex = memory_type_index;

	err = vkAllocateMemory(vulkan_globals.device, &memory_allocate_info, NULL, &heap->memory);
	if (err != VK_SUCCESS)
		Sys_Error("vkAllocateMemory failed");

	/* Free list starts as one free node covering the entire heap */
	heap->head = malloc(sizeof(glheapnode_t));
	if (!heap->head)
		Sys_Error("GL_CreateHeap: out of memory");
	heap->head->offset = 0;
	heap->head->size = size;
	heap->head->prev = NULL;
	heap->head->next = NULL;
	heap->head->free = true;

	return heap;
}
void setupDepthStencil() { VkImageCreateInfo image = {}; image.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image.imageType = VK_IMAGE_TYPE_2D; image.format = depthFormat; image.extent = { width, height, 1 }; image.mipLevels = 1; image.arrayLayers = 1; image.samples = VK_SAMPLE_COUNT_1_BIT; image.tiling = VK_IMAGE_TILING_OPTIMAL; image.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VK_CHECK_RESULT(vkCreateImage(device, &image, nullptr, &depthStencil.image)); VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; VkMemoryRequirements memReqs; vkGetImageMemoryRequirements(device, depthStencil.image, &memReqs); memAlloc.allocationSize = memReqs.size; memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &depthStencil.mem)); VK_CHECK_RESULT(vkBindImageMemory(device, depthStencil.image, depthStencil.mem, 0)); VkImageViewCreateInfo depthStencilView = {}; depthStencilView.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; depthStencilView.viewType = VK_IMAGE_VIEW_TYPE_2D; depthStencilView.format = depthFormat; depthStencilView.subresourceRange = {}; depthStencilView.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; depthStencilView.subresourceRange.baseMipLevel = 0; depthStencilView.subresourceRange.levelCount = 1; depthStencilView.subresourceRange.baseArrayLayer = 0; depthStencilView.subresourceRange.layerCount = 1; depthStencilView.image = depthStencil.image; VK_CHECK_RESULT(vkCreateImageView(device, &depthStencilView, nullptr, &depthStencil.view)); }
/**
 * Create a buffer on the device
 *
 * @param usageFlags Usage flag bitmask for the buffer (i.e. index, vertex, uniform buffer)
 * @param memoryPropertyFlags Memory properties for this buffer (i.e. device local, host visible, coherent)
 * @param buffer Pointer to a vk::Vulkan buffer object
 * @param size Size of the buffer in bytes
 * @param data Pointer to the data that should be copied to the buffer after creation (optional, if not set, no data is copied over)
 *
 * @return VK_SUCCESS if buffer handle and memory have been created and (optionally passed) data has been copied
 */
VkResult createBuffer(VkBufferUsageFlags usageFlags, VkMemoryPropertyFlags memoryPropertyFlags, vk::Buffer *buffer, VkDeviceSize size, void *data = nullptr)
{
	buffer->device = logicalDevice;

	// Create the buffer handle
	VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo(usageFlags, size);
	VK_CHECK_RESULT(vkCreateBuffer(logicalDevice, &bufferCreateInfo, nullptr, &buffer->buffer));

	// Create the memory backing up the buffer handle
	VkMemoryRequirements memReqs;
	VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo();
	vkGetBufferMemoryRequirements(logicalDevice, buffer->buffer, &memReqs);
	memAlloc.allocationSize = memReqs.size;
	// Find a memory type index that fits the properties of the buffer
	memAlloc.memoryTypeIndex = getMemoryType(memReqs.memoryTypeBits, memoryPropertyFlags);
	VK_CHECK_RESULT(vkAllocateMemory(logicalDevice, &memAlloc, nullptr, &buffer->memory));

	// Record the buffer's properties on the wrapper object for later use
	buffer->alignment = memReqs.alignment;
	buffer->size = memAlloc.allocationSize;
	buffer->usageFlags = usageFlags;
	buffer->memoryPropertyFlags = memoryPropertyFlags;

	// If a pointer to the buffer data has been passed, map the buffer and copy over the data
	if (data != nullptr)
	{
		VK_CHECK_RESULT(buffer->map());
		memcpy(buffer->mapped, data, size);
		buffer->unmap();
	}

	// Initialize a default descriptor that covers the whole buffer size
	buffer->setupDescriptor();

	// Attach the memory to the buffer object
	return buffer->bind();
}
// Creates the VkBuffer, allocates and binds its backing device memory, and
// optionally initializes the buffer contents from host_ptr.
// Returns false if any Vulkan call fails (VK_CALL_RET returns early).
bool vulkan_buffer::create_internal(const bool copy_host_data, const compute_queue& cqueue) {
	const auto& vulkan_dev = ((const vulkan_device&)cqueue.get_device()).device;

	// create the buffer
	const VkBufferCreateInfo buffer_create_info {
		.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
		.pNext = nullptr,
		.flags = 0, // no sparse backing
		.size = size,
		// set all the bits here, might need some better restrictions later on
		// NOTE: not setting vertex bit here, b/c we're always using SSBOs
		.usage = (VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
				  VK_BUFFER_USAGE_TRANSFER_DST_BIT |
				  VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
				  VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
				  VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT),
		// TODO: probably want a concurrent option later on
		.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
		.queueFamilyIndexCount = 0,
		.pQueueFamilyIndices = nullptr,
	};
	VK_CALL_RET(vkCreateBuffer(vulkan_dev, &buffer_create_info, nullptr, &buffer),
				"buffer creation failed", false)

	// allocate / back it up
	VkMemoryRequirements mem_req;
	vkGetBufferMemoryRequirements(vulkan_dev, buffer, &mem_req);
	const VkMemoryAllocateInfo alloc_info {
		.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
		.pNext = nullptr,
		.allocationSize = mem_req.size,
		.memoryTypeIndex = find_memory_type_index(mem_req.memoryTypeBits, true /* prefer device memory */),
	};
	VK_CALL_RET(vkAllocateMemory(vulkan_dev, &alloc_info, nullptr, &mem),
				"buffer allocation failed", false)
	VK_CALL_RET(vkBindBufferMemory(vulkan_dev, buffer, mem, 0),
				"buffer allocation binding failed", false)

	// update buffer desc info (used when binding this buffer as a descriptor)
	buffer_info.buffer = buffer;
	buffer_info.offset = 0;
	buffer_info.range = size;

	// buffer init from host data pointer (skipped when NO_INITIAL_COPY is set)
	if(copy_host_data &&
	   host_ptr != nullptr &&
	   !has_flag<COMPUTE_MEMORY_FLAG::NO_INITIAL_COPY>(flags)) {
		if(!write_memory_data(cqueue, host_ptr, size, 0, 0,
							  "failed to initialize buffer with host data (map failed)")) {
			return false;
		}
	}

	return true;
}

// Destroys the buffer handle and resets the descriptor info.
// NOTE(review): `mem` is not freed here — presumably released by the base
// class or another owner; verify against the rest of the class.
vulkan_buffer::~vulkan_buffer() {
	if(buffer != nullptr) {
		vkDestroyBuffer(((const vulkan_device&)dev).device, buffer, nullptr);
		buffer = nullptr;
	}
	buffer_info = { nullptr, 0, 0 };
}

// Convenience overload: read back into the object's internal host pointer.
void vulkan_buffer::read(const compute_queue& cqueue, const size_t size_, const size_t offset) {
	read(cqueue, host_ptr, size_, offset);
}

// Reads "size_" bytes (the whole buffer when size_ == 0) starting at "offset"
// into dst. No-op if the buffer doesn't exist or the range check fails.
void vulkan_buffer::read(const compute_queue& cqueue, void* dst, const size_t size_, const size_t offset) {
	if(buffer == nullptr) return;

	const size_t read_size = (size_ == 0 ? size : size_);
	if(!read_check(size, read_size, offset, flags)) return;

	GUARD(lock);
	read_memory_data(cqueue, dst, read_size, offset, 0, "failed to read buffer");
}

// Convenience overload: write from the object's internal host pointer.
void vulkan_buffer::write(const compute_queue& cqueue, const size_t size_, const size_t offset) {
	write(cqueue, host_ptr, size_, offset);
}
// Setup and fill the compute shader storage buffers for // vertex positions and velocities void prepareStorageBuffers() { float destPosX = 0.0f; float destPosY = 0.0f; // Initial particle positions std::vector<Particle> particleBuffer; for (int i = 0; i < PARTICLE_COUNT; ++i) { // Position float aspectRatio = (float)height / (float)width; float rndVal = (float)rand() / (float)(RAND_MAX / (360.0f * 3.14f * 2.0f)); float rndRad = (float)rand() / (float)(RAND_MAX) * 0.5f; Particle p; p.pos = glm::vec4( destPosX + cos(rndVal) * rndRad * aspectRatio, destPosY + sin(rndVal) * rndRad, 0.0f, 1.0f); p.col = glm::vec4( (float)(rand() % 255) / 255.0f, (float)(rand() % 255) / 255.0f, (float)(rand() % 255) / 255.0f, 1.0f); p.vel = glm::vec4(0.0f); particleBuffer.push_back(p); } // Buffer size is the same for all storage buffers uint32_t storageBufferSize = particleBuffer.size() * sizeof(Particle); VkMemoryAllocateInfo memAlloc = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; VkResult err; void *data; struct StagingBuffer { VkDeviceMemory memory; VkBuffer buffer; } stagingBuffer; // Allocate and fill host-visible staging storage buffer object // Allocate and fill storage buffer object VkBufferCreateInfo vBufferInfo = vkTools::initializers::bufferCreateInfo( VK_BUFFER_USAGE_TRANSFER_SRC_BIT, storageBufferSize); vkTools::checkResult(vkCreateBuffer(device, &vBufferInfo, nullptr, &stagingBuffer.buffer)); vkGetBufferMemoryRequirements(device, stagingBuffer.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); vkTools::checkResult(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffer.memory)); vkTools::checkResult(vkMapMemory(device, stagingBuffer.memory, 0, storageBufferSize, 0, &data)); memcpy(data, particleBuffer.data(), storageBufferSize); vkUnmapMemory(device, stagingBuffer.memory); vkTools::checkResult(vkBindBufferMemory(device, 
stagingBuffer.buffer, stagingBuffer.memory, 0)); // Allocate device local storage buffer ojbect vBufferInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; vkTools::checkResult(vkCreateBuffer(device, &vBufferInfo, nullptr, &computeStorageBuffer.buffer)); vkGetBufferMemoryRequirements(device, computeStorageBuffer.buffer, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAlloc.memoryTypeIndex); vkTools::checkResult(vkAllocateMemory(device, &memAlloc, nullptr, &computeStorageBuffer.memory)); vkTools::checkResult(vkBindBufferMemory(device, computeStorageBuffer.buffer, computeStorageBuffer.memory, 0)); // Copy from host to device createSetupCommandBuffer(); VkBufferCopy copyRegion = {}; copyRegion.size = storageBufferSize; vkCmdCopyBuffer( setupCmdBuffer, stagingBuffer.buffer, computeStorageBuffer.buffer, 1, ©Region); flushSetupCommandBuffer(); // Destroy staging buffer vkDestroyBuffer(device, stagingBuffer.buffer, nullptr); vkFreeMemory(device, stagingBuffer.memory, nullptr); computeStorageBuffer.descriptor.buffer = computeStorageBuffer.buffer; computeStorageBuffer.descriptor.offset = 0; computeStorageBuffer.descriptor.range = storageBufferSize; // Binding description vertices.bindingDescriptions.resize(1); vertices.bindingDescriptions[0] = vkTools::initializers::vertexInputBindingDescription( VERTEX_BUFFER_BIND_ID, sizeof(Particle), VK_VERTEX_INPUT_RATE_VERTEX); // Attribute descriptions // Describes memory layout and shader positions vertices.attributeDescriptions.resize(2); // Location 0 : Position vertices.attributeDescriptions[0] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0); // Location 1 : Color vertices.attributeDescriptions[1] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 1, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(float) * 4); // Assign to vertex 
buffer vertices.inputState = vkTools::initializers::pipelineVertexInputStateCreateInfo(); vertices.inputState.vertexBindingDescriptionCount = vertices.bindingDescriptions.size(); vertices.inputState.pVertexBindingDescriptions = vertices.bindingDescriptions.data(); vertices.inputState.vertexAttributeDescriptionCount = vertices.attributeDescriptions.size(); vertices.inputState.pVertexAttributeDescriptions = vertices.attributeDescriptions.data(); }
// Input-attachment sample: renders a triangle whose fragment shader reads its
// color from an input attachment that was pre-cleared to yellow, so the final
// result is a yellow triangle on a gray background.
int sample_main(int argc, char *argv[]) {
    VkResult U_ASSERT_ONLY res;
    bool U_ASSERT_ONLY pass;
    struct sample_info info = {};
    char sample_title[] = "Input Attachment Sample";
    const bool depthPresent = false;
    const bool vertexPresent = false;

    // Standard sample boilerplate: instance, device, window, swapchain, command buffer
    process_command_line_args(info, argc, argv);
    init_global_layer_properties(info);
    init_instance_extension_names(info);
    init_device_extension_names(info);
    init_instance(info, sample_title);
    init_enumerate_device(info);

    // The input attachment format must support color-attachment usage
    VkFormatProperties props;
    vkGetPhysicalDeviceFormatProperties(info.gpus[0], VK_FORMAT_R8G8B8A8_UNORM, &props);
    if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
        std::cout << "VK_FORMAT_R8G8B8A8_UNORM format unsupported for input "
                     "attachment\n";
        exit(-1);
    }

    init_window_size(info, 500, 500);
    init_connection(info);
    init_window(info);
    init_swapchain_extension(info);
    init_device(info);
    init_command_pool(info);
    init_command_buffer(info);
    execute_begin_command_buffer(info);
    init_device_queue(info);
    init_swap_chain(info);

    /* VULKAN_KEY_START */

    // Create a framebuffer with 2 attachments, one the color attachment
    // the shaders render into, and the other an input attachment which
    // will be cleared to yellow, and then used by the shaders to color
    // the drawn triangle. Final result should be a yellow triangle

    // Create the image that will be used as the input attachment
    // The image for the color attachment is the presentable image already
    // created in init_swapchain()
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = info.format;
    image_create_info.extent.width = info.width;
    image_create_info.extent.height = info.height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = NUM_SAMPLES;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    // TRANSFER_DST so the image can be cleared before use as an input attachment
    image_create_info.usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    image_create_info.queueFamilyIndexCount = 0;
    image_create_info.pQueueFamilyIndices = NULL;
    image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    image_create_info.flags = 0;

    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = 0;
    mem_alloc.memoryTypeIndex = 0;

    // Create the input-attachment image and bind device memory to it
    VkImage input_image;
    VkDeviceMemory input_memory;
    res = vkCreateImage(info.device, &image_create_info, NULL, &input_image);
    assert(res == VK_SUCCESS);
    VkMemoryRequirements mem_reqs;
    vkGetImageMemoryRequirements(info.device, input_image, &mem_reqs);
    mem_alloc.allocationSize = mem_reqs.size;
    pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, 0, &mem_alloc.memoryTypeIndex);
    assert(pass);
    res = vkAllocateMemory(info.device, &mem_alloc, NULL, &input_memory);
    assert(res == VK_SUCCESS);
    res = vkBindImageMemory(info.device, input_image, input_memory, 0);
    assert(res == VK_SUCCESS);

    // Set the image layout to TRANSFER_DST_OPTIMAL to be ready for clear
    set_image_layout(info, input_image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                     VK_PIPELINE_STAGE_TRANSFER_BIT);

    VkImageSubresourceRange srRange = {};
    srRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    srRange.baseMipLevel = 0;
    srRange.levelCount = VK_REMAINING_MIP_LEVELS;
    srRange.baseArrayLayer = 0;
    srRange.layerCount = VK_REMAINING_ARRAY_LAYERS;

    // Yellow (R=1, G=1, B=0)
    VkClearColorValue clear_color;
    clear_color.float32[0] = 1.0f;
    clear_color.float32[1] = 1.0f;
    clear_color.float32[2] = 0.0f;
    clear_color.float32[3] = 0.0f;

    // Clear the input attachment image to yellow
    vkCmdClearColorImage(info.cmd, input_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &srRange);

    // Set the image layout to SHADER_READONLY_OPTIMAL for use by the shaders
    set_image_layout(info, input_image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                     VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);

    // View over the input attachment, used for the descriptor and framebuffer
    VkImageViewCreateInfo view_info = {};
    view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    view_info.pNext = NULL;
    view_info.image = VK_NULL_HANDLE;
    view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
    view_info.format = info.format;
    view_info.components.r = VK_COMPONENT_SWIZZLE_R;
    view_info.components.g = VK_COMPONENT_SWIZZLE_G;
    view_info.components.b = VK_COMPONENT_SWIZZLE_B;
    view_info.components.a = VK_COMPONENT_SWIZZLE_A;
    view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    view_info.subresourceRange.baseMipLevel = 0;
    view_info.subresourceRange.levelCount = 1;
    view_info.subresourceRange.baseArrayLayer = 0;
    view_info.subresourceRange.layerCount = 1;

    VkImageView input_attachment_view;
    view_info.image = input_image;
    res = vkCreateImageView(info.device, &view_info, NULL, &input_attachment_view);
    assert(res == VK_SUCCESS);

    // Descriptor image info for the input attachment (no sampler needed)
    VkDescriptorImageInfo input_image_info = {};
    input_image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    input_image_info.imageView = input_attachment_view;
    input_image_info.sampler = VK_NULL_HANDLE;

    // Descriptor set layout: single input attachment at binding 0, fragment stage
    VkDescriptorSetLayoutBinding layout_bindings[1];
    layout_bindings[0].binding = 0;
    layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    layout_bindings[0].descriptorCount = 1;
    layout_bindings[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    layout_bindings[0].pImmutableSamplers = NULL;

    VkDescriptorSetLayoutCreateInfo descriptor_layout = {};
    descriptor_layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    descriptor_layout.pNext = NULL;
    descriptor_layout.bindingCount = 1;
    descriptor_layout.pBindings = layout_bindings;

    info.desc_layout.resize(NUM_DESCRIPTOR_SETS);
    res = vkCreateDescriptorSetLayout(info.device, &descriptor_layout, NULL, info.desc_layout.data());
    assert(res == VK_SUCCESS);

    // Pipeline layout with no push constants
    VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {};
    pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    pPipelineLayoutCreateInfo.pNext = NULL;
    pPipelineLayoutCreateInfo.pushConstantRangeCount = 0;
    pPipelineLayoutCreateInfo.pPushConstantRanges = NULL;
    pPipelineLayoutCreateInfo.setLayoutCount = NUM_DESCRIPTOR_SETS;
    pPipelineLayoutCreateInfo.pSetLayouts = info.desc_layout.data();

    res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout);
    assert(res == VK_SUCCESS);

    // First attachment is the color attachment - clear at the beginning of the
    // renderpass and transition layout to PRESENT_SRC_KHR at the end of
    // renderpass
    VkAttachmentDescription attachments[2];
    attachments[0].format = info.format;
    attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
    attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attachments[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attachments[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    attachments[0].flags = 0;

    // Second attachment is input attachment. Once cleared it should have
    // width*height yellow pixels. Doing a subpassLoad in the fragment shader
    // should give the shader the color at the fragments x,y location
    // from the input attachment
    attachments[1].format = info.format;
    attachments[1].samples = VK_SAMPLE_COUNT_1_BIT;
    attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attachments[1].initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    attachments[1].finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    attachments[1].flags = 0;

    VkAttachmentReference color_reference = {};
    color_reference.attachment = 0;
    color_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

    VkAttachmentReference input_reference = {};
    input_reference.attachment = 1;
    input_reference.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

    // Single subpass reading the input attachment and writing the color attachment
    VkSubpassDescription subpass = {};
    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.flags = 0;
    subpass.inputAttachmentCount = 1;
    subpass.pInputAttachments = &input_reference;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &color_reference;
    subpass.pResolveAttachments = NULL;
    subpass.pDepthStencilAttachment = NULL;
    subpass.preserveAttachmentCount = 0;
    subpass.pPreserveAttachments = NULL;

    VkRenderPassCreateInfo rp_info = {};
    rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    rp_info.pNext = NULL;
    rp_info.attachmentCount = 2;
    rp_info.pAttachments = attachments;
    rp_info.subpassCount = 1;
    rp_info.pSubpasses = &subpass;
    rp_info.dependencyCount = 0;
    rp_info.pDependencies = NULL;

    res = vkCreateRenderPass(info.device, &rp_info, NULL, &info.render_pass);
    assert(!res);

    init_shaders(info, vertShaderText, fragShaderText);

    // One framebuffer per swapchain image; attachment 0 is filled in per-image
    // inside the loop below, attachment 1 is always the input attachment view
    VkImageView fb_attachments[2];
    fb_attachments[1] = input_attachment_view;

    VkFramebufferCreateInfo fbc_info = {};
    fbc_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
    fbc_info.pNext = NULL;
    fbc_info.renderPass = info.render_pass;
    fbc_info.attachmentCount = 2;
    fbc_info.pAttachments = fb_attachments;
    fbc_info.width = info.width;
    fbc_info.height = info.height;
    fbc_info.layers = 1;

    uint32_t i;
    info.framebuffers = (VkFramebuffer *)malloc(info.swapchainImageCount * sizeof(VkFramebuffer));
    for (i = 0; i < info.swapchainImageCount; i++) {
        fb_attachments[0] = info.buffers[i].view;
        res = vkCreateFramebuffer(info.device, &fbc_info, NULL, &info.framebuffers[i]);
        assert(res == VK_SUCCESS);
    }

    // Descriptor pool / set for the single input attachment descriptor
    VkDescriptorPoolSize type_count[1];
    type_count[0].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    type_count[0].descriptorCount = 1;

    VkDescriptorPoolCreateInfo descriptor_pool = {};
    descriptor_pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    descriptor_pool.pNext = NULL;
    descriptor_pool.maxSets = 1;
    descriptor_pool.poolSizeCount = 1;
    descriptor_pool.pPoolSizes = type_count;

    res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool);
    assert(res == VK_SUCCESS);

    VkDescriptorSetAllocateInfo desc_alloc_info[1];
    desc_alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    desc_alloc_info[0].pNext = NULL;
    desc_alloc_info[0].descriptorPool = info.desc_pool;
    desc_alloc_info[0].descriptorSetCount = 1;
    desc_alloc_info[0].pSetLayouts = info.desc_layout.data();

    info.desc_set.resize(1);
    res = vkAllocateDescriptorSets(info.device, desc_alloc_info, info.desc_set.data());
    assert(res == VK_SUCCESS);

    VkWriteDescriptorSet writes[1];

    // Write descriptor set with one write describing input attachment
    writes[0] = {};
    writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writes[0].dstSet = info.desc_set[0];
    writes[0].dstBinding = 0;
    writes[0].descriptorCount = 1;
    writes[0].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writes[0].pImageInfo = &input_image_info;
    writes[0].pBufferInfo = nullptr;
    writes[0].pTexelBufferView = nullptr;
    writes[0].dstArrayElement = 0;

    vkUpdateDescriptorSets(info.device, 1, writes, 0, NULL);

    init_pipeline_cache(info);
    init_pipeline(info, depthPresent, vertexPresent);

    // Color attachment clear to gray
    VkClearValue clear_values;
    clear_values.color.float32[0] = 0.2f;
    clear_values.color.float32[1] = 0.2f;
    clear_values.color.float32[2] = 0.2f;
    clear_values.color.float32[3] = 0.2f;

    VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo;
    imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    imageAcquiredSemaphoreCreateInfo.pNext = NULL;
    imageAcquiredSemaphoreCreateInfo.flags = 0;

    res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &info.imageAcquiredSemaphore);
    assert(res == VK_SUCCESS);

    // Get the index of the next available swapchain image:
    res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.imageAcquiredSemaphore,
                                VK_NULL_HANDLE, &info.current_buffer);
    // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR
    // return codes
    assert(res == VK_SUCCESS);

    // Record the render pass: bind pipeline + descriptor set, draw one triangle
    VkRenderPassBeginInfo rp_begin;
    rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rp_begin.pNext = NULL;
    rp_begin.renderPass = info.render_pass;
    rp_begin.framebuffer = info.framebuffers[info.current_buffer];
    rp_begin.renderArea.offset.x = 0;
    rp_begin.renderArea.offset.y = 0;
    rp_begin.renderArea.extent.width = info.width;
    rp_begin.renderArea.extent.height = info.height;
    rp_begin.clearValueCount = 1;
    rp_begin.pClearValues = &clear_values;

    vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
    vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline);
    vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0,
                            NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL);

    init_viewports(info);
    init_scissors(info);

    vkCmdDraw(info.cmd, 3, 1, 0, 0);
    vkCmdEndRenderPass(info.cmd);
    res = vkEndCommandBuffer(info.cmd);
    assert(res == VK_SUCCESS);

    /* VULKAN_KEY_END */

    const VkCommandBuffer cmd_bufs[] = {info.cmd};

    // Submit and wait for the draw to finish
    VkFenceCreateInfo fenceInfo;
    VkFence drawFence;
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = NULL;
    fenceInfo.flags = 0;
    vkCreateFence(info.device, &fenceInfo, NULL, &drawFence);

    execute_queue_cmdbuf(info, cmd_bufs, drawFence);

    do {
        res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT);
    } while (res == VK_TIMEOUT);
    assert(res == VK_SUCCESS);
    vkDestroyFence(info.device, drawFence, NULL);

    execute_present_image(info);

    wait_seconds(1);
    if (info.save_images) write_ppm(info, "input_attachment");

    // Teardown in reverse order of creation
    vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL);
    vkDestroyImageView(info.device, input_attachment_view, NULL);
    vkDestroyImage(info.device, input_image, NULL);
    vkFreeMemory(info.device, input_memory, NULL);
    destroy_pipeline(info);
    destroy_pipeline_cache(info);
    destroy_descriptor_pool(info);
    destroy_framebuffers(info);
    destroy_shaders(info);
    destroy_renderpass(info);
    destroy_descriptor_and_pipeline_layouts(info);
    destroy_swap_chain(info);
    destroy_command_buffer(info);
    destroy_command_pool(info);
    destroy_device(info);
    destroy_window(info);
    destroy_instance(info);
    return 0;
}
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Copy/Blit Image"; VkImageCreateInfo image_info; VkImage bltSrcImage; VkImage bltDstImage; VkMemoryRequirements memReq; VkMemoryAllocateInfo memAllocInfo; VkDeviceMemory dmem; unsigned char *pImgMem; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 640, 640); init_connection(info); init_window(info); init_swapchain_extension(info); VkSurfaceCapabilitiesKHR surfCapabilities; res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(info.gpus[0], info.surface, &surfCapabilities); if (!(surfCapabilities.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) { std::cout << "Surface cannot be destination of blit - abort \n"; exit(-1); } init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); /* VULKAN_KEY_START */ VkFormatProperties formatProps; vkGetPhysicalDeviceFormatProperties(info.gpus[0], info.format, &formatProps); assert( (formatProps.linearTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT) && "Format cannot be used as transfer source"); VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, 
presentCompleteSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); // Create an image, map it, and write some values to the image image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_info.pNext = NULL; image_info.imageType = VK_IMAGE_TYPE_2D; image_info.format = info.format; image_info.extent.width = info.width; image_info.extent.height = info.height; image_info.extent.depth = 1; image_info.mipLevels = 1; image_info.arrayLayers = 1; image_info.samples = NUM_SAMPLES; image_info.queueFamilyIndexCount = 0; image_info.pQueueFamilyIndices = NULL; image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_info.flags = 0; image_info.tiling = VK_IMAGE_TILING_LINEAR; image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; res = vkCreateImage(info.device, &image_info, NULL, &bltSrcImage); memAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAllocInfo.pNext = NULL; vkGetImageMemoryRequirements(info.device, bltSrcImage, &memReq); bool pass = memory_type_from_properties(info, memReq.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAllocInfo.memoryTypeIndex); assert(pass); memAllocInfo.allocationSize = memReq.size; res = vkAllocateMemory(info.device, &memAllocInfo, NULL, &dmem); res = vkBindImageMemory(info.device, bltSrcImage, dmem, 0); set_image_layout(info, bltSrcImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence cmdFence; init_fence(info, cmdFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; submit_info.pNext = NULL; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &presentCompleteSemaphore; submit_info.pWaitDstStageMask = &pipe_stage_flags; 
submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &info.cmd; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, cmdFence); assert(res == VK_SUCCESS); /* Make sure command buffer is finished before mapping */ do { res = vkWaitForFences(info.device, 1, &cmdFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, cmdFence, NULL); res = vkMapMemory(info.device, dmem, 0, memReq.size, 0, (void **)&pImgMem); // Checkerboard of 8x8 pixel squares for (int row = 0; row < info.height; row++) { for (int col = 0; col < info.width; col++) { unsigned char rgb = (((row & 0x8) == 0) ^ ((col & 0x8) == 0)) * 255; pImgMem[0] = rgb; pImgMem[1] = rgb; pImgMem[2] = rgb; pImgMem[3] = 255; pImgMem += 4; } } // Flush the mapped memory and then unmap it Assume it isn't coherent since // we didn't really confirm VkMappedMemoryRange memRange; memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; memRange.pNext = NULL; memRange.memory = dmem; memRange.offset = 0; memRange.size = memReq.size; res = vkFlushMappedMemoryRanges(info.device, 1, &memRange); vkUnmapMemory(info.device, dmem); vkResetCommandBuffer(info.cmd, 0); execute_begin_command_buffer(info); set_image_layout(info, bltSrcImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); bltDstImage = info.buffers[info.current_buffer].image; // init_swap_chain will create the images as color attachment optimal // but we want transfer dst optimal set_image_layout(info, bltDstImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); // Do a 32x32 blit to all of the dst image - should get big squares VkImageBlit region; region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.srcSubresource.mipLevel = 0; region.srcSubresource.baseArrayLayer = 0; 
region.srcSubresource.layerCount = 1; region.srcOffsets[0].x = 0; region.srcOffsets[0].y = 0; region.srcOffsets[0].z = 0; region.srcOffsets[1].x = 32; region.srcOffsets[1].y = 32; region.srcOffsets[1].z = 1; region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.dstSubresource.mipLevel = 0; region.dstSubresource.baseArrayLayer = 0; region.dstSubresource.layerCount = 1; region.dstOffsets[0].x = 0; region.dstOffsets[0].y = 0; region.dstOffsets[0].z = 0; region.dstOffsets[1].x = info.width; region.dstOffsets[1].y = info.height; region.dstOffsets[1].z = 1; vkCmdBlitImage(info.cmd, bltSrcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, bltDstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion, VK_FILTER_LINEAR); // Do a image copy to part of the dst image - checks should stay small VkImageCopy cregion; cregion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; cregion.srcSubresource.mipLevel = 0; cregion.srcSubresource.baseArrayLayer = 0; cregion.srcSubresource.layerCount = 1; cregion.srcOffset.x = 0; cregion.srcOffset.y = 0; cregion.srcOffset.z = 0; cregion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; cregion.dstSubresource.mipLevel = 0; cregion.dstSubresource.baseArrayLayer = 0; cregion.dstSubresource.layerCount = 1; cregion.dstOffset.x = 256; cregion.dstOffset.y = 256; cregion.dstOffset.z = 0; cregion.extent.width = 128; cregion.extent.height = 128; cregion.extent.depth = 1; vkCmdCopyImage(info.cmd, bltSrcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, bltDstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cregion); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex 
= VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); submit_info.pNext = NULL; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &info.cmd; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); res = vkQueueWaitIdle(info.queue); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); 
wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "copyblitimage"); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); vkDestroyImage(info.device, bltSrcImage, NULL); vkFreeMemory(info.device, dmem, NULL); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
// Creates (or recreates) this image from a raw pixel array.
//
// The pixel data is first copied into a host-visible staging buffer, then
// transferred into an optimally-tiled, device-local image via a command
// buffer, and finally wrapped in an image view for sampling.
//
// @param pixels         Source pixel data; may be NULL, in which case the
//                       staging buffer is left unwritten.
// @param width,height   Image dimensions in texels.
// @param bDynamic       When true, the staging buffer/memory are kept alive
//                       after a successful upload (presumably so the image
//                       can be re-written later — confirm against callers);
//                       otherwise they are freed at the end.
// @param num_components Number of color components per texel.
// @param fmt            Explicit Vulkan format, or VK_FORMAT_UNDEFINED to
//                       derive a 32-bit float format from num_components.
// @param comp_size      Size of one component in bytes (overridden to 4 when
//                       the format is derived).
// @return W_SUCCEEDED on success; W_INVALIDPARAM for an unsupported component
//         count; W_OUTOFMEMORY / W_UNABLETOCREATEIMAGE on Vulkan failures.
WError WImage::CreateFromPixelsArray(
    void* pixels,
    unsigned int width, unsigned int height,
    bool bDynamic,
    unsigned int num_components,
    VkFormat fmt,
    size_t comp_size) {
    VkDevice device = m_app->GetVulkanDevice();
    VkResult err;
    VkMemoryAllocateInfo memAllocInfo = vkTools::initializers::memoryAllocateInfo();
    VkMemoryRequirements memReqs = {};
    VkBufferImageCopy bufferCopyRegion = {};
    VkImageCreateInfo imageCreateInfo = vkTools::initializers::imageCreateInfo();
    VkBufferCreateInfo bufferCreateInfo = vkTools::initializers::bufferCreateInfo();
    uint8_t *data;

    // No explicit format given: pick a 32-bit-float format matching the
    // component count and force comp_size to 4 bytes to match it.
    VkFormat format = fmt;
    if (fmt == VK_FORMAT_UNDEFINED) {
        switch (num_components) {
        case 1: format = VK_FORMAT_R32_SFLOAT; break;
        case 2: format = VK_FORMAT_R32G32_SFLOAT; break;
        case 3: format = VK_FORMAT_R32G32B32_SFLOAT; break;
        case 4: format = VK_FORMAT_R32G32B32A32_SFLOAT; break;
        default:
            return WError(W_INVALIDPARAM);
        }
        comp_size = 4;
    }

    // Release any resources from a previous Create* call before recreating.
    _DestroyResources();

    // Create a host-visible staging buffer that contains the raw image data
    bufferCreateInfo.size = width * height * num_components * comp_size;
    // This buffer is used as a transfer source for the buffer copy
    bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    err = vkCreateBuffer(device, &bufferCreateInfo, nullptr, &m_stagingBuffer);
    if (err) {
        _DestroyResources();
        return WError(W_OUTOFMEMORY);
    }

    // Get memory requirements for the staging buffer (alignment, memory type bits)
    vkGetBufferMemoryRequirements(device, m_stagingBuffer, &memReqs);
    memAllocInfo.allocationSize = memReqs.size;
    // Get memory type index for a host visible buffer
    m_app->GetMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAllocInfo.memoryTypeIndex);
    err = vkAllocateMemory(device, &memAllocInfo, nullptr, &m_stagingMemory);
    if (err) {
        vkDestroyBuffer(device, m_stagingBuffer, nullptr);
        _DestroyResources();
        return WError(W_OUTOFMEMORY);
    }
    err = vkBindBufferMemory(device, m_stagingBuffer, m_stagingMemory, 0);
    if (err) goto free_buffers;

    // Copy texture data into staging buffer
    if (pixels) {
        err = vkMapMemory(device, m_stagingMemory, 0, memReqs.size, 0, (void **)&data);
        if (err) goto free_buffers;
        memcpy(data, pixels, bufferCreateInfo.size);
        vkUnmapMemory(device, m_stagingMemory);
    }

    // Create optimal tiled target image
    imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imageCreateInfo.format = format;
    imageCreateInfo.mipLevels = 1; // TODO: USE m_app->engineParams["numGeneratedMips"]
    imageCreateInfo.arrayLayers = 1;
    imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
    imageCreateInfo.extent = { width, height, 1 };
    // The image receives the staged data and is then sampled by shaders.
    imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    err = vkCreateImage(device, &imageCreateInfo, nullptr, &m_image);
    if (err) goto free_buffers;

    // Back the image with device-local memory.
    vkGetImageMemoryRequirements(device, m_image, &memReqs);
    memAllocInfo.allocationSize = memReqs.size;
    m_app->GetMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAllocInfo.memoryTypeIndex);
    err = vkAllocateMemory(device, &memAllocInfo, nullptr, &m_deviceMemory);
    if (err) goto free_buffers;
    err = vkBindImageMemory(device, m_image, m_deviceMemory, 0);
    if (err) goto free_buffers;

    // Setup buffer copy regions for each mip level
    bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    bufferCopyRegion.imageSubresource.mipLevel = 0;
    bufferCopyRegion.imageSubresource.baseArrayLayer = 0;
    bufferCopyRegion.imageSubresource.layerCount = 1;
    bufferCopyRegion.imageExtent.width = width;
    bufferCopyRegion.imageExtent.height = height;
    bufferCopyRegion.imageExtent.depth = 1;
    bufferCopyRegion.bufferOffset = 0;

    err = m_app->BeginCommandBuffer();
    if (err) goto free_buffers;

    // Image barrier for optimal image (target)
    // Optimal image will be used as destination for the copy
    vkTools::setImageLayout(
        m_app->GetCommandBuffer(),
        m_image,
        VK_IMAGE_ASPECT_COLOR_BIT,
        VK_IMAGE_LAYOUT_PREINITIALIZED,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    // Copy mip levels from staging buffer
    vkCmdCopyBufferToImage(
        m_app->GetCommandBuffer(),
        m_stagingBuffer,
        m_image,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        1,
        &bufferCopyRegion
    );

    // Change texture image layout to shader read after all mip levels have been copied
    vkTools::setImageLayout(
        m_app->GetCommandBuffer(),
        m_image,
        VK_IMAGE_ASPECT_COLOR_BIT,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

    err = m_app->EndCommandBuffer();
    if (err) goto free_buffers;

    // All error paths above funnel through here with err != 0; the success
    // path falls through with err == 0.
free_buffers:
    // Clean up staging resources
    // Staging resources are freed on any failure, and on success unless the
    // image is dynamic (in which case they are retained for later updates).
    if (err || !bDynamic) {
        vkFreeMemory(device, m_stagingMemory, nullptr);
        vkDestroyBuffer(device, m_stagingBuffer, nullptr);
        m_stagingMemory = VK_NULL_HANDLE;
        m_stagingBuffer = VK_NULL_HANDLE;
    }
    if (err) {
        _DestroyResources();
        return WError(W_OUTOFMEMORY);
    }

    // Create image view
    // Textures are not directly accessed by the shaders and
    // are abstracted by image views containing additional
    // information and sub resource ranges
    VkImageViewCreateInfo view = vkTools::initializers::imageViewCreateInfo();
    view.image = VK_NULL_HANDLE;
    view.viewType = VK_IMAGE_VIEW_TYPE_2D;
    view.format = format;
    view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
    view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    view.subresourceRange.baseMipLevel = 0;
    view.subresourceRange.baseArrayLayer = 0;
    view.subresourceRange.layerCount = 1;
    // Linear tiling usually won't support mip maps
    // Only set mip map count if optimal tiling is used
    view.subresourceRange.levelCount = 1; // mips
    view.image = m_image;
    err = vkCreateImageView(device, &view, nullptr, &m_view);
    if (err) {
        _DestroyResources();
        return WError(W_UNABLETOCREATEIMAGE);
    }

    // Record the final image properties on the object.
    m_width = width;
    m_height = height;
    m_numComponents = num_components;
    m_componentSize = comp_size;
    m_mapSize = bufferCreateInfo.size;
    m_format = format;

    return WError(W_SUCCEEDED);
}
std::unique_ptr<StagingTexture2D> StagingTexture2DLinear::Create(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format) { VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageCreateInfo create_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType nullptr, // const void* pNext 0, // VkImageCreateFlags flags VK_IMAGE_TYPE_2D, // VkImageType imageType format, // VkFormat format {width, height, 1}, // VkExtent3D extent 1, // uint32_t mipLevels 1, // uint32_t arrayLayers VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples VK_IMAGE_TILING_LINEAR, // VkImageTiling tiling usage, // VkImageUsageFlags usage VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode 0, // uint32_t queueFamilyIndexCount nullptr, // const uint32_t* pQueueFamilyIndices VK_IMAGE_LAYOUT_PREINITIALIZED // VkImageLayout initialLayout }; VkImage image; VkResult res = vkCreateImage(g_vulkan_context->GetDevice(), &create_info, nullptr, &image); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkCreateImage failed: "); return nullptr; } VkMemoryRequirements memory_requirements; vkGetImageMemoryRequirements(g_vulkan_context->GetDevice(), image, &memory_requirements); bool is_coherent; u32 memory_type_index; if (type == STAGING_BUFFER_TYPE_READBACK) { memory_type_index = g_vulkan_context->GetReadbackMemoryType(memory_requirements.memoryTypeBits, &is_coherent); } else { memory_type_index = g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &is_coherent); } VkMemoryAllocateInfo memory_allocate_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType nullptr, // const void* pNext memory_requirements.size, // VkDeviceSize allocationSize memory_type_index // uint32_t memoryTypeIndex }; VkDeviceMemory memory; res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: "); 
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr); return nullptr; } res = vkBindImageMemory(g_vulkan_context->GetDevice(), image, memory, 0); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkBindImageMemory failed: "); vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr); return nullptr; } // Assume tight packing. Is this correct? u32 stride = width * Util::GetTexelSize(format); return std::make_unique<StagingTexture2DLinear>(type, width, height, format, stride, image, memory, memory_requirements.size, is_coherent); }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Draw Cube"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); if (info.gpu_props.limits.maxDescriptorSetUniformBuffersDynamic < 1) { std::cout << "No dynamic uniform buffers supported\n"; exit(-1); } init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); /* Set up uniform buffer with 2 transform matrices in it */ info.Projection = glm::perspective(glm::radians(45.0f), 1.0f, 0.1f, 100.0f); info.View = glm::lookAt( glm::vec3(0, 3, 10), // Camera is at (0,3,10), in World Space glm::vec3(0, 0, 0), // and looks at the origin glm::vec3(0, -1, 0) // Head is up (set to 0,-1,0 to look upside-down) ); info.Model = glm::mat4(1.0f); // Vulkan clip space has inverted Y and half Z. 
info.Clip = glm::mat4(1.0f, 0.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 1.0f); info.MVP = info.Clip * info.Projection * info.View * info.Model; /* VULKAN_KEY_START */ info.Model = glm::translate(info.Model, glm::vec3(1.5, 1.5, 1.5)); glm::mat4 MVP2 = info.Clip * info.Projection * info.View * info.Model; VkDeviceSize buf_size = sizeof(info.MVP); if (info.gpu_props.limits.minUniformBufferOffsetAlignment) buf_size = (buf_size + info.gpu_props.limits.minUniformBufferOffsetAlignment - 1) & ~(info.gpu_props.limits.minUniformBufferOffsetAlignment - 1); VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 2 * buf_size; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &info.uniform_data.buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, info.uniform_data.buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &alloc_info.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &alloc_info, NULL, &(info.uniform_data.mem)); assert(res == VK_SUCCESS); /* Map the buffer memory and copy both matrices */ uint8_t *pData; res = vkMapMemory(info.device, info.uniform_data.mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &info.MVP, sizeof(info.MVP)); pData += buf_size; memcpy(pData, &MVP2, sizeof(MVP2)); vkUnmapMemory(info.device, info.uniform_data.mem); res = vkBindBufferMemory(info.device, info.uniform_data.buf, 
info.uniform_data.mem, 0); assert(res == VK_SUCCESS); info.uniform_data.buffer_info.buffer = info.uniform_data.buf; info.uniform_data.buffer_info.offset = 0; info.uniform_data.buffer_info.range = buf_size; /* Init desciptor and pipeline layouts - descriptor type is * UNIFORM_BUFFER_DYNAMIC */ VkDescriptorSetLayoutBinding layout_bindings[2]; layout_bindings[0].binding = 0; layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; layout_bindings[0].descriptorCount = 1; layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_bindings[0].pImmutableSamplers = NULL; /* Next take layout bindings and use them to create a descriptor set layout */ VkDescriptorSetLayoutCreateInfo descriptor_layout = {}; descriptor_layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; descriptor_layout.pNext = NULL; descriptor_layout.bindingCount = 1; descriptor_layout.pBindings = layout_bindings; info.desc_layout.resize(NUM_DESCRIPTOR_SETS); res = vkCreateDescriptorSetLayout(info.device, &descriptor_layout, NULL, info.desc_layout.data()); assert(res == VK_SUCCESS); /* Now use the descriptor layout to create a pipeline layout */ VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = NUM_DESCRIPTOR_SETS; pPipelineLayoutCreateInfo.pSetLayouts = info.desc_layout.data(); res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); /* Create descriptor pool with UNIFOM_BUFFER_DYNAMIC type */ VkDescriptorPoolSize type_count[1]; type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; type_count[0].descriptorCount = 1; VkDescriptorPoolCreateInfo descriptor_pool = {}; descriptor_pool.sType = 
VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; descriptor_pool.pNext = NULL; descriptor_pool.maxSets = 1; descriptor_pool.poolSizeCount = 1; descriptor_pool.pPoolSizes = type_count; res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo desc_alloc_info[1]; desc_alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; desc_alloc_info[0].pNext = NULL; desc_alloc_info[0].descriptorPool = info.desc_pool; desc_alloc_info[0].descriptorSetCount = NUM_DESCRIPTOR_SETS; desc_alloc_info[0].pSetLayouts = info.desc_layout.data(); /* Allocate descriptor set with UNIFORM_BUFFER_DYNAMIC */ info.desc_set.resize(NUM_DESCRIPTOR_SETS); res = vkAllocateDescriptorSets(info.device, desc_alloc_info, info.desc_set.data()); assert(res == VK_SUCCESS); VkWriteDescriptorSet writes[1]; writes[0] = {}; writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writes[0].pNext = NULL; writes[0].dstSet = info.desc_set[0]; writes[0].descriptorCount = 1; writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; writes[0].pBufferInfo = &info.uniform_data.buffer_info; writes[0].dstArrayElement = 0; writes[0].dstBinding = 0; vkUpdateDescriptorSets(info.device, 1, writes, 0, NULL); init_pipeline_cache(info); init_pipeline(info, depthPresent); VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); 
assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); /* The first draw should use the first matrix in the buffer */ uint32_t uni_offsets[1] = {0}; vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 1, uni_offsets); const VkDeviceSize vtx_offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, vtx_offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); uni_offsets[0] = (uint32_t)buf_size; /* The second draw should use the second matrix in the buffer */ vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 1, uni_offsets); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout 
= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; 
present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "dynamicuniform"); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
// Setup the offscreen framebuffer for rendering the blurred scene
// The color attachment of this framebuffer will then be used to sample frame in the fragment shader of the final pass
void prepareOffscreen() {
    offscreenPass.width = FB_DIM;
    offscreenPass.height = FB_DIM;

    // Find a suitable depth format supported by the physical device
    VkFormat fbDepthFormat;
    VkBool32 validDepthFormat = vks::tools::getSupportedDepthFormat(physicalDevice, &fbDepthFormat);
    assert(validDepthFormat);

    // Color attachment.
    // NOTE: this create-info struct is deliberately reused further below for the
    // depth attachment, with only format/usage overwritten — keep the statement order.
    VkImageCreateInfo image = vks::initializers::imageCreateInfo();
    image.imageType = VK_IMAGE_TYPE_2D;
    image.format = FB_COLOR_FORMAT;
    image.extent.width = offscreenPass.width;
    image.extent.height = offscreenPass.height;
    image.extent.depth = 1;
    image.mipLevels = 1;
    image.arrayLayers = 1;
    image.samples = VK_SAMPLE_COUNT_1_BIT;
    image.tiling = VK_IMAGE_TILING_OPTIMAL;
    // We will sample directly from the color attachment
    image.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

    // memAlloc is also reused for the depth attachment allocation below
    VkMemoryAllocateInfo memAlloc = vks::initializers::memoryAllocateInfo();
    VkMemoryRequirements memReqs;

    VK_CHECK_RESULT(vkCreateImage(device, &image, nullptr, &offscreenPass.color.image));
    vkGetImageMemoryRequirements(device, offscreenPass.color.image, &memReqs);
    memAlloc.allocationSize = memReqs.size;
    memAlloc.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &offscreenPass.color.mem));
    VK_CHECK_RESULT(vkBindImageMemory(device, offscreenPass.color.image, offscreenPass.color.mem, 0));

    // View covering the single mip level / array layer of the color target
    VkImageViewCreateInfo colorImageView = vks::initializers::imageViewCreateInfo();
    colorImageView.viewType = VK_IMAGE_VIEW_TYPE_2D;
    colorImageView.format = FB_COLOR_FORMAT;
    colorImageView.subresourceRange = {};
    colorImageView.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    colorImageView.subresourceRange.baseMipLevel = 0;
    colorImageView.subresourceRange.levelCount = 1;
    colorImageView.subresourceRange.baseArrayLayer = 0;
    colorImageView.subresourceRange.layerCount = 1;
    colorImageView.image = offscreenPass.color.image;
    VK_CHECK_RESULT(vkCreateImageView(device, &colorImageView, nullptr, &offscreenPass.color.view));

    // Create sampler to sample from the attachment in the fragment shader
    VkSamplerCreateInfo samplerInfo = vks::initializers::samplerCreateInfo();
    samplerInfo.magFilter = VK_FILTER_LINEAR;
    samplerInfo.minFilter = VK_FILTER_LINEAR;
    samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
    samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    samplerInfo.addressModeV = samplerInfo.addressModeU;
    samplerInfo.addressModeW = samplerInfo.addressModeU;
    samplerInfo.mipLodBias = 0.0f;
    samplerInfo.maxAnisotropy = 1.0f;
    samplerInfo.minLod = 0.0f;
    samplerInfo.maxLod = 1.0f;
    samplerInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
    VK_CHECK_RESULT(vkCreateSampler(device, &samplerInfo, nullptr, &offscreenPass.sampler));

    // Depth stencil attachment — reuses the `image` create-info from above,
    // only format and usage change; all other fields (extent, tiling, ...) carry over
    image.format = fbDepthFormat;
    image.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;

    VK_CHECK_RESULT(vkCreateImage(device, &image, nullptr, &offscreenPass.depth.image));
    vkGetImageMemoryRequirements(device, offscreenPass.depth.image, &memReqs);
    memAlloc.allocationSize = memReqs.size;
    memAlloc.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &offscreenPass.depth.mem));
    VK_CHECK_RESULT(vkBindImageMemory(device, offscreenPass.depth.image, offscreenPass.depth.mem, 0));

    VkImageViewCreateInfo depthStencilView = vks::initializers::imageViewCreateInfo();
    depthStencilView.viewType = VK_IMAGE_VIEW_TYPE_2D;
    depthStencilView.format = fbDepthFormat;
    depthStencilView.flags = 0;
    depthStencilView.subresourceRange = {};
    // NOTE(review): stencil aspect is set unconditionally — assumes the chosen
    // fbDepthFormat has a stencil component; confirm against getSupportedDepthFormat
    depthStencilView.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    depthStencilView.subresourceRange.baseMipLevel = 0;
    depthStencilView.subresourceRange.levelCount = 1;
    depthStencilView.subresourceRange.baseArrayLayer = 0;
    depthStencilView.subresourceRange.layerCount = 1;
    depthStencilView.image = offscreenPass.depth.image;
    VK_CHECK_RESULT(vkCreateImageView(device, &depthStencilView, nullptr, &offscreenPass.depth.view));

    // Create a separate render pass for the offscreen rendering as it may differ from the one used for scene rendering
    std::array<VkAttachmentDescription, 2> attchmentDescriptions = {};
    // Color attachment: cleared on load, stored, handed off in SHADER_READ_ONLY
    // so the final pass can sample it without an extra barrier
    attchmentDescriptions[0].format = FB_COLOR_FORMAT;
    attchmentDescriptions[0].samples = VK_SAMPLE_COUNT_1_BIT;
    attchmentDescriptions[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attchmentDescriptions[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    attchmentDescriptions[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    attchmentDescriptions[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attchmentDescriptions[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attchmentDescriptions[0].finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    // Depth attachment: cleared, contents not needed after the pass (STORE_OP_DONT_CARE)
    attchmentDescriptions[1].format = fbDepthFormat;
    attchmentDescriptions[1].samples = VK_SAMPLE_COUNT_1_BIT;
    attchmentDescriptions[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attchmentDescriptions[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attchmentDescriptions[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    attchmentDescriptions[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attchmentDescriptions[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attchmentDescriptions[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

    VkAttachmentReference colorReference = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };
    VkAttachmentReference depthReference = { 1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL };

    VkSubpassDescription subpassDescription = {};
    subpassDescription.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpassDescription.colorAttachmentCount = 1;
    subpassDescription.pColorAttachments = &colorReference;
    subpassDescription.pDepthStencilAttachment = &depthReference;

    // Use subpass dependencies for layout transitions:
    // [0] external fragment-shader reads -> this pass's color writes
    // [1] this pass's color writes -> external fragment-shader reads (the sampling pass)
    std::array<VkSubpassDependency, 2> dependencies;
    dependencies[0].srcSubpass = VK_SUBPASS_EXTERNAL;
    dependencies[0].dstSubpass = 0;
    dependencies[0].srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    dependencies[0].dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dependencies[0].srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
    dependencies[0].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    dependencies[0].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
    dependencies[1].srcSubpass = 0;
    dependencies[1].dstSubpass = VK_SUBPASS_EXTERNAL;
    dependencies[1].srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dependencies[1].dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    dependencies[1].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    dependencies[1].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    dependencies[1].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;

    // Create the actual renderpass
    VkRenderPassCreateInfo renderPassInfo = {};
    renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    renderPassInfo.attachmentCount = static_cast<uint32_t>(attchmentDescriptions.size());
    renderPassInfo.pAttachments = attchmentDescriptions.data();
    renderPassInfo.subpassCount = 1;
    renderPassInfo.pSubpasses = &subpassDescription;
    renderPassInfo.dependencyCount = static_cast<uint32_t>(dependencies.size());
    renderPassInfo.pDependencies = dependencies.data();
    VK_CHECK_RESULT(vkCreateRenderPass(device, &renderPassInfo, nullptr, &offscreenPass.renderPass));

    // Framebuffer combining the color and depth views created above
    VkImageView attachments[2];
    attachments[0] = offscreenPass.color.view;
    attachments[1] = offscreenPass.depth.view;

    VkFramebufferCreateInfo fbufCreateInfo = vks::initializers::framebufferCreateInfo();
    fbufCreateInfo.renderPass = offscreenPass.renderPass;
    fbufCreateInfo.attachmentCount = 2;
    fbufCreateInfo.pAttachments = attachments;
    fbufCreateInfo.width = offscreenPass.width;
    fbufCreateInfo.height = offscreenPass.height;
    fbufCreateInfo.layers = 1;
    VK_CHECK_RESULT(vkCreateFramebuffer(device, &fbufCreateInfo, nullptr, &offscreenPass.frameBuffer));

    // Fill a descriptor for later use in a descriptor set
    offscreenPass.descriptor.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    offscreenPass.descriptor.imageView = offscreenPass.color.view;
    offscreenPass.descriptor.sampler = offscreenPass.sampler;
}
static void *per_thread_code(void *arg) { /* This code should be executed by each of the three threads. It will */ /* create a vertex buffer with position and color per vertex, then load */ /* commands into the thread's designated command buffer to draw the */ /* triangle */ VkResult U_ASSERT_ONLY res; size_t threadNum = (size_t)arg; VkCommandPoolCreateInfo poolInfo; poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; poolInfo.pNext = NULL; poolInfo.queueFamilyIndex = info.graphics_queue_family_index; poolInfo.flags = 0; vkCreateCommandPool(info.device, &poolInfo, NULL, &threadCmdPools[threadNum]); VkCommandBufferAllocateInfo cmdBufInfo; cmdBufInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmdBufInfo.pNext = NULL; cmdBufInfo.commandBufferCount = 1; cmdBufInfo.commandPool = threadCmdPools[threadNum]; cmdBufInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(info.device, &cmdBufInfo, &threadCmdBufs[threadNum]); VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; buf_info.size = 3 * sizeof(triData[0]); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &vertex_buffer[threadNum].buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, vertex_buffer[threadNum].buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; bool pass; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &alloc_info.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &alloc_info, NULL, &(vertex_buffer[threadNum].mem)); assert(res == 
VK_SUCCESS); uint8_t *pData; res = vkMapMemory(info.device, vertex_buffer[threadNum].mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &triData[threadNum * 3], 3 * sizeof(triData[0])); vkUnmapMemory(info.device, vertex_buffer[threadNum].mem); res = vkBindBufferMemory(info.device, vertex_buffer[threadNum].buf, vertex_buffer[threadNum].mem, 0); assert(res == VK_SUCCESS); VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = 0; cmd_buf_info.pInheritanceInfo = NULL; res = vkBeginCommandBuffer(threadCmdBufs[threadNum], &cmd_buf_info); assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 0; rp_begin.pClearValues = NULL; vkCmdBeginRenderPass(threadCmdBufs[threadNum], &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(threadCmdBufs[threadNum], VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(threadCmdBufs[threadNum], 0, 1, &vertex_buffer[threadNum].buf, offsets); VkViewport viewport; viewport.height = (float)info.height; viewport.width = (float)info.width; viewport.minDepth = (float)0.0f; viewport.maxDepth = (float)1.0f; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(threadCmdBufs[threadNum], 0, NUM_VIEWPORTS, &viewport); VkRect2D scissor; scissor.extent.width = info.width; scissor.extent.height = info.height; scissor.offset.x = 0; scissor.offset.y = 0; vkCmdSetScissor(threadCmdBufs[threadNum], 0, NUM_SCISSORS, &scissor); vkCmdDraw(threadCmdBufs[threadNum], 3, 1, 0, 0); 
vkCmdEndRenderPass(threadCmdBufs[threadNum]); res = vkEndCommandBuffer(threadCmdBufs[threadNum]); assert(res == VK_SUCCESS); return NULL; }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Texture Initialization Sample"; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_connection(info); init_window_size(info, 50, 50); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); /* VULKAN_KEY_START */ /* * Set up textures: * - Create a linear tiled image * - Map it and write the texture data into it * - If linear images cannot be used as textures, create an optimally * tiled image and blit from the linearly tiled image to the optimally * tiled image * - * - * - */ struct texture_object texObj; std::string filename = get_base_data_dir(); filename.append("lunarg.ppm"); if (!read_ppm(filename.c_str(), texObj.tex_width, texObj.tex_height, 0, NULL)) { std::cout << "Could not read texture file lunarg.ppm\n"; exit(-1); } VkFormatProperties formatProps; vkGetPhysicalDeviceFormatProperties(info.gpus[0], VK_FORMAT_R8G8B8A8_UNORM, &formatProps); /* See if we can use a linear tiled image for a texture, if not, we will * need a staging image for the texture data */ bool needStaging = (!(formatProps.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) ? 
true : false; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = texObj.tex_width; image_create_info.extent.height = texObj.tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = NUM_SAMPLES; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = needStaging ? VK_IMAGE_USAGE_TRANSFER_SRC_BIT : VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; VkImage mappableImage; VkDeviceMemory mappableMemory; VkMemoryRequirements mem_reqs; /* Create a mappable image. It will be the texture if linear images are ok * to be textures or it will be the staging image if they are not. 
*/ res = vkCreateImage(info.device, &image_create_info, NULL, &mappableImage); assert(res == VK_SUCCESS); vkGetImageMemoryRequirements(info.device, mappableImage, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; /* Find the memory type that is host mappable */ pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &mem_alloc.memoryTypeIndex); assert(pass); /* allocate memory */ res = vkAllocateMemory(info.device, &mem_alloc, NULL, &(mappableMemory)); assert(res == VK_SUCCESS); /* bind memory */ res = vkBindImageMemory(info.device, mappableImage, mappableMemory, 0); assert(res == VK_SUCCESS); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = 0; subres.arrayLayer = 0; VkSubresourceLayout layout; void *data; /* Get the subresource layout so we know what the row pitch is */ vkGetImageSubresourceLayout(info.device, mappableImage, &subres, &layout); res = vkMapMemory(info.device, mappableMemory, 0, mem_reqs.size, 0, &data); assert(res == VK_SUCCESS); /* Read the ppm file into the mappable image's memory */ if (!read_ppm(filename.c_str(), texObj.tex_width, texObj.tex_height, layout.rowPitch, (unsigned char *)data)) { std::cout << "Could not load texture file lunarg.ppm\n"; exit(-1); } vkUnmapMemory(info.device, mappableMemory); if (!needStaging) { /* If we can use the linear tiled image as a texture, just do it */ texObj.image = mappableImage; texObj.mem = mappableMemory; texObj.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; set_image_layout(info, texObj.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, texObj.imageLayout); } else { /* The mappable image cannot be our texture, so create an optimally * tiled image and blit to it */ image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; res = 
vkCreateImage(info.device, &image_create_info, NULL, &texObj.image); assert(res == VK_SUCCESS); vkGetImageMemoryRequirements(info.device, texObj.image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; /* Find memory type - don't specify any mapping requirements */ pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, 0, &mem_alloc.memoryTypeIndex); assert(pass); /* allocate memory */ res = vkAllocateMemory(info.device, &mem_alloc, NULL, &texObj.mem); assert(res == VK_SUCCESS); /* bind memory */ res = vkBindImageMemory(info.device, texObj.image, texObj.mem, 0); assert(res == VK_SUCCESS); /* Since we're going to blit from the mappable image, set its layout to * SOURCE_OPTIMAL */ /* Side effect is that this will create info.cmd */ set_image_layout(info, mappableImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); /* Since we're going to blit to the texture image, set its layout to * DESTINATION_OPTIMAL */ set_image_layout(info, texObj.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); VkImageCopy copy_region; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.srcOffset.x = 0; copy_region.srcOffset.y = 0; copy_region.srcOffset.z = 0; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.mipLevel = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.dstSubresource.layerCount = 1; copy_region.dstOffset.x = 0; copy_region.dstOffset.y = 0; copy_region.dstOffset.z = 0; copy_region.extent.width = texObj.tex_width; copy_region.extent.height = texObj.tex_height; copy_region.extent.depth = 1; /* Put the copy command into the command buffer */ vkCmdCopyImage(info.cmd, mappableImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, texObj.image, 
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ©_region); /* Set the layout for the texture image from DESTINATION_OPTIMAL to * SHADER_READ_ONLY */ texObj.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; set_image_layout(info, texObj.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, texObj.imageLayout); } execute_end_command_buffer(info); execute_queue_command_buffer(info); VkSamplerCreateInfo samplerCreateInfo = {}; samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.magFilter = VK_FILTER_NEAREST; samplerCreateInfo.minFilter = VK_FILTER_NEAREST; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.mipLodBias = 0.0; samplerCreateInfo.anisotropyEnable = VK_FALSE, samplerCreateInfo.maxAnisotropy = 0; samplerCreateInfo.compareEnable = VK_FALSE; samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0; samplerCreateInfo.maxLod = 0.0; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; /* create sampler */ res = vkCreateSampler(info.device, &samplerCreateInfo, NULL, &texObj.sampler); assert(res == VK_SUCCESS); VkImageViewCreateInfo view_info = {}; view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view_info.pNext = NULL; view_info.image = VK_NULL_HANDLE; view_info.viewType = VK_IMAGE_VIEW_TYPE_2D; view_info.format = VK_FORMAT_R8G8B8A8_UNORM; view_info.components.r = VK_COMPONENT_SWIZZLE_R; view_info.components.g = VK_COMPONENT_SWIZZLE_G; view_info.components.b = VK_COMPONENT_SWIZZLE_B; view_info.components.a = VK_COMPONENT_SWIZZLE_A; view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; view_info.subresourceRange.baseMipLevel = 0; view_info.subresourceRange.levelCount = 1; view_info.subresourceRange.baseArrayLayer = 
0; view_info.subresourceRange.layerCount = 1; /* create image view */ view_info.image = texObj.image; res = vkCreateImageView(info.device, &view_info, NULL, &texObj.view); assert(res == VK_SUCCESS); info.textures.push_back(texObj); /* VULKAN_KEY_END */ /* Clean Up */ vkDestroySampler(info.device, texObj.sampler, NULL); vkDestroyImageView(info.device, texObj.view, NULL); vkDestroyImage(info.device, texObj.image, NULL); vkFreeMemory(info.device, texObj.mem, NULL); if (needStaging) { /* Release the resources for the staging image */ vkFreeMemory(info.device, mappableMemory, NULL); vkDestroyImage(info.device, mappableImage, NULL); } destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
void loadTexture(const char* fileName, VkFormat format, bool forceLinearTiling) { VkFormatProperties formatProperties; VkResult err; AAsset* asset = AAssetManager_open(app->activity->assetManager, fileName, AASSET_MODE_STREAMING); assert(asset); size_t size = AAsset_getLength(asset); assert(size > 0); void *textureData = malloc(size); AAsset_read(asset, textureData, size); AAsset_close(asset); gli::texture2D tex2D(gli::load((const char*)textureData, size)); assert(!tex2D.empty()); texture.width = tex2D[0].dimensions().x; texture.height = tex2D[0].dimensions().y; texture.mipLevels = tex2D.levels(); // Get device properites for the requested texture format vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProperties); // Only use linear tiling if requested (and supported by the device) // Support for linear tiling is mostly limited, so prefer to use // optimal tiling instead // On most implementations linear tiling will only support a very // limited amount of formats and features (mip maps, cubemaps, arrays, etc.) VkBool32 useStaging = true; // Only use linear tiling if forced if (forceLinearTiling) { // Don't use linear if format is not supported for (linear) shader sampling useStaging = !(formatProperties.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT); } VkImageCreateInfo imageCreateInfo = vkTools::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.mipLevels = 1; imageCreateInfo.arrayLayers = 1; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR; imageCreateInfo.usage = (useStaging) ? 
VK_IMAGE_USAGE_TRANSFER_SRC_BIT : VK_IMAGE_USAGE_SAMPLED_BIT; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.flags = 0; imageCreateInfo.extent = { texture.width, texture.height, 1 }; VkMemoryAllocateInfo memAllocInfo = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; startSetupCommandBuffer(); if (useStaging) { // Load all available mip levels into linear textures // and copy to optimal tiling target struct MipLevel { VkImage image; VkDeviceMemory memory; }; std::vector<MipLevel> mipLevels; mipLevels.resize(texture.mipLevels); // Copy mip levels for (uint32_t level = 0; level < texture.mipLevels; ++level) { imageCreateInfo.extent.width = tex2D[level].dimensions().x; imageCreateInfo.extent.height = tex2D[level].dimensions().y; imageCreateInfo.extent.depth = 1; err = vkCreateImage(device, &imageCreateInfo, nullptr, &mipLevels[level].image); assert(!err); vkGetImageMemoryRequirements(device, mipLevels[level].image, &memReqs); memAllocInfo.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAllocInfo.memoryTypeIndex); err = vkAllocateMemory(device, &memAllocInfo, nullptr, &mipLevels[level].memory); assert(!err); err = vkBindImageMemory(device, mipLevels[level].image, mipLevels[level].memory, 0); assert(!err); VkImageSubresource subRes = {}; subRes.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkSubresourceLayout subResLayout; void *data; vkGetImageSubresourceLayout(device, mipLevels[level].image, &subRes, &subResLayout); assert(!err); err = vkMapMemory(device, mipLevels[level].memory, 0, memReqs.size, 0, &data); assert(!err); size_t levelSize = tex2D[level].size(); memcpy(data, tex2D[level].data(), levelSize); vkUnmapMemory(device, mipLevels[level].memory); LOGW("setImageLayout %d", 1); // Image barrier for linear image (base) // Linear image will be used as a source for the copy vkTools::setImageLayout( setupCmdBuffer, mipLevels[level].image, 
VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); } // Setup texture as blit target with optimal tiling imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; imageCreateInfo.mipLevels = texture.mipLevels; imageCreateInfo.extent = { texture.width, texture.height, 1 }; err = vkCreateImage(device, &imageCreateInfo, nullptr, &texture.image); assert(!err); vkGetImageMemoryRequirements(device, texture.image, &memReqs); memAllocInfo.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAllocInfo.memoryTypeIndex); err = vkAllocateMemory(device, &memAllocInfo, nullptr, &texture.deviceMemory); assert(!err); err = vkBindImageMemory(device, texture.image, texture.deviceMemory, 0); assert(!err); // Image barrier for optimal image (target) // Optimal image will be used as destination for the copy vkTools::setImageLayout( setupCmdBuffer, texture.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); // Copy mip levels one by one for (uint32_t level = 0; level < texture.mipLevels; ++level) { // Copy region for image blit VkImageCopy copyRegion = {}; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = { 0, 0, 0 }; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.baseArrayLayer = 0; // Set mip level to copy the linear image to copyRegion.dstSubresource.mipLevel = level; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = { 0, 0, 0 }; copyRegion.extent.width = tex2D[level].dimensions().x; copyRegion.extent.height = tex2D[level].dimensions().y; copyRegion.extent.depth = 1; // Put image copy into command buffer vkCmdCopyImage( setupCmdBuffer, 
mipLevels[level].image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, texture.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ©Region); // Change texture image layout to shader read after the copy texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vkTools::setImageLayout( setupCmdBuffer, texture.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, texture.imageLayout); } // Clean up linear images // No longer required after mip levels // have been transformed over to optimal tiling for (auto& level : mipLevels) { vkDestroyImage(device, level.image, nullptr); vkFreeMemory(device, level.memory, nullptr); } } else { // Prefer using optimal tiling, as linear tiling // may support only a small set of features // depending on implementation (e.g. no mip maps, only one layer, etc.) VkImage mappableImage; VkDeviceMemory mappableMemory; // Load mip map level 0 to linear tiling image err = vkCreateImage(device, &imageCreateInfo, nullptr, &mappableImage); assert(!err); // Get memory requirements for this image // like size and alignment vkGetImageMemoryRequirements(device, mappableImage, &memReqs); // Set memory allocation size to required memory size memAllocInfo.allocationSize = memReqs.size; // Get memory type that can be mapped to host memory getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAllocInfo.memoryTypeIndex); // Allocate host memory err = vkAllocateMemory(device, &memAllocInfo, nullptr, &mappableMemory); assert(!err); // Bind allocated image for use err = vkBindImageMemory(device, mappableImage, mappableMemory, 0); assert(!err); // Get sub resource layout // Mip map count, array layer, etc. VkImageSubresource subRes = {}; subRes.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkSubresourceLayout subResLayout; void *data; // Get sub resources layout // Includes row pitch, size offsets, etc. 
vkGetImageSubresourceLayout(device, mappableImage, &subRes, &subResLayout); assert(!err); // Map image memory err = vkMapMemory(device, mappableMemory, 0, memReqs.size, 0, &data); assert(!err); // Copy image data into memory memcpy(data, tex2D[subRes.mipLevel].data(), tex2D[subRes.mipLevel].size()); vkUnmapMemory(device, mappableMemory); // Linear tiled images don't need to be staged // and can be directly used as textures texture.image = mappableImage; texture.deviceMemory = mappableMemory; texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; // Setup image memory barrier vkTools::setImageLayout( setupCmdBuffer, texture.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, texture.imageLayout); } flushSetupCommandBuffer(); // Create sampler // In Vulkan textures are accessed by samplers // This separates all the sampling information from the // texture data // This means you could have multiple sampler objects // for the same texture with different settings // Similar to the samplers available with OpenGL 3.3 VkSamplerCreateInfo sampler = vkTools::initializers::samplerCreateInfo(); sampler.magFilter = VK_FILTER_LINEAR; sampler.minFilter = VK_FILTER_LINEAR; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeV = sampler.addressModeU; sampler.addressModeW = sampler.addressModeU; sampler.mipLodBias = 0.0f; sampler.compareOp = VK_COMPARE_OP_NEVER; sampler.minLod = 0.0f; // Max level-of-detail should match mip level count sampler.maxLod = (useStaging) ? 
(float)texture.mipLevels : 0.0f; // Enable anisotropic filtering sampler.maxAnisotropy = 8; sampler.anisotropyEnable = VK_TRUE; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; err = vkCreateSampler(device, &sampler, nullptr, &texture.sampler); assert(!err); // Create image view // Textures are not directly accessed by the shaders and // are abstracted by image views containing additional // information and sub resource ranges VkImageViewCreateInfo view = vkTools::initializers::imageViewCreateInfo(); view.image = VK_NULL_HANDLE; view.viewType = VK_IMAGE_VIEW_TYPE_2D; view.format = format; view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; view.subresourceRange.baseMipLevel = 0; view.subresourceRange.baseArrayLayer = 0; view.subresourceRange.layerCount = 1; // Linear tiling usually won't support mip maps // Only set mip map count if optimal tiling is used view.subresourceRange.levelCount = (useStaging) ? texture.mipLevels : 1; view.image = texture.image; err = vkCreateImageView(device, &view, nullptr, &texture.view); assert(!err); }
void prepareVertices() { // Setup vertices std::vector<Vertex> vertexBuffer; vertexBuffer.push_back({ { 1.0f, 1.0f, 0.0f },{ 1.0f, 1.0f } }); vertexBuffer.push_back({ { -1.0f, 1.0f, 0.0f },{ 0.0f, 1.0f } }); vertexBuffer.push_back({ { -1.0f, -1.0f, 0.0f },{ 0.0f, 0.0f } }); vertexBuffer.push_back({ { 1.0f, -1.0f, 0.0f },{ 1.0f, 0.0f } }); int vertexBufferSize = vertexBuffer.size() * sizeof(Vertex); // Setup indices std::vector<uint32_t> indexBuffer; indexBuffer.push_back(0); indexBuffer.push_back(1); indexBuffer.push_back(2); indexBuffer.push_back(2); indexBuffer.push_back(3); indexBuffer.push_back(0); int indexBufferSize = indexBuffer.size() * sizeof(uint32_t); VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; VkMemoryRequirements memReqs; VkResult err; void *data; // Generate vertex buffer // Setup VkBufferCreateInfo bufInfo = {}; bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufInfo.pNext = NULL; bufInfo.size = vertexBufferSize; bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; bufInfo.flags = 0; // Copy vertex data to VRAM memset(&vertices, 0, sizeof(vertices)); err = vkCreateBuffer(device, &bufInfo, nullptr, &vertices.buf); assert(!err); vkGetBufferMemoryRequirements(device, vertices.buf, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); vkAllocateMemory(device, &memAlloc, nullptr, &vertices.mem); assert(!err); err = vkMapMemory(device, vertices.mem, 0, memAlloc.allocationSize, 0, &data); assert(!err); memcpy(data, vertexBuffer.data(), vertexBufferSize); vkUnmapMemory(device, vertices.mem); assert(!err); err = vkBindBufferMemory(device, vertices.buf, vertices.mem, 0); assert(!err); // Generate index buffer // Setup VkBufferCreateInfo indexbufferInfo = {}; indexbufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; 
indexbufferInfo.pNext = NULL; indexbufferInfo.size = indexBufferSize; indexbufferInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; indexbufferInfo.flags = 0; // Copy index data to VRAM memset(&indices, 0, sizeof(indices)); err = vkCreateBuffer(device, &bufInfo, nullptr, &indices.buf); assert(!err); vkGetBufferMemoryRequirements(device, indices.buf, &memReqs); memAlloc.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAlloc.memoryTypeIndex); err = vkAllocateMemory(device, &memAlloc, nullptr, &indices.mem); assert(!err); err = vkMapMemory(device, indices.mem, 0, indexBufferSize, 0, &data); assert(!err); memcpy(data, indexBuffer.data(), indexBufferSize); vkUnmapMemory(device, indices.mem); err = vkBindBufferMemory(device, indices.buf, indices.mem, 0); assert(!err); indices.count = indexBuffer.size(); // Binding description vertices.bindingDescriptions.resize(1); vertices.bindingDescriptions[0] = vkTools::initializers::vertexInputBindingDescription( VERTEX_BUFFER_BIND_ID, sizeof(Vertex), VK_VERTEX_INPUT_RATE_VERTEX); // Attribute descriptions // Describes memory layout and shader positions vertices.attributeDescriptions.resize(2); // Location 0 : Position vertices.attributeDescriptions[0] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 0, VK_FORMAT_R32G32B32_SFLOAT, 0); // Location 1 : UV vertices.attributeDescriptions[1] = vkTools::initializers::vertexInputAttributeDescription( VERTEX_BUFFER_BIND_ID, 1, VK_FORMAT_R32G32_SFLOAT, sizeof(float) * 3); // Assign to vertex buffer vertices.inputState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertices.inputState.pNext = NULL; vertices.inputState.vertexBindingDescriptionCount = vertices.bindingDescriptions.size(); vertices.inputState.pVertexBindingDescriptions = vertices.bindingDescriptions.data(); vertices.inputState.vertexAttributeDescriptionCount = vertices.attributeDescriptions.size(); 
vertices.inputState.pVertexAttributeDescriptions = vertices.attributeDescriptions.data(); }
std::unique_ptr<Texture2D> Texture2D::Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples, VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage) { VkImageCreateInfo image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, 0, VK_IMAGE_TYPE_2D, format, {width, height, 1}, levels, layers, samples, tiling, usage, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImage image = VK_NULL_HANDLE; VkResult res = vkCreateImage(g_vulkan_context->GetDevice(), &image_info, nullptr, &image); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkCreateImage failed: "); return nullptr; } // Allocate memory to back this texture, we want device local memory in this case VkMemoryRequirements memory_requirements; vkGetImageMemoryRequirements(g_vulkan_context->GetDevice(), image, &memory_requirements); VkMemoryAllocateInfo memory_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, nullptr, memory_requirements.size, g_vulkan_context->GetMemoryType(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)}; VkDeviceMemory device_memory; res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_info, nullptr, &device_memory); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: "); vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr); return nullptr; } res = vkBindImageMemory(g_vulkan_context->GetDevice(), image, device_memory, 0); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkBindImageMemory failed: "); vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), device_memory, nullptr); return nullptr; } VkImageViewCreateInfo view_info = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image, view_type, format, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {Util::IsDepthFormat(format) ? 
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) : static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT), 0, levels, 0, layers}}; VkImageView view = VK_NULL_HANDLE; res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkCreateImageView failed: "); vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), device_memory, nullptr); return nullptr; } return std::make_unique<Texture2D>(width, height, levels, layers, format, samples, view_type, image, device_memory, view); }
tut1_error tut7_create_images(struct tut1_physical_device *phy_dev, struct tut2_device *dev, struct tut7_image *images, uint32_t image_count) { /* * In this function, we will create a bunch of images. Images in graphics serve essentially two purposes. One * is to provide data to shaders, traditionally known as textures. Another is to render into either as the * final result or for further use, traditionally also known as textures. Vulkan calls all of these "images", * which are just glorified "buffers". We already worked with a Vulkan buffer in Tutorial 4, which was just an * array of data. Images on the other hand can have up to 3 dimensions, a format (such as BGRA), multisampling * properties, tiling properties and a layout. They are glorified buffers because all of these features can be * emulated with buffers, although besides requiring more work in the shaders, using images also allows a lot * more optimization by the device and its driver. * * That said, creating images is fairly similar to buffers. You create an image, allocate memory to it, create * an image view for access to the image, you bind it to a command buffer through a descriptor set and go on * using it in the shaders. Like buffers, you can choose to initialize the image. The data sent through * images could be anything, such as textures used to draw objects, patterns used by a shader to apply an * effect, or just general data for the shaders. The image can be written to as well. The data written to an * image could also be for anything, such as the final colors that go on to be displayed on the screen, the * depth or stencil data, the output of a filter used for further processing, the processed image to be * retrieved by the application (e.g. used by gimp), a texture that evolves over time, etc. * * Loading the image data is outside the scope of this tutorial, so we'll leave that for another time. 
Once * the image is created, it's device memory can be mapped, loaded and unmapped, so it is not necessary to do * that in this function either. */ uint32_t successful = 0; tut1_error retval = TUT1_ERROR_NONE; VkResult res; for (uint32_t i = 0; i < image_count; ++i) { images[i].image = NULL; images[i].image_mem = NULL; images[i].view = NULL; images[i].sampler = NULL; /* * To create an image, we need a CreateInfo struct as usual. Some parts of this struct is similar to * VkBufferCreateInfo from Tutorial 3. The ones that need explanation are explained here. The image * type specifies what are the dimensions of the image. In these tutorial series, we will use 2D * images for simplicity. Also for simplicity, let's ignore mipmapping and image layers. The image * format is one of VK_FORMAT. A normal format could be VK_FORMAT_B8G8R8A8_UNORM, but it might make * sense to use other formats, especially for images that would get their data from a texture file. * * If the image is going to be initialized, for example from a texture file, then the structure of the * image data, otherwise known as "tiling", must be set to linear. This means that the image is stored * as a normal row-major array, so that when its memory is mapped by the application, it would make * sense! If the image is not to be initialized on the other hand, it is better to keep the tiling as * optimal, which means whatever format the GPU likes best. It is necessary for the application to * copy a linear image to an optimal one for GPU usage. If the application wants to read the image * back, it must copy it from an optimal image to a linear one. This also means that the `usage` of * the linear images can contain only TRANSFER_SRC and TRANSFER_DST bits. More on image copies when we * actually start using them. * * Linear images are sampled only once. Optimal images can be multisampled. 
You can read about * multisampling online (from OpenGL), but in short it asks for each pixel to be sampled at multiple * locations inside the pixel, which helps with antialiasing. Here, we will simply choose a higher * number of samples as allowed by the GPU (retrieved with vkGetPhysicalDeviceImageFormatProperties * below). * * Linear images are also restricted to 2D, no mipmapping and no layers, which is fortunate because we * wanted those for simplicity anyway! There is also a restriction on the format of the image, which * cannot be depth/stencil. * * In Tutorial 3, we specified the buffer usage as storage, which was a rather generic specification. * For the image, we have more options to specify the usage. Choosing the minimum usage bits for each * image, specifying only what we actually want to do with the image allows the GPU to possibly place * the image in the most optimal memory location, or load/unload the image at necessary times. This is * left to the application to provide as it varies from case to case. The usages in short are: * * Transfer src/dst: whether the image can be used as a source/destination of an image copy. * Sampled: whether the image can be sampled by a shader. * Storage: whether the image can be read from and written to by a shader. * Color attachment: whether the image can be used as a render target (for color). * Depth/stencil attachment: whether the image can be used as a render target (for depth/stencil). * Transient attachment: whether the image is lazily allocated (ignored for now). * Input attachment: whether the image can be read (unfiltered) by a shader (ignored for now). * * If the image was to be shared between queue families, it should be declared with a special * `sharingMode` specifying that there would be concurrent access to the image, and by which queue * families. We are going to use the images and views created here in multiple pipelines, one for each * swapchain image. 
Since those pipelines may be created on top of different queue families, we need * to tell Vulkan that these images would be shared. At the time of this writing, on Nvidia cards * there is only one queue family and sharing is meaningless. However, it is legal for a driver to * expose multiple similar queue families instead of one queue family with multiple queues. The * application is expected to provide the queue families that would use this image. Most likely, the * result of `tut7_get_presentable_queues` is what you would want. * * Finally, an image has a layout. Each layout is limited in what operations can be done in it, but * instead is optimal for a task. The possible image layouts are: * * Undefined: no device access is allowed. This is used as an initial layout and must be transitioned * away from before use. * Preinitialized: no device access is allowed. Similar to undefined, this is only an initial layout * and must be transitioned away from before use. The only difference is that the contents of the * image are kept during the transition. * General: supports all types of device access. * Color attachment optimal: only usable with color attachment images. * Depth/stencil attachment optimal: only usable with depth/stencil attachment images. * Depth/stencil read-only optimal: only usable with depth/stencil attachment images. The difference * between this and the depth/stencil attachment optimal layout is that this image can also be used * as a read-only sampled image or input attachment for use by the shaders. * Shader read-only optimal: only usable with sampled and input attachment images. Similar to * depth/stencil read-only optimal, this layout can be used as a read-only image or input attachment * for use by the shaders. * Transfer src/dst optimal: only usable with transfer src/dst images and must only be used as the * source or destination of an image transfer. * Present src (extension): used for presenting an image to a swapchain. 
An image taken from the * swapchain is in this layout and must be transitioned away before use after * vkAcquireNextImageKHR. Before giving the image back with vkQueuePresentKHR, it must be * transitioned again to this layout. * * For linear images, which are going to be initialized by the application, we will use the * preinitialized layout. Otherwise, the layout must be undefined and later transitioned to the * desired layout using a pipeline barrier (more on this later). */ VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL; VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_1_BIT; VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED; if (images[i].will_be_initialized || images[i].host_visible) { images[i].usage &= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; layout = VK_IMAGE_LAYOUT_PREINITIALIZED; tiling = VK_IMAGE_TILING_LINEAR; } else if (images[i].multisample) { /* * To get the format properties for an image, we need to tell Vulkan how we expect to create the image, * i.e. what is its format, type, tiling, usage and flags (which we didn't use). We could check many * of the parameters given to this function with the properties returned from this function, but we'll * just take a possible sampling count out of it, and assume the parameters are fine. In a real * application, you would want to do more validity checks. */ VkImageFormatProperties format_properties; res = vkGetPhysicalDeviceImageFormatProperties(phy_dev->physical_device, images[i].format, VK_IMAGE_TYPE_2D, tiling, images[i].usage, 0, &format_properties); tut1_error_sub_set_vkresult(&retval, res); if (res == 0) { for (uint32_t s = VK_SAMPLE_COUNT_16_BIT; s != 0; s >>= 1) if ((format_properties.sampleCounts & s)) { samples = s; break; } } } /* * Create the image with the above description as usual. The CreateInfo struct takes the parameters * and memory allocation callbacks are not used. 
*/ bool shared = images[i].sharing_queue_count > 1; struct VkImageCreateInfo image_info = { .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, .imageType = VK_IMAGE_TYPE_2D, .format = images[i].format, .extent = {images[i].extent.width, images[i].extent.height, 1}, .mipLevels = 1, .arrayLayers = 1, .samples = samples, .tiling = tiling, .usage = images[i].usage, .sharingMode = shared?VK_SHARING_MODE_CONCURRENT:VK_SHARING_MODE_EXCLUSIVE, .queueFamilyIndexCount = shared?images[i].sharing_queue_count:0, .pQueueFamilyIndices = shared?images[i].sharing_queues:NULL, .initialLayout = layout, }; res = vkCreateImage(dev->device, &image_info, NULL, &images[i].image); tut1_error_sub_set_vkresult(&retval, res); if (res) continue; /* * In Tutorial 4, we created a buffer, allocated memory for it and bound the memory to the buffer. * Images are glorified buffers and the process is similar. The same argument regarding host-coherent * memory holds here as well. So, if the image requires device-local memory, we will look for that, * otherwise we will look for memory that is not just host-visible, but also host-coherent. */ VkMemoryRequirements mem_req = {0}; vkGetImageMemoryRequirements(dev->device, images[i].image, &mem_req); uint32_t mem_index = tut4_find_suitable_memory(phy_dev, dev, &mem_req, images[i].host_visible? 
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT: VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); if (mem_index >= phy_dev->memories.memoryTypeCount) continue; VkMemoryAllocateInfo mem_info = { .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, .allocationSize = mem_req.size, .memoryTypeIndex = mem_index, }; res = vkAllocateMemory(dev->device, &mem_info, NULL, &images[i].image_mem); tut1_error_sub_set_vkresult(&retval, res); if (res) continue; res = vkBindImageMemory(dev->device, images[i].image, images[i].image_mem, 0); tut1_error_sub_set_vkresult(&retval, res); if (res) continue; if (images[i].make_view) { /* * Once we have an image, we need a view on the image to be able to use it. This is just like in * Tutorial 4 where we had a view on the buffer to work with it. In Tutorial 4, we had divided up the * buffer for concurrent processing in the shaders, and each view looked at a specific part of the * buffer. With images, this could also be useful, for example if one large image contains multiple * areas of interest (such as a texture) where different shaders need to look at. However, let's keep * things as simple as possible and create a view that is as large as the image itself. * * The image view's CreateInfo is largely similar to the one for buffer views. For image views, we * need to specify which components of the image we want to view and the range is not a simple * (offset, size) as was in the buffer view. * * For the components, we have the option to not only select which components (R, G, B and A) to view, * but also to remap them (this operation is called swizzle). For example to get the value of the red * component in place of alpha etc. The mapping for each component can be specified separately, and * mapping 0 means identity. We are not going to remap anything, so we'll leave all fields in * `components` be 0. 
* * The range of the image asks for which mipmap levels and image array layers we are interested in, * which are simply both 0 because we have only one of each. As part of the range of the view, we also * need to specify which aspect of the image we are looking it. This could be color, depth, stencil * etc. Here, we will decide the aspect based on the image usage; if it's used as depth/stencil, we * will set both depth and stencil aspects. Otherwise we will view the color aspect. */ VkImageViewCreateInfo view_info = { .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, .image = images[i].image, .viewType = VK_IMAGE_VIEW_TYPE_2D, .format = images[i].format, .subresourceRange = { .aspectMask = (images[i].usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0? VK_IMAGE_ASPECT_COLOR_BIT: VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, .baseMipLevel = 0, .levelCount = VK_REMAINING_MIP_LEVELS, .baseArrayLayer = 0, .layerCount = VK_REMAINING_ARRAY_LAYERS, }, }; res = vkCreateImageView(dev->device, &view_info, NULL, &images[i].view); tut1_error_sub_set_vkresult(&retval, res); if (res) continue; } if ((images[i].usage & VK_IMAGE_USAGE_SAMPLED_BIT)) { /* * If the image is going to be sampled, we can create a sampler for it as well. A sampler * specifies how to sample an image. An image is just a glorified buffer, i.e., it's just an * array, as I have said before as well. However, the sampler is what makes using images so * much more powerful. When accessing a buffer, you can access each index individually. With * a sampler, you can access an image at non-integer indices. The sampler then "filters" the * image to provide some data for that index. * * The simplest example is magnification. If you sample the image at coordinates (u+0.5,v) * where u and v are integer pixel locations, then the color you get could be the average of * the colors (values) at coordinates (u,v) and (u+1,v). Vulkan uses the term `texel` to refer * to these "texture" pixels. 
* * The sampler parameters are explained below: * * - magFilter, minFilter: what to do if asked to sample between the texels. The options are * to take the value of the nearest texel, or interpolate between neighbors. Think about it * as what to do if you try to zoom in or out of an image. We'll go with interpolation, * since it's nicer. * - mipmapMode: similarly, if the image has multiple mipmap levels, accessing between the * levels could either interpolate between two levels or clamp to the nearest. We don't use * mipmaps here, so this doesn't matter, but let's tell it to interpolate anyway. * - addressModeU/V/W: this specifies what happens if you access outside the image. The * options are to: * * repeat the image as if it was a tiled to infinity in each direction, * * mirrored repeat the image as if a larger image containing the image and its mirror * was tiled to infinity in each direction, * * clamp to edge so that any access out of the image boundaries returns the value at the * closest point on the edge of the image, * * clamp to border so that any access out of the image boundaries returns a special * "border" value for the image (border value defined below), * * mirrored clamp to edge so that any access out of the image boundaries returns the value * at the closest point on the edge of a larger image that is made up of the image and its * mirror. * Each of these modes is useful in different situations. "Repeat" is probably the most * problematic as it introduces discontinuity around the edges. "Mirrored" solves this * problem and can add some interesting effects. "Clamp to edge" also solves this problem, * and let's just use that. "Clamp to border" would introduce other edges, and I imagine is * most useful for debugging. * - anisotropyEnable, maxAnisotropy: whether anisotropic filtering is enabled and by how * much. Anisotropic filtering is expensive but nice, so let's enable it. 
The maximum value * for anisotropic filtering can be retrieved from the device's limits. * - compareEnable, compareOp: this is used with depth images to result in a reading of 0 or 1 * based on the result of a compare operation. We are not interested in this for now. * - minLod, maxLod: the level-of-detail value (mip level) gets clamped to these values. We * are not using mipmapped images, so we'll just give 0 and 1 respectively. * - borderColor: if the "clamp to border" addressing mode was selected, out-of-bound accesses * to the image would return the border color, which is set here. Options are limited: * transparent, white and black. Since we're using "clamp to edge" addressing, this value is * not used. * - unnormalizedCoordinates: with Vulkan, you can either index the image using the * unnormalized coordinates, so that u and v span from 0 to size of the image, or you can * access the image using normalized coordinates, so that u and v span from 0 to 1. * Unnormalized coordinates can be useful in some circumstances, but normalized coordinates * lets you access the image without dealing with its sizes. Aside from that, with * unnormalized coordinates, you are limited in the type of images you can access; only 1D * and 2D images with a single layer and single mip level are acceptable and essentially all * other features of the sampler must be disabled too. Needless to say, we will use * normalized coordinates. 
*/ VkSamplerCreateInfo sampler_info = { .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, .magFilter = VK_FILTER_LINEAR, .minFilter = VK_FILTER_LINEAR, .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR, .addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, .addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, .addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, .anisotropyEnable = true, .maxAnisotropy = phy_dev->properties.limits.maxSamplerAnisotropy, .minLod = 0, .maxLod = 1, }; res = vkCreateSampler(dev->device, &sampler_info, NULL, &images[i].sampler); tut1_error_sub_set_vkresult(&retval, res); if (res) continue; } ++successful; } /* * Now that you have learned all about images, we're not going to use them in this tutorial. Please don't hate * me. There is already so much here that rendering textured images can wait. It was not all in vein though * because we would need image views on the swapchain images anyway. Now at least you understand the * properties and restrictions of the swapchain images better. */ tut1_error_set_vkresult(&retval, successful == image_count?VK_SUCCESS:VK_INCOMPLETE); return retval; }
bool VKRenderPass::setupAttachmentImages() { VKRenderer* renderer = VKRenderer::RendererInstance; VkFormat depthFormat = renderer->GetPreferredDepthFormat(); renderer->CreateSetupCommandBuffer(); VkCommandBuffer setupCommand = renderer->GetSetupCommandBuffer(); VkResult err; //If width and height were not set, lets use the size of the screen that the renderer reports if (m_width == 0) m_width = renderer->GetWidth(); if (m_height == 0) m_height = renderer->GetHeight(); //Create an image for every output texture for (size_t i = 0; i < m_outputRenderTargets.size(); i++) { VKRenderTargetHandle vkRenderTarget = m_outputRenderTargets[i].DynamicCastHandle<VKRenderTarget>(); VkFormat colorFormat = vkRenderTarget->GetVKColorFormat(); //Attachment image that we will push back into a vector Image_vk colorImage; uint32_t width = vkRenderTarget->GetWidth(); uint32_t height = vkRenderTarget->GetHeight(); if (width == 0) width = m_width; if (height == 0) height = m_height; //Color attachment VkImageCreateInfo imageInfo = {}; imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; imageInfo.pNext = nullptr; imageInfo.format = colorFormat; imageInfo.imageType = VK_IMAGE_TYPE_2D; imageInfo.extent = { width, height, 1 }; imageInfo.mipLevels = 1; imageInfo.arrayLayers = 1; imageInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; imageInfo.flags = 0; VkMemoryAllocateInfo memAllocInfo = {}; memAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; VkMemoryRequirements memReqs; err = vkCreateImage(m_device, &imageInfo, nullptr, &colorImage.image); assert(!err); if (err != VK_SUCCESS) { HT_DEBUG_PRINTF("VKRenderTarget::VPrepare(): Error creating color image!\n"); return false; } vkGetImageMemoryRequirements(m_device, colorImage.image, &memReqs); memAllocInfo.allocationSize = memReqs.size; renderer->MemoryTypeFromProperties(memReqs.memoryTypeBits, 
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAllocInfo.memoryTypeIndex); err = vkAllocateMemory(m_device, &memAllocInfo, nullptr, &colorImage.memory); assert(!err); if (err != VK_SUCCESS) { HT_DEBUG_PRINTF("VKRenderTarget::VPrepare(): Error allocating color image memory!\n"); return false; } err = vkBindImageMemory(m_device, colorImage.image, colorImage.memory, 0); if (err != VK_SUCCESS) { HT_DEBUG_PRINTF("VKRenderTarget::VPrepare(): Error binding color image memory!\n"); return false; } renderer->SetImageLayout(setupCommand, colorImage.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); VkImageViewCreateInfo viewInfo = {}; viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; viewInfo.pNext = nullptr; viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; viewInfo.format = colorFormat; viewInfo.flags = 0; viewInfo.subresourceRange = {}; viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; viewInfo.subresourceRange.baseMipLevel = 0; viewInfo.subresourceRange.levelCount = 1; viewInfo.subresourceRange.baseArrayLayer = 0; viewInfo.subresourceRange.layerCount = 1; viewInfo.image = colorImage.image; err = vkCreateImageView(m_device, &viewInfo, nullptr, &colorImage.view); if (err != VK_SUCCESS) { HT_DEBUG_PRINTF("VKRenderTarget::VPrepare(): Error creating color image view!\n"); return false; } m_colorImages.push_back(colorImage); } //Create depth buffer VkImageCreateInfo imageInfo = {}; imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; imageInfo.pNext = nullptr; imageInfo.format = depthFormat; imageInfo.imageType = VK_IMAGE_TYPE_2D; imageInfo.extent = { m_width, m_height, 1 }; imageInfo.mipLevels = 1; imageInfo.arrayLayers = 1; imageInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageInfo.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; imageInfo.flags = 0; VkMemoryAllocateInfo memAllocInfo = {}; memAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; 
VkMemoryRequirements memReqs; err = vkCreateImage(m_device, &imageInfo, nullptr, &m_depthImage.image); assert(!err); if (err != VK_SUCCESS) { HT_DEBUG_PRINTF("VKRenderTarget::VPrepare(): Error creating color image!\n"); return false; } vkGetImageMemoryRequirements(m_device, m_depthImage.image, &memReqs); memAllocInfo.allocationSize = memReqs.size; renderer->MemoryTypeFromProperties(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAllocInfo.memoryTypeIndex); err = vkAllocateMemory(m_device, &memAllocInfo, nullptr, &m_depthImage.memory); assert(!err); if (err != VK_SUCCESS) { HT_DEBUG_PRINTF("VKRenderTarget::VPrepare(): Error allocating color image memory!\n"); return false; } err = vkBindImageMemory(m_device, m_depthImage.image, m_depthImage.memory, 0); if (err != VK_SUCCESS) { HT_DEBUG_PRINTF("VKRenderTarget::VPrepare(): Error binding color image memory!\n"); return false; } renderer->SetImageLayout(setupCommand, m_depthImage.image, VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); VkImageViewCreateInfo viewInfo = {}; viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; viewInfo.pNext = nullptr; viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; viewInfo.format = depthFormat; viewInfo.flags = 0; viewInfo.subresourceRange = {}; viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; viewInfo.subresourceRange.baseMipLevel = 0; viewInfo.subresourceRange.levelCount = 1; viewInfo.subresourceRange.baseArrayLayer = 0; viewInfo.subresourceRange.layerCount = 1; viewInfo.image = m_depthImage.image; err = vkCreateImageView(m_device, &viewInfo, nullptr, &m_depthImage.view); if (err != VK_SUCCESS) { HT_DEBUG_PRINTF("VKRenderTarget::VPrepare(): Error creating color image view!\n"); return false; } renderer->FlushSetupCommandBuffer(); return true; }
/*
 * Create `buffer_count` buffers, allocate device memory for each and bind it,
 * optionally also creating a buffer view (texel buffers only).
 *
 * Failures are per-buffer: a buffer that fails any step is skipped (its
 * handles stay NULL) and the loop continues with the next one.  The returned
 * tut1_error aggregates the individual results and resolves to VK_SUCCESS
 * only if every buffer was fully created, VK_INCOMPLETE otherwise.
 */
tut1_error tut7_create_buffers(struct tut1_physical_device *phy_dev, struct tut2_device *dev,
		struct tut7_buffer *buffers, uint32_t buffer_count)
{
	/* We have already seen buffer create in Tutorial 4, so we'll go over this quickly. */
	uint32_t successful = 0;
	tut1_error retval = TUT1_ERROR_NONE;
	VkResult res;

	for (uint32_t i = 0; i < buffer_count; ++i)
	{
		/* Reset the outputs so a partially-created buffer is observable as NULL handles. */
		buffers[i].buffer = NULL;
		buffers[i].buffer_mem = NULL;
		buffers[i].view = NULL;

		/*
		 * The buffer CreateInfo is much simpler than the image CreateInfo.  The only part of it we didn't see
		 * in Tutorial 4 is sharing the buffer between queue families.  The parameters for that are exactly the
		 * same as the image CreateInfo.
		 *
		 * The size of the buffer depends on its format, but let's not worry about translating the size for
		 * each possible format and lazily assume 4 bytes, which covers a lot of formats (even if
		 * overestimating them).  Naturally, doing this is not really advised.
		 */
		bool shared = buffers[i].sharing_queue_count > 1;
		VkBufferCreateInfo buffer_info = {
			.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
			.size = buffers[i].size * sizeof(float),
			.usage = buffers[i].usage,
			.sharingMode = shared?VK_SHARING_MODE_CONCURRENT:VK_SHARING_MODE_EXCLUSIVE,
			.queueFamilyIndexCount = shared?buffers[i].sharing_queue_count:0,
			.pQueueFamilyIndices = shared?buffers[i].sharing_queues:NULL,
		};

		res = vkCreateBuffer(dev->device, &buffer_info, NULL, &buffers[i].buffer);
		tut1_error_sub_set_vkresult(&retval, res);
		if (res)
			continue;

		/* Pick a memory type: host-visible+coherent for CPU-accessed buffers, device-local otherwise. */
		VkMemoryRequirements mem_req = {0};
		vkGetBufferMemoryRequirements(dev->device, buffers[i].buffer, &mem_req);
		uint32_t mem_index = tut4_find_suitable_memory(phy_dev, dev, &mem_req,
				buffers[i].host_visible?
				VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT:
				VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
		if (mem_index >= phy_dev->memories.memoryTypeCount)
			continue;

		VkMemoryAllocateInfo mem_info = {
			.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
			.allocationSize = mem_req.size,
			.memoryTypeIndex = mem_index,
		};

		res = vkAllocateMemory(dev->device, &mem_info, NULL, &buffers[i].buffer_mem);
		tut1_error_sub_set_vkresult(&retval, res);
		if (res)
			continue;

		res = vkBindBufferMemory(dev->device, buffers[i].buffer, buffers[i].buffer_mem, 0);
		tut1_error_sub_set_vkresult(&retval, res);
		if (res)
			continue;

		if (buffers[i].make_view)
		{
			/* A buffer view can only be created on uniform and storage texel buffers */
			if ((buffers[i].usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) || (buffers[i].usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT))
			{
				VkBufferViewCreateInfo view_info = {
					.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
					.buffer = buffers[i].buffer,
					.format = buffers[i].format,
					.offset = 0,
					.range = VK_WHOLE_SIZE,
				};

				res = vkCreateBufferView(dev->device, &view_info, NULL, &buffers[i].view);
				tut1_error_sub_set_vkresult(&retval, res);
				if (res)
					continue;
			}
		}

		++successful;
	}

	tut1_error_set_vkresult(&retval, successful == buffer_count?VK_SUCCESS:VK_INCOMPLETE);
	return retval;
}

/*
 * Load an array of SPIR-V shader modules.  Per-shader failures are merged
 * into the aggregate error; the result is VK_SUCCESS only if every shader
 * loaded, VK_INCOMPLETE otherwise.
 */
tut1_error tut7_load_shaders(struct tut2_device *dev,
		struct tut7_shader *shaders, uint32_t shader_count)
{
	/*
	 * We already saw how to load a shader in Tutorial 3.  This function is just an array version of it.  Nothing
	 * fancy here.
	 */
	uint32_t successful = 0;
	tut1_error retval = TUT1_ERROR_NONE;
	tut1_error err;

	for (uint32_t i = 0; i < shader_count; ++i)
	{
		err = tut3_load_shader(dev, shaders[i].spirv_file, &shaders[i].shader);
		tut1_error_sub_merge(&retval, &err);
		if (!tut1_error_is_success(&err))
			continue;

		++successful;
	}

	tut1_error_set_vkresult(&retval, successful == shader_count?VK_SUCCESS:VK_INCOMPLETE);
	return retval;
}

tut1_error tut7_create_graphics_buffers(struct tut1_physical_device *phy_dev, struct tut2_device *dev,
		VkSurfaceFormatKHR surface_format, struct tut7_graphics_buffers *graphics_buffers,
		uint32_t graphics_buffer_count, VkRenderPass *render_pass)
{
	/*
	 * To render on a screen, we need a series of stuff.  We need images to render to.  We also need to tell our
	 * graphics pipeline that we are going to use those images.  In fact, similar to how we make descriptor set and
	 * pipeline layouts to define stuff, then bind the actual sets and pipeline, we only specify what "kind" of
	 * images we will use and then bind the actual images.
	 *
	 * Vulkan uses the concept of render passes to perform the rendering.  A render pass could consist of multiple
	 * subpasses, but let's not bother with that for now.  Only thing to know is that the images used in rendering,
	 * whether it's color, depth/stencil, input etc, are called "attachments".  When creating a render pass, we
	 * define what sort of attachments it would take and what are the dependencies between the subpasses.  We are
	 * going with a single subpass, so things would be simpler.  The render pass is used to create a graphics
	 * pipeline.
	 *
	 * When we are going to actually render something, we need to bind those attachments using real images.  The
	 * construct that holds the attachments together is called a "framebuffer".  Commonly, we need color and
	 * depth/stencil attachments for rendering, so we need to provide views on them to a framebuffer.  We already
	 * have an image created by the swapchain for our color output.  We need to create the depth/stencil buffer
	 * ourselves.
	 *
	 * So I lied when I said in the end of `tut7_create_images` that we won't use that function.  For the color
	 * image, we will just create a view (which we already saw how to do in that function), and we will use that
	 * function to create our depth/stencil image and its view.
	 *
	 * If we wanted to render to an image, without presenting on the screen, here is where the difference would be,
	 * that is, we would be creating an image for the color attachment ourselves, instead of using one created by
	 * the swapchain.
	 */
	uint32_t successful = 0;
	tut1_error retval = TUT1_ERROR_NONE;
	VkResult res;
	tut1_error err;

	/* Reset all outputs first so cleanup on failure only sees valid or NULL handles. */
	for (uint32_t i = 0; i < graphics_buffer_count; ++i)
	{
		graphics_buffers[i].color_view = NULL;
		graphics_buffers[i].depth = (struct tut7_image){0};
		graphics_buffers[i].framebuffer = NULL;
	}

	/* Get a format for the depth/stencil image that supports depth/stencil attachment. */
	VkFormat depth_format = tut7_get_supported_depth_stencil_format(phy_dev);;

	/*
	 * Since the render pass just defines how the attachments look like, we need only one for use with all of our
	 * swapchain images.  On the other hand, we need a different framebuffer for each swapchain image (together
	 * with its corresponding depth/stencil image).
	 *
	 * Our render pass has two attachments; the color image and the depth/stencil image.  Each of these attachments
	 * needs to be specified separately:
	 *
	 * - In the case of the color image, the format is given by `surface_format`, and in the case of the
	 *   depth/stencil image, the format is the one we just decided on above.
	 * - For now, we don't do multisampling, so the number of samples is set to 1 for both attachments.
	 * - At the beginning and end of each subpass, the render pass can either keep or clear/discard the contents of
	 *   an attachment.  We use one subpass, so this doesn't really matter, but we'll go with clearing the image at
	 *   the beginning and keeping the contents at the end.  If not specified, the default action (value 0) is to
	 *   keep the previous data at the beginning and preserve it at the end as well.
	 * - The render pass also declares a promise that the driver would find the attachment in a certain layout at
	 *   the beginning of the render pass, and that the attachment would be at a certain layout at the end.  For our
	 *   color attachment, it will start and end in the color attachment layout.  The case is similar for the
	 *   depth/stencil buffer.  We don't intend to transition them to another layout.
	 *
	 * Next, we need to declare the subpasses of the render pass.  We use only one, so this is more
	 * straightforward.  The information required here are:
	 *
	 * - pipeline bind point: whether compute or graphics pipelines are going to use this subpass.  We want
	 *   graphics now, so we'll go with that, but anyway compute is not even supported (at the time of this
	 *   writing).
	 * - Attachments: which attachments to use, and which to simply preserve.  If an attachment is neither used nor
	 *   preserved, its contents become undefined.  If the attachment is decorated with `location=X` in glsl, then
	 *   pInputAttachments[X] is used if it's an input, or pColorAttachments[X] is used if it's an output.  This is
	 *   other than the `binding=Y` decoration that specifies the buffer/image as specified by the descriptor set.
	 *   There can be multiple input and color attachments (hence the `location=X` binding required above), but
	 *   only one depth/stencil attachment.
	 *
	 * In the end, if we had multiple subpasses, their dependencies would also need to be declared.  What's nice
	 * about subpasses is that there is an automatic layout transition between subpasses (as specified by
	 * VkAttachmentReference values), and there is a whole set of rules that allow the driver to safely perform
	 * this transition before or after the subpass has actually started.  Again, we have a single subpass, so none
	 * of this matters now.
	 */
	VkAttachmentDescription render_pass_attachments[2] = {
		[0] = {
			.format = surface_format.format,
			.samples = VK_SAMPLE_COUNT_1_BIT,
			.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
			.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
			.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
			.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
		},
		[1] = {
			.format = depth_format,
			.samples = VK_SAMPLE_COUNT_1_BIT,
			.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
			.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
			.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
			.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
		},
	};
	VkAttachmentReference render_pass_attachment_references[2] = {
		[0] = {
			.attachment = 0,	/* corresponds to the index in pAttachments of VkRenderPassCreateInfo */
			.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
		},
		[1] = {
			.attachment = 1,
			.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
		},
	};
std::unique_ptr<StagingTexture2D> StagingTexture2DBuffer::Create(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format) { // Assume tight packing. u32 row_stride = Util::GetTexelSize(format) * width; u32 buffer_size = row_stride * height; VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkBufferCreateInfo buffer_create_info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType nullptr, // const void* pNext 0, // VkBufferCreateFlags flags buffer_size, // VkDeviceSize size usage, // VkBufferUsageFlags usage VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode 0, // uint32_t queueFamilyIndexCount nullptr // const uint32_t* pQueueFamilyIndices }; VkBuffer buffer; VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: "); return nullptr; } VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements); bool is_coherent; u32 memory_type_index; if (type == STAGING_BUFFER_TYPE_READBACK) { memory_type_index = g_vulkan_context->GetReadbackMemoryType(memory_requirements.memoryTypeBits, &is_coherent); } else { memory_type_index = g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &is_coherent); } VkMemoryAllocateInfo memory_allocate_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType nullptr, // const void* pNext memory_requirements.size, // VkDeviceSize allocationSize memory_type_index // uint32_t memoryTypeIndex }; VkDeviceMemory memory; res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); return nullptr; } res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0); if (res != 
VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr); return nullptr; } return std::make_unique<StagingTexture2DBuffer>(type, width, height, format, row_stride, buffer, memory, buffer_size, is_coherent); }
/*
 * Vertex Buffer sample: brings up a Vulkan instance/device/swapchain, creates
 * a host-visible vertex buffer, writes cube vertex data into it, and records
 * a render pass that binds the buffer with vkCmdBindVertexBuffers.  No draw
 * call is issued; the sample only demonstrates vertex-buffer setup/binding.
 * Returns 0 on success (failures abort via assert).
 */
int sample_main(int argc, char *argv[]) {
    VkResult U_ASSERT_ONLY res;
    bool U_ASSERT_ONLY pass;
    struct sample_info info = {};
    char sample_title[] = "Vertex Buffer Sample";
    const bool depthPresent = true;

    /* Standard sample boilerplate: instance, device, window, swapchain,
     * depth buffer, render pass and framebuffers. */
    init_global_layer_properties(info);
    init_instance_extension_names(info);
    init_device_extension_names(info);
    init_instance(info, sample_title);
    init_enumerate_device(info);
    init_window_size(info, 500, 500);
    init_connection(info);
    init_window(info);
    init_swapchain_extension(info);
    init_device(info);
    init_command_pool(info);
    init_command_buffer(info);
    execute_begin_command_buffer(info);
    init_device_queue(info);
    init_swap_chain(info);
    init_depth_buffer(info);
    init_renderpass(info, depthPresent);
    init_framebuffers(info, depthPresent);

    /* VULKAN_KEY_START */
    /*
     * Set up a vertex buffer:
     * - Create a buffer
     * - Map it and write the vertex data into it
     * - Bind it using vkCmdBindVertexBuffers
     * - Later, at pipeline creation,
     * - fill in vertex input part of the pipeline with relevent data
     */
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
    buf_info.size = sizeof(g_vb_solid_face_colors_Data);
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    buf_info.flags = 0;
    res = vkCreateBuffer(info.device, &buf_info, NULL, &info.vertex_buffer.buf);
    assert(res == VK_SUCCESS);

    /* Get memory requirements (size, alignment, allowed memory types) for the buffer. */
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(info.device, info.vertex_buffer.buf, &mem_reqs);

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    alloc_info.allocationSize = mem_reqs.size;
    /* Require host-visible, host-coherent memory so the CPU can write vertex
     * data through a mapping without explicit flushes. */
    pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits,
                                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                       &alloc_info.memoryTypeIndex);
    assert(pass && "No mappable, coherent memory");

    res = vkAllocateMemory(info.device, &alloc_info, NULL, &(info.vertex_buffer.mem));
    assert(res == VK_SUCCESS);

    /* Fill the allocation with the vertex data.  Note: vkMapMemory operates
     * on the VkDeviceMemory object, so the memory is mapped, written and
     * unmapped before it is bound to the buffer below. */
    uint8_t *pData;
    res = vkMapMemory(info.device, info.vertex_buffer.mem, 0, mem_reqs.size, 0, (void **)&pData);
    assert(res == VK_SUCCESS);

    memcpy(pData, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data));

    vkUnmapMemory(info.device, info.vertex_buffer.mem);

    res = vkBindBufferMemory(info.device, info.vertex_buffer.buf, info.vertex_buffer.mem, 0);
    assert(res == VK_SUCCESS);

    /* We won't use these here, but we will need this info when creating the
     * pipeline */
    info.vi_binding.binding = 0;
    info.vi_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
    info.vi_binding.stride = sizeof(g_vb_solid_face_colors_Data[0]);
    /* Attribute 0: position (vec4 at offset 0); attribute 1: color (vec4 at offset 16). */
    info.vi_attribs[0].binding = 0;
    info.vi_attribs[0].location = 0;
    info.vi_attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
    info.vi_attribs[0].offset = 0;
    info.vi_attribs[1].binding = 0;
    info.vi_attribs[1].location = 1;
    info.vi_attribs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT;
    info.vi_attribs[1].offset = 16;

    const VkDeviceSize offsets[1] = {0};

    /* We cannot bind the vertex buffer until we begin a renderpass */
    VkClearValue clear_values[2];
    clear_values[0].color.float32[0] = 0.2f;
    clear_values[0].color.float32[1] = 0.2f;
    clear_values[0].color.float32[2] = 0.2f;
    clear_values[0].color.float32[3] = 0.2f;
    clear_values[1].depthStencil.depth = 1.0f;
    clear_values[1].depthStencil.stencil = 0;

    VkSemaphore imageAcquiredSemaphore;
    VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo;
    imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    imageAcquiredSemaphoreCreateInfo.pNext = NULL;
    imageAcquiredSemaphoreCreateInfo.flags = 0;
    res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &imageAcquiredSemaphore);
    assert(res == VK_SUCCESS);

    // Get the index of the next available swapchain image:
    res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, imageAcquiredSemaphore, VK_NULL_HANDLE,
                                &info.current_buffer);
    // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR
    // return codes
    assert(res == VK_SUCCESS);

    VkRenderPassBeginInfo rp_begin = {};
    rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rp_begin.pNext = NULL;
    rp_begin.renderPass = info.render_pass;
    rp_begin.framebuffer = info.framebuffers[info.current_buffer];
    rp_begin.renderArea.offset.x = 0;
    rp_begin.renderArea.offset.y = 0;
    rp_begin.renderArea.extent.width = info.width;
    rp_begin.renderArea.extent.height = info.height;
    rp_begin.clearValueCount = 2;
    rp_begin.pClearValues = clear_values;

    vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);

    vkCmdBindVertexBuffers(info.cmd, 0,             /* Start Binding */
                           1,                       /* Binding Count */
                           &info.vertex_buffer.buf, /* pBuffers */
                           offsets);                /* pOffsets */

    vkCmdEndRenderPass(info.cmd);
    execute_end_command_buffer(info);
    execute_queue_command_buffer(info);
    /* VULKAN_KEY_END */

    /* Tear everything down in reverse creation order. */
    vkDestroySemaphore(info.device, imageAcquiredSemaphore, NULL);
    vkDestroyBuffer(info.device, info.vertex_buffer.buf, NULL);
    vkFreeMemory(info.device, info.vertex_buffer.mem, NULL);
    destroy_framebuffers(info);
    destroy_renderpass(info);
    destroy_depth_buffer(info);
    destroy_swap_chain(info);
    destroy_command_buffer(info);
    destroy_command_pool(info);
    destroy_device(info);
    destroy_window(info);
    destroy_instance(info);
    return 0;
}
void loadCubemap(std::string filename, VkFormat format, bool forceLinearTiling) { #if defined(__ANDROID__) // Textures are stored inside the apk on Android (compressed) // So they need to be loaded via the asset manager AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING); assert(asset); size_t size = AAsset_getLength(asset); assert(size > 0); void *textureData = malloc(size); AAsset_read(asset, textureData, size); AAsset_close(asset); gli::texture_cube texCube(gli::load((const char*)textureData, size)); #else gli::texture_cube texCube(gli::load(filename)); #endif assert(!texCube.empty()); cubeMap.width = texCube.extent().x; cubeMap.height = texCube.extent().y; cubeMap.mipLevels = texCube.levels(); VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; // Create a host-visible staging buffer that contains the raw image data VkBuffer stagingBuffer; VkDeviceMemory stagingMemory; VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo(); bufferCreateInfo.size = texCube.size(); // This buffer is used as a transfer source for the buffer copy bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer)); // Get memory requirements for the staging buffer (alignment, memory type bits) vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs); memAllocInfo.allocationSize = memReqs.size; // Get memory type index for a host visible buffer memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &stagingMemory)); VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0)); // Copy texture data into staging buffer uint8_t *data; 
VK_CHECK_RESULT(vkMapMemory(device, stagingMemory, 0, memReqs.size, 0, (void **)&data)); memcpy(data, texCube.data(), texCube.size()); vkUnmapMemory(device, stagingMemory); // Create optimal tiled target image VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.mipLevels = cubeMap.mipLevels; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; imageCreateInfo.extent = { cubeMap.width, cubeMap.height, 1 }; imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; // Cube faces count as array layers in Vulkan imageCreateInfo.arrayLayers = 6; // This flag is required for cube map images imageCreateInfo.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &cubeMap.image)); vkGetImageMemoryRequirements(device, cubeMap.image, &memReqs); memAllocInfo.allocationSize = memReqs.size; memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &cubeMap.deviceMemory)); VK_CHECK_RESULT(vkBindImageMemory(device, cubeMap.image, cubeMap.deviceMemory, 0)); VkCommandBuffer copyCmd = VulkanExampleBase::createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); // Setup buffer copy regions for each face including all of it's miplevels std::vector<VkBufferImageCopy> bufferCopyRegions; uint32_t offset = 0; for (uint32_t face = 0; face < 6; face++) { for (uint32_t level = 0; level < cubeMap.mipLevels; level++) { VkBufferImageCopy bufferCopyRegion = {}; bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; 
bufferCopyRegion.imageSubresource.mipLevel = level; bufferCopyRegion.imageSubresource.baseArrayLayer = face; bufferCopyRegion.imageSubresource.layerCount = 1; bufferCopyRegion.imageExtent.width = texCube[face][level].extent().x; bufferCopyRegion.imageExtent.height = texCube[face][level].extent().y; bufferCopyRegion.imageExtent.depth = 1; bufferCopyRegion.bufferOffset = offset; bufferCopyRegions.push_back(bufferCopyRegion); // Increase offset into staging buffer for next level / face offset += texCube[face][level].size(); } } // Image barrier for optimal image (target) // Set initial layout for all array layers (faces) of the optimal (target) tiled texture VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.baseMipLevel = 0; subresourceRange.levelCount = cubeMap.mipLevels; subresourceRange.layerCount = 6; vks::tools::setImageLayout( copyCmd, cubeMap.image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy the cube map faces from the staging buffer to the optimal tiled image vkCmdCopyBufferToImage( copyCmd, stagingBuffer, cubeMap.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<uint32_t>(bufferCopyRegions.size()), bufferCopyRegions.data() ); // Change texture image layout to shader read after all faces have been copied cubeMap.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vks::tools::setImageLayout( copyCmd, cubeMap.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cubeMap.imageLayout, subresourceRange); VulkanExampleBase::flushCommandBuffer(copyCmd, queue, true); // Create sampler VkSamplerCreateInfo sampler = vks::initializers::samplerCreateInfo(); sampler.magFilter = VK_FILTER_LINEAR; sampler.minFilter = VK_FILTER_LINEAR; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeV = sampler.addressModeU; sampler.addressModeW = sampler.addressModeU; sampler.mipLodBias = 
0.0f; sampler.compareOp = VK_COMPARE_OP_NEVER; sampler.minLod = 0.0f; sampler.maxLod = cubeMap.mipLevels; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; sampler.maxAnisotropy = 1.0f; if (vulkanDevice->features.samplerAnisotropy) { sampler.maxAnisotropy = vulkanDevice->properties.limits.maxSamplerAnisotropy; sampler.anisotropyEnable = VK_TRUE; } VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &cubeMap.sampler)); // Create image view VkImageViewCreateInfo view = vks::initializers::imageViewCreateInfo(); // Cube map view type view.viewType = VK_IMAGE_VIEW_TYPE_CUBE; view.format = format; view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; view.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }; // 6 array layers (faces) view.subresourceRange.layerCount = 6; // Set number of mip levels view.subresourceRange.levelCount = cubeMap.mipLevels; view.image = cubeMap.image; VK_CHECK_RESULT(vkCreateImageView(device, &view, nullptr, &cubeMap.view)); // Clean up staging resources vkFreeMemory(device, stagingMemory, nullptr); vkDestroyBuffer(device, stagingBuffer, nullptr); }
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Draw Cube"; process_command_line_args(info, argc, argv); init_global_layer_properties(info); info.instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); #ifdef _WIN32 info.instance_extension_names.push_back( VK_KHR_WIN32_SURFACE_EXTENSION_NAME); #else info.instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); #endif info.device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, false); init_renderpass(info, DEPTH_PRESENT); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, DEPTH_PRESENT); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); init_descriptor_pool(info, false); init_descriptor_set(info, false); init_pipeline_cache(info); init_pipeline(info, DEPTH_PRESENT); /* VULKAN_KEY_START */ VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; res 
= vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, NULL, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); /* Allocate a uniform buffer that will take query results. */ VkBuffer query_result_buf; VkDeviceMemory query_result_mem; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; buf_info.size = 4 * sizeof(uint64_t); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &query_result_buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, query_result_buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &alloc_info.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &alloc_info, NULL, &query_result_mem); assert(res == VK_SUCCESS); res = vkBindBufferMemory(info.device, query_result_buf, query_result_mem, 0); assert(res == VK_SUCCESS); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_info; query_pool_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_info.pNext = NULL; query_pool_info.queryType = VK_QUERY_TYPE_OCCLUSION; query_pool_info.flags = 0; query_pool_info.queryCount = 2; query_pool_info.pipelineStatistics = 0; res = 
vkCreateQueryPool(info.device, &query_pool_info, NULL, &query_pool); assert(res == VK_SUCCESS); vkCmdResetQueryPool(info.cmd, query_pool, 0 /*startQuery*/, 2 /*queryCount*/); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); VkViewport viewport; viewport.height = (float)info.height; viewport.width = (float)info.width; viewport.minDepth = (float)0.0f; viewport.maxDepth = (float)1.0f; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport); VkRect2D scissor; scissor.extent.width = info.width; scissor.extent.height = info.height; scissor.offset.x = 0; scissor.offset.y = 0; vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor); vkCmdBeginQuery(info.cmd, query_pool, 0 /*slot*/, 0 /*flags*/); vkCmdEndQuery(info.cmd, query_pool, 0 /*slot*/); vkCmdBeginQuery(info.cmd, query_pool, 1 /*slot*/, 0 /*flags*/); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); vkCmdEndQuery(info.cmd, query_pool, 1 /*slot*/); vkCmdCopyQueryPoolResults( info.cmd, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, query_result_buf, 0 /*dstOffset*/, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); VkImageMemoryBarrier prePresentBarrier 
= {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = 0; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); res = vkQueueWaitIdle(info.queue); assert(res == VK_SUCCESS); uint64_t samples_passed[4]; 
samples_passed[0] = 0; samples_passed[1] = 0; res = vkGetQueryPoolResults( info.device, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, sizeof(samples_passed) /*dataSize*/, samples_passed, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); assert(res == VK_SUCCESS); std::cout << "vkGetQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed[1] << "\n"; /* Read back query result from buffer */ uint64_t *samples_passed_ptr; res = vkMapMemory(info.device, query_result_mem, 0, mem_reqs.size, 0, (void **)&samples_passed_ptr); assert(res == VK_SUCCESS); std::cout << "vkCmdCopyQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed_ptr[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed_ptr[1] << "\n"; vkUnmapMemory(info.device, query_result_mem); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "occlusion_query"); vkDestroyBuffer(info.device, query_result_buf, NULL); vkFreeMemory(info.device, query_result_mem, NULL); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyQueryPool(info.device, query_pool, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); 
destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
bool StreamBuffer::ResizeBuffer(size_t size) { // Create the buffer descriptor VkBufferCreateInfo buffer_create_info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType nullptr, // const void* pNext 0, // VkBufferCreateFlags flags static_cast<VkDeviceSize>(size), // VkDeviceSize size m_usage, // VkBufferUsageFlags usage VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode 0, // uint32_t queueFamilyIndexCount nullptr // const uint32_t* pQueueFamilyIndices }; VkBuffer buffer = VK_NULL_HANDLE; VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: "); return false; } // Get memory requirements (types etc) for this buffer VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements); // Aim for a coherent mapping if possible. u32 memory_type_index = g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &m_coherent_mapping); // Allocate memory for backing this buffer VkMemoryAllocateInfo memory_allocate_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType nullptr, // const void* pNext memory_requirements.size, // VkDeviceSize allocationSize memory_type_index // uint32_t memoryTypeIndex }; VkDeviceMemory memory = VK_NULL_HANDLE; res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); return false; } // Bind memory to buffer res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr); return false; } // Map this buffer into user-space 
void* mapped_ptr = nullptr; res = vkMapMemory(g_vulkan_context->GetDevice(), memory, 0, size, 0, &mapped_ptr); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkMapMemory failed: "); vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr); vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr); return false; } // Unmap current host pointer (if there was a previous buffer) if (m_host_pointer) vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory); // Destroy the backings for the buffer after the command buffer executes if (m_buffer != VK_NULL_HANDLE) g_command_buffer_mgr->DeferBufferDestruction(m_buffer); if (m_memory != VK_NULL_HANDLE) g_command_buffer_mgr->DeferDeviceMemoryDestruction(m_memory); // Replace with the new buffer m_buffer = buffer; m_memory = memory; m_host_pointer = reinterpret_cast<u8*>(mapped_ptr); m_current_size = size; m_current_offset = 0; m_current_gpu_position = 0; m_tracked_fences.clear(); return true; }
void loadTextureArray(std::string filename, VkFormat format) { #if defined(__ANDROID__) // Textures are stored inside the apk on Android (compressed) // So they need to be loaded via the asset manager AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING); assert(asset); size_t size = AAsset_getLength(asset); assert(size > 0); void *textureData = malloc(size); AAsset_read(asset, textureData, size); AAsset_close(asset); gli::texture2DArray tex2DArray(gli::load((const char*)textureData, size)); #else gli::texture2DArray tex2DArray(gli::load(filename)); #endif assert(!tex2DArray.empty()); textureArray.width = tex2DArray.dimensions().x; textureArray.height = tex2DArray.dimensions().y; layerCount = tex2DArray.layers(); // Get device properites for the requested texture format VkFormatProperties formatProperties; vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProperties); VkImageCreateInfo imageCreateInfo = vkTools::initializers::imageCreateInfo(); imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; imageCreateInfo.format = format; imageCreateInfo.extent = { textureArray.width, textureArray.height, 1 }; imageCreateInfo.mipLevels = 1; imageCreateInfo.arrayLayers = 1; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR; imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; imageCreateInfo.flags = 0; VkMemoryAllocateInfo memAllocInfo = vkTools::initializers::memoryAllocateInfo(); VkMemoryRequirements memReqs; struct Layer { VkImage image; VkDeviceMemory memory; }; std::vector<Layer> arrayLayer; arrayLayer.resize(layerCount); // Allocate command buffer for image copies and layouts VkCommandBuffer cmdBuffer; VkCommandBufferAllocateInfo cmdBufAlllocatInfo = vkTools::initializers::commandBufferAllocateInfo( cmdPool, 
VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1); VkResult err = vkAllocateCommandBuffers(device, &cmdBufAlllocatInfo, &cmdBuffer); assert(!err); VkCommandBufferBeginInfo cmdBufInfo = vkTools::initializers::commandBufferBeginInfo(); err = vkBeginCommandBuffer(cmdBuffer, &cmdBufInfo); assert(!err); // Load separate cube map faces into linear tiled textures for (uint32_t i = 0; i < layerCount; ++i) { err = vkCreateImage(device, &imageCreateInfo, nullptr, &arrayLayer[i].image); assert(!err); vkGetImageMemoryRequirements(device, arrayLayer[i].image, &memReqs); memAllocInfo.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAllocInfo.memoryTypeIndex); err = vkAllocateMemory(device, &memAllocInfo, nullptr, &arrayLayer[i].memory); assert(!err); err = vkBindImageMemory(device, arrayLayer[i].image, arrayLayer[i].memory, 0); assert(!err); VkImageSubresource subRes = {}; subRes.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkSubresourceLayout subResLayout; void *data; vkGetImageSubresourceLayout(device, arrayLayer[i].image, &subRes, &subResLayout); assert(!err); err = vkMapMemory(device, arrayLayer[i].memory, 0, memReqs.size, 0, &data); assert(!err); memcpy(data, tex2DArray[i].data(), tex2DArray[i].size()); vkUnmapMemory(device, arrayLayer[i].memory); // Image barrier for linear image (base) // Linear image will be used as a source for the copy vkTools::setImageLayout( cmdBuffer, arrayLayer[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); } // Transfer cube map faces to optimal tiling // Setup texture as blit target with optimal tiling imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; imageCreateInfo.arrayLayers = layerCount; err = vkCreateImage(device, &imageCreateInfo, nullptr, &textureArray.image); assert(!err); vkGetImageMemoryRequirements(device, textureArray.image, &memReqs); 
memAllocInfo.allocationSize = memReqs.size; getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAllocInfo.memoryTypeIndex); err = vkAllocateMemory(device, &memAllocInfo, nullptr, &textureArray.deviceMemory); assert(!err); err = vkBindImageMemory(device, textureArray.image, textureArray.deviceMemory, 0); assert(!err); // Image barrier for optimal image (target) // Set initial layout for all array layers of the optimal (target) tiled texture VkImageSubresourceRange subresourceRange = {}; subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subresourceRange.baseMipLevel = 0; subresourceRange.levelCount = 1; subresourceRange.layerCount = layerCount; vkTools::setImageLayout( cmdBuffer, textureArray.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, subresourceRange); // Copy cube map faces one by one for (uint32_t i = 0; i < layerCount; ++i) { // Copy region for image blit VkImageCopy copyRegion = {}; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = { 0, 0, 0 }; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.baseArrayLayer = i; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = { 0, 0, 0 }; copyRegion.extent.width = textureArray.width; copyRegion.extent.height = textureArray.height; copyRegion.extent.depth = 1; // Put image copy into command buffer vkCmdCopyImage( cmdBuffer, arrayLayer[i].image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, textureArray.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ©Region); } // Change texture image layout to shader read after all layers have been copied textureArray.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vkTools::setImageLayout( cmdBuffer, textureArray.image, 
VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, textureArray.imageLayout, subresourceRange); err = vkEndCommandBuffer(cmdBuffer); assert(!err); VkFence nullFence = { VK_NULL_HANDLE }; // Submit command buffer to graphis queue VkSubmitInfo submitInfo = vkTools::initializers::submitInfo(); submitInfo.commandBufferCount = 1; submitInfo.pCommandBuffers = &cmdBuffer; err = vkQueueSubmit(queue, 1, &submitInfo, nullFence); assert(!err); err = vkQueueWaitIdle(queue); assert(!err); // Create sampler VkSamplerCreateInfo sampler = vkTools::initializers::samplerCreateInfo(); sampler.magFilter = VK_FILTER_LINEAR; sampler.minFilter = VK_FILTER_LINEAR; sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler.addressModeV = sampler.addressModeU; sampler.addressModeW = sampler.addressModeU; sampler.mipLodBias = 0.0f; sampler.maxAnisotropy = 8; sampler.compareOp = VK_COMPARE_OP_NEVER; sampler.minLod = 0.0f; sampler.maxLod = 0.0f; sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; err = vkCreateSampler(device, &sampler, nullptr, &textureArray.sampler); assert(!err); // Create image view VkImageViewCreateInfo view = vkTools::initializers::imageViewCreateInfo(); view.image = VK_NULL_HANDLE; view.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY; view.format = format; view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; view.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }; view.subresourceRange.layerCount = layerCount; view.image = textureArray.image; err = vkCreateImageView(device, &view, nullptr, &textureArray.view); assert(!err); // Cleanup for (auto& layer : arrayLayer) { vkDestroyImage(device, layer.image, nullptr); vkFreeMemory(device, layer.memory, nullptr); } }