// Find and create a compute capable device queue
void getComputeQueue()
{
    uint32_t queueIndex = 0;
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueCount, NULL);
    assert(queueCount >= 1);

    std::vector<VkQueueFamilyProperties> queueProps;
    queueProps.resize(queueCount);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueCount, queueProps.data());

    // Pick the first queue family that advertises compute support
    for (queueIndex = 0; queueIndex < queueCount; queueIndex++)
    {
        if (queueProps[queueIndex].queueFlags & VK_QUEUE_COMPUTE_BIT)
            break;
    }
    assert(queueIndex < queueCount);

    // Note: this create info is populated but never passed to vkCreateDevice here
    // (and pQueuePriorities is left unset); the logical device is assumed to exist
    // already, so only the queue handle is retrieved below.
    VkDeviceQueueCreateInfo queueCreateInfo = {};
    queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queueCreateInfo.pNext = NULL;
    queueCreateInfo.queueFamilyIndex = queueIndex;
    queueCreateInfo.queueCount = 1;
    vkGetDeviceQueue(device, queueIndex, 0, &computeQueue);
}
XCamReturn
VKDevice::prepare_compute_queue ()
{
    uint32_t compute_idx = _instance->get_compute_queue_family_idx ();
    vkGetDeviceQueue (_dev_id, compute_idx, 0, &_compute_queue);
    return XCAM_RETURN_NO_ERROR;
}
void VulkanWrapper::VWGraphicInstance::CreateLogicalDevice(VWGraphicAdapter* _adapter)
{
    QueueFamilyIndices indices = FindQueueFamilies(m_PhysicalDevice, m_Surface);

    std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
    std::set<int> uniqueQueueFamilies = { indices.graphicsFamily, indices.presentFamily };

    float queuePriority = 1.0f;
    for (int queueFamily : uniqueQueueFamilies)
    {
        VkDeviceQueueCreateInfo queueCreateInfo = {};
        queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queueCreateInfo.queueFamilyIndex = queueFamily;
        queueCreateInfo.queueCount = 1;
        queueCreateInfo.pQueuePriorities = &queuePriority;
        queueCreateInfos.push_back(queueCreateInfo);
    }

    VkPhysicalDeviceFeatures deviceFeatures = {};

    VkDeviceCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    createInfo.pQueueCreateInfos = queueCreateInfos.data();
    createInfo.queueCreateInfoCount = (uint32_t)queueCreateInfos.size();
    createInfo.pEnabledFeatures = &deviceFeatures;
    createInfo.enabledExtensionCount = deviceExtensions.size();
    createInfo.ppEnabledExtensionNames = deviceExtensions.data();

    if (enableValidationLayers)
    {
        createInfo.enabledLayerCount = validationLayers.size();
        createInfo.ppEnabledLayerNames = validationLayers.data();
    }
    else
    {
        createInfo.enabledLayerCount = 0;
    }

    if (vkCreateDevice(m_PhysicalDevice, &createInfo, nullptr, &m_VulkanDevice) != VK_SUCCESS)
    {
        throw std::runtime_error("failed to create logical device!");
    }

    vkGetDeviceQueue(m_VulkanDevice, indices.graphicsFamily, 0, &m_GraphicsQueue.queue);
    vkGetDeviceQueue(m_VulkanDevice, indices.presentFamily, 0, &m_PresentQueue.queue);
    m_GraphicsQueue.index = indices.graphicsFamily;
    m_PresentQueue.index = indices.presentFamily;
}
void create_logical_device()
{
    std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
    std::set<int> uniqueQueueFamilies = {graphics_queue_family_index, present_queue_family_index};

    float queuePriority = 1.0f;
    for (int queueFamily : uniqueQueueFamilies)
    {
        VkDeviceQueueCreateInfo queueCreateInfo = {};
        queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queueCreateInfo.queueFamilyIndex = queueFamily;
        queueCreateInfo.queueCount = 1;
        queueCreateInfo.pQueuePriorities = &queuePriority;
        queueCreateInfos.push_back(queueCreateInfo);
    }

    VkPhysicalDeviceFeatures deviceFeatures = {};

    VkDeviceCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    createInfo.pQueueCreateInfos = queueCreateInfos.data();
    createInfo.queueCreateInfoCount = (uint32_t) queueCreateInfos.size();
    createInfo.pEnabledFeatures = &deviceFeatures;
    createInfo.enabledExtensionCount = device_extensions.size();
    createInfo.ppEnabledExtensionNames = device_extensions.data();

    if (use_validation_layers)
    {
        createInfo.enabledLayerCount = validationLayers.size();
        createInfo.ppEnabledLayerNames = validationLayers.data();
    }
    else
    {
        createInfo.enabledLayerCount = 0;
    }

    if (vkCreateDevice(physical_device, &createInfo, nullptr, device.replace()) != VK_SUCCESS)
    {
        cout << "failed to create logical device" << endl;
    }

    vkGetDeviceQueue(device, graphics_queue_family_index, 0, &graphics_queue);
    vkGetDeviceQueue(device, present_queue_family_index, 0, &present_queue);
}
Device::Device(VkPhysicalDevice physicalDevice) : physicalDevice(physicalDevice)
{
    // select a queue family with compute support
    uint32_t numQueues;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &numQueues, nullptr);
    VkQueueFamilyProperties *queueFamilyProperties = new VkQueueFamilyProperties[numQueues];
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &numQueues, queueFamilyProperties);

    for (uint32_t i = 0; i < numQueues; i++)
    {
        if (queueFamilyProperties[i].queueFlags & VK_QUEUE_COMPUTE_BIT)
        {
            computeQueueFamily = i;
            break;
        }
    }
    delete [] queueFamilyProperties;

    if (computeQueueFamily == -1)
    {
        throw ERROR_DEVICES;
    }

    VkDeviceQueueCreateInfo queueCreateInfo = {VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO};
    queueCreateInfo.queueCount = 1;
    float priorities[] = {1.0f};
    queueCreateInfo.pQueuePriorities = priorities;
    queueCreateInfo.queueFamilyIndex = computeQueueFamily;

    // create the logical device
    VkPhysicalDeviceFeatures physicalDeviceFeatures = {};
    VkDeviceCreateInfo deviceCreateInfo = {VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};
    deviceCreateInfo.pQueueCreateInfos = &queueCreateInfo;
    deviceCreateInfo.pEnabledFeatures = &physicalDeviceFeatures;
    deviceCreateInfo.queueCreateInfoCount = 1;
    if (VK_SUCCESS != vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &device))
    {
        throw ERROR_DEVICES;
    }
    vkGetDeviceQueue(device, computeQueueFamily, 0, &queue);
    vkGetPhysicalDeviceProperties(physicalDevice, &physicalDeviceProperties);

    // get indices of memory types we care about
    VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties;
    vkGetPhysicalDeviceMemoryProperties(physicalDevice, &physicalDeviceMemoryProperties);
    for (uint32_t i = 0; i < physicalDeviceMemoryProperties.memoryTypeCount; i++)
    {
        if (physicalDeviceMemoryProperties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
            && memoryTypeMappable == -1)
        {
            memoryTypeMappable = i;
        }

        if (physicalDeviceMemoryProperties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
            && memoryTypeLocal == -1)
        {
            memoryTypeLocal = i;
        }
    }

    // create the implicit command buffer
    implicitCommandBuffer = new CommandBuffer(*this);
}
void createLogicalDevice()
{
    QueueFamilyIndices indices = findQueueFamilies(physicalDevice);

    std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
    std::set<int> uniqueQueueFamilies = {indices.graphicsFamily, indices.presentFamily};

    float queuePriority = 1.0f;
    for (int queueFamily : uniqueQueueFamilies)
    {
        VkDeviceQueueCreateInfo queueCreateInfo = {};
        queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queueCreateInfo.queueFamilyIndex = queueFamily;
        queueCreateInfo.queueCount = 1;
        queueCreateInfo.pQueuePriorities = &queuePriority;
        queueCreateInfos.push_back(queueCreateInfo);
    }

    VkPhysicalDeviceFeatures deviceFeatures = {};

    VkDeviceCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    createInfo.queueCreateInfoCount = static_cast<uint32_t>(queueCreateInfos.size());
    createInfo.pQueueCreateInfos = queueCreateInfos.data();
    createInfo.pEnabledFeatures = &deviceFeatures;
    createInfo.enabledExtensionCount = static_cast<uint32_t>(deviceExtensions.size());
    createInfo.ppEnabledExtensionNames = deviceExtensions.data();

    if (enableValidationLayers)
    {
        createInfo.enabledLayerCount = static_cast<uint32_t>(validationLayers.size());
        createInfo.ppEnabledLayerNames = validationLayers.data();
    }
    else
    {
        createInfo.enabledLayerCount = 0;
    }

    if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device) != VK_SUCCESS)
    {
        throw std::runtime_error("failed to create logical device!");
    }

    vkGetDeviceQueue(device, indices.graphicsFamily, 0, &graphicsQueue);
    vkGetDeviceQueue(device, indices.presentFamily, 0, &presentQueue);
}
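The tutorial-style listing above relies on a findQueueFamilies helper that is not shown. A minimal sketch of what such a helper commonly looks like, assuming QueueFamilyIndices stores graphicsFamily/presentFamily as ints initialized to -1 and that surface is the window surface created earlier:

// Hypothetical sketch of the findQueueFamilies helper assumed above: scan
// every queue family and record the first one with graphics support and the
// first one that can present to the surface.
QueueFamilyIndices findQueueFamilies(VkPhysicalDevice device)
{
    QueueFamilyIndices indices;

    uint32_t queueFamilyCount = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount, nullptr);
    std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
    vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount, queueFamilies.data());

    for (uint32_t i = 0; i < queueFamilyCount; i++)
    {
        if (queueFamilies[i].queueCount > 0 &&
            (queueFamilies[i].queueFlags & VK_QUEUE_GRAPHICS_BIT))
        {
            indices.graphicsFamily = static_cast<int>(i);
        }

        VkBool32 presentSupport = VK_FALSE;
        vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport);
        if (queueFamilies[i].queueCount > 0 && presentSupport)
        {
            indices.presentFamily = static_cast<int>(i);
        }

        if (indices.graphicsFamily >= 0 && indices.presentFamily >= 0)
        {
            break;
        }
    }
    return indices;
}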
IQueueSP VKTS_APIENTRY queueGet(const VkDevice device, const uint32_t queueFamilyIndex, const uint32_t queueIndex)
{
    if (!device)
    {
        return IQueueSP();
    }

    VkQueue queue;
    vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, &queue);

    return IQueueSP(new Queue(device, queueFamilyIndex, queueIndex, queue));
}
void Renderer::_InitDevice()
{
    {
        uint32_t gpu_count = 0;
        vkEnumeratePhysicalDevices( _instance, &gpu_count, nullptr );
        std::vector<VkPhysicalDevice> gpu_list( gpu_count );
        vkEnumeratePhysicalDevices( _instance, &gpu_count, gpu_list.data() );
        _gpu = gpu_list[ 0 ];
        vkGetPhysicalDeviceProperties( _gpu, &_gpu_properties );
    }
    {
        uint32_t family_count = 0;
        vkGetPhysicalDeviceQueueFamilyProperties( _gpu, &family_count, nullptr );
        std::vector<VkQueueFamilyProperties> family_property_list( family_count );
        vkGetPhysicalDeviceQueueFamilyProperties( _gpu, &family_count, family_property_list.data() );

        bool found = false;
        for( uint32_t i=0; i < family_count; ++i ) {
            if( family_property_list[ i ].queueFlags & VK_QUEUE_GRAPHICS_BIT ) {
                found = true;
                _graphics_family_index = i;
            }
        }
        if( !found ) {
            assert( 0 && "Vulkan ERROR: Queue family supporting graphics not found." );
            std::exit( -1 );
        }
    }

    float queue_priorities[] { 1.0f };
    VkDeviceQueueCreateInfo device_queue_create_info {};
    device_queue_create_info.sType             = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    device_queue_create_info.queueFamilyIndex  = _graphics_family_index;
    device_queue_create_info.queueCount        = 1;
    device_queue_create_info.pQueuePriorities  = queue_priorities;

    VkDeviceCreateInfo device_create_info {};
    device_create_info.sType                   = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_create_info.queueCreateInfoCount    = 1;
    device_create_info.pQueueCreateInfos       = &device_queue_create_info;
    device_create_info.enabledLayerCount       = _device_layers.size();
    device_create_info.ppEnabledLayerNames     = _device_layers.data();
    device_create_info.enabledExtensionCount   = _device_extensions.size();
    device_create_info.ppEnabledExtensionNames = _device_extensions.data();

    ErrorCheck( vkCreateDevice( _gpu, &device_create_info, nullptr, &_device ) );

    vkGetDeviceQueue( _device, _graphics_family_index, 0, &_queue );
}
void VulkanWindowContext::initializeContext(void* platformData, const DisplayParams& params)
{
    fBackendContext.reset(GrVkBackendContext::Create(&fPresentQueueIndex, canPresent, platformData));

    if (!(fBackendContext->fExtensions & kKHR_surface_GrVkExtensionFlag) ||
        !(fBackendContext->fExtensions & kKHR_swapchain_GrVkExtensionFlag)) {
        fBackendContext.reset(nullptr);
        return;
    }

    VkInstance instance = fBackendContext->fInstance;
    VkDevice device = fBackendContext->fDevice;
    GET_PROC(DestroySurfaceKHR);
    GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
    GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
    GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
    GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
    GET_DEV_PROC(CreateSwapchainKHR);
    GET_DEV_PROC(DestroySwapchainKHR);
    GET_DEV_PROC(GetSwapchainImagesKHR);
    GET_DEV_PROC(AcquireNextImageKHR);
    GET_DEV_PROC(QueuePresentKHR);

    fContext = GrContext::Create(kVulkan_GrBackend, (GrBackendContext) fBackendContext.get());

    fSurface = createVkSurface(instance, platformData);
    if (VK_NULL_HANDLE == fSurface) {
        fBackendContext.reset(nullptr);
        return;
    }

    VkBool32 supported;
    VkResult res = fGetPhysicalDeviceSurfaceSupportKHR(fBackendContext->fPhysicalDevice,
                                                       fPresentQueueIndex, fSurface, &supported);
    if (VK_SUCCESS != res) {
        this->destroyContext();
        return;
    }

    if (!this->createSwapchain(-1, -1, params)) {
        this->destroyContext();
        return;
    }

    // create presentQueue
    vkGetDeviceQueue(fBackendContext->fDevice, fPresentQueueIndex, 0, &fPresentQueue);
}
GstVulkanQueue *
gst_vulkan_device_get_queue (GstVulkanDevice * device, guint32 queue_family,
    guint32 queue_i)
{
  GstVulkanQueue *ret;

  g_return_val_if_fail (GST_IS_VULKAN_DEVICE (device), NULL);
  g_return_val_if_fail (device->device != NULL, NULL);
  g_return_val_if_fail (queue_family < device->n_queues, NULL);
  g_return_val_if_fail (queue_i <
      device->queue_family_props[queue_family].queueCount, NULL);

  ret = g_object_new (GST_TYPE_VULKAN_QUEUE, NULL);
  ret->device = gst_object_ref (device);
  ret->family = queue_family;
  ret->index = queue_i;

  vkGetDeviceQueue (device->device, queue_family, queue_i, &ret->queue);

  return ret;
}
void Device::init_queues()
{
    uint32_t queue_node_count;

    // Call with NULL data to get count
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, NULL);
    EXPECT(queue_node_count >= 1);

    VkQueueFamilyProperties *queue_props = new VkQueueFamilyProperties[queue_node_count];
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, queue_props);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues,
            // including synchronization
            vkGetDeviceQueue(handle(), i, j, &queue);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue, i));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue, i));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_TRANSFER_BIT) {
                queues_[DMA].push_back(new Queue(queue, i));
            }
        }
    }

    delete[] queue_props;

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}
/** Please see header for specification */
Anvil::Queue::Queue(const Anvil::BaseDevice* in_device_ptr,
                    uint32_t                 in_queue_family_index,
                    uint32_t                 in_queue_index,
                    bool                     in_mt_safe)
    :CallbacksSupportProvider  (QUEUE_CALLBACK_ID_COUNT),
     DebugMarkerSupportProvider(in_device_ptr, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
     MTSafetySupportProvider   (in_mt_safe),
     m_device_ptr              (in_device_ptr),
     m_queue                   (VK_NULL_HANDLE),
     m_queue_family_index      (in_queue_family_index),
     m_queue_index             (in_queue_index)
{
    /* Retrieve the Vulkan handle */
    vkGetDeviceQueue(m_device_ptr->get_device_vk(),
                     in_queue_family_index,
                     in_queue_index,
                    &m_queue);

    anvil_assert(m_queue != VK_NULL_HANDLE);

    /* Determine whether the queue supports sparse bindings */
    m_supports_sparse_bindings = !!(m_device_ptr->get_queue_family_info(in_queue_family_index)->flags & VK_QUEUE_SPARSE_BINDING_BIT);

    /* Cache a fence that may be optionally used for submissions */
    {
        auto create_info_ptr = Anvil::FenceCreateInfo::create(m_device_ptr,
                                                              false); /* create_signalled */

        create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe()) );

        m_submit_fence_ptr = Anvil::Fence::create(std::move(create_info_ptr) );
    }

    /* OK, register the wrapper instance and leave */
    Anvil::ObjectTracker::get()->register_object(Anvil::OBJECT_TYPE_QUEUE,
                                                 this);
}
bool VkContext::CreateDevice()
{
    float queuePriority = 1.0f;

    VkDeviceQueueCreateInfo queueCreateInfo = {};
    queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queueCreateInfo.queueFamilyIndex = queueIndex;
    queueCreateInfo.queueCount = 1;
    queueCreateInfo.pQueuePriorities = &queuePriority;

    std::vector<const char*> deviceExtensions;
    std::vector<const char*> deviceLayers;

#ifdef VOXL_DEBUG
    // Add validation layers
    deviceLayers.push_back("VK_LAYER_LUNARG_standard_validation");
#endif

    deviceExtensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);

    VkDeviceCreateInfo deviceCreateInfo = {};
    deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    deviceCreateInfo.pQueueCreateInfos = &queueCreateInfo;
    deviceCreateInfo.queueCreateInfoCount = 1;
    deviceCreateInfo.enabledExtensionCount = (u32)deviceExtensions.size();
    deviceCreateInfo.ppEnabledExtensionNames = deviceExtensions.data();
    deviceCreateInfo.enabledLayerCount = (u32)deviceLayers.size();
    deviceCreateInfo.ppEnabledLayerNames = deviceLayers.data();

    CheckVkResult(vkCreateDevice(physDev, &deviceCreateInfo, nullptr, &dev));

    // Get queue
    vkGetDeviceQueue(dev, queueIndex, 0, &queue);

    return true;
}
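VkContext::CreateDevice above assumes a queueIndex member that has already been selected elsewhere. A hypothetical sketch of such a selection step, assuming physDev is the chosen VkPhysicalDevice and queueIndex is a uint32_t member (the real project may additionally require present support on the same family):

// Hypothetical helper: pick the first queue family with graphics support.
bool VkContext::SelectQueueFamily()
{
    uint32_t familyCount = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &familyCount, nullptr);
    std::vector<VkQueueFamilyProperties> families(familyCount);
    vkGetPhysicalDeviceQueueFamilyProperties(physDev, &familyCount, families.data());

    for (uint32_t i = 0; i < familyCount; ++i)
    {
        if (families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)
        {
            queueIndex = i;
            return true;
        }
    }
    return false;
}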
void getPresentationQueue(VulkanContext& context, VulkanSurfaceContext& sc)
{
    uint32_t queueFamiliesCount;
    vkGetPhysicalDeviceQueueFamilyProperties(context.physicalDevice, &queueFamiliesCount, nullptr);
    std::vector<VkQueueFamilyProperties> queueFamiliesProperties(queueFamiliesCount);
    vkGetPhysicalDeviceQueueFamilyProperties(context.physicalDevice, &queueFamiliesCount,
            queueFamiliesProperties.data());
    uint32_t presentQueueFamilyIndex = 0xffff;
    for (uint32_t j = 0; j < queueFamiliesCount; ++j) {
        VkBool32 supported = VK_FALSE;
        vkGetPhysicalDeviceSurfaceSupportKHR(context.physicalDevice, j, sc.surface, &supported);
        if (supported) {
            presentQueueFamilyIndex = j;
            break;
        }
    }
    ASSERT_POSTCONDITION(presentQueueFamilyIndex != 0xffff,
            "This physical device does not support the presentation queue.");
    if (context.graphicsQueueFamilyIndex != presentQueueFamilyIndex) {
        vkGetDeviceQueue(context.device, presentQueueFamilyIndex, 0, &sc.presentQueue);
    } else {
        sc.presentQueue = context.graphicsQueue;
    }
    ASSERT_POSTCONDITION(sc.presentQueue, "Unable to obtain presentation queue.");
}
void vulkan_create_logical_device(ReaperRoot& root, VulkanBackend& backend)
{
    std::vector<VkDeviceQueueCreateInfo> queue_create_infos;
    std::vector<float>                   queue_priorities = {1.0f};

    queue_create_infos.push_back({
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,     // VkStructureType             sType
        nullptr,                                        // const void                 *pNext
        0,                                              // VkDeviceQueueCreateFlags    flags
        backend.physicalDeviceInfo.graphicsQueueIndex,  // uint32_t                    queueFamilyIndex
        static_cast<uint32_t>(queue_priorities.size()), // uint32_t                    queueCount
        &queue_priorities[0]                            // const float                *pQueuePriorities
    });

    if (backend.physicalDeviceInfo.graphicsQueueIndex != backend.physicalDeviceInfo.presentQueueIndex)
    {
        queue_create_infos.push_back({
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,     // VkStructureType             sType
            nullptr,                                        // const void                 *pNext
            0,                                              // VkDeviceQueueCreateFlags    flags
            backend.physicalDeviceInfo.presentQueueIndex,   // uint32_t                    queueFamilyIndex
            static_cast<uint32_t>(queue_priorities.size()), // uint32_t                    queueCount
            &queue_priorities[0]                            // const float                *pQueuePriorities
        });
    }

    Assert(!queue_create_infos.empty());
    Assert(!queue_priorities.empty());
    Assert(queue_priorities.size() == queue_create_infos.size());

    std::vector<const char*> device_extensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};

    uint32_t queueCreateCount = static_cast<uint32_t>(queue_create_infos.size());
    uint32_t deviceExtensionCount = static_cast<uint32_t>(device_extensions.size());

    log_info(root, "vulkan: using {} device level extensions", device_extensions.size());
    for (auto& e : device_extensions)
        log_debug(root, "- {}", e);

    VkDeviceCreateInfo device_create_info = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType                    sType
        nullptr,                              // const void                        *pNext
        0,                                    // VkDeviceCreateFlags                flags
        queueCreateCount,                     // uint32_t                           queueCreateInfoCount
        &queue_create_infos[0],               // const VkDeviceQueueCreateInfo     *pQueueCreateInfos
        0,                                    // uint32_t                           enabledLayerCount
        nullptr,                              // const char * const                *ppEnabledLayerNames
        deviceExtensionCount,                 // uint32_t                           enabledExtensionCount
        (deviceExtensionCount > 0 ? &device_extensions[0] : nullptr), // const char * const *ppEnabledExtensionNames
        nullptr                               // const VkPhysicalDeviceFeatures    *pEnabledFeatures
    };

    Assert(vkCreateDevice(backend.physicalDevice, &device_create_info, nullptr, &backend.device) == VK_SUCCESS,
           "could not create Vulkan device");

    vulkan_load_device_level_functions(backend.device);

    vkGetDeviceQueue(backend.device, backend.physicalDeviceInfo.graphicsQueueIndex, 0,
                     &backend.deviceInfo.graphicsQueue);
    vkGetDeviceQueue(backend.device, backend.physicalDeviceInfo.presentQueueIndex, 0,
                     &backend.deviceInfo.presentQueue);
}
static void SetupVulkan(const char** extensions, uint32_t extensions_count) { VkResult err; // Create Vulkan Instance { VkInstanceCreateInfo create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; create_info.enabledExtensionCount = extensions_count; create_info.ppEnabledExtensionNames = extensions; #ifdef IMGUI_VULKAN_DEBUG_REPORT // Enabling multiple validation layers grouped as LunarG standard validation const char* layers[] = { "VK_LAYER_LUNARG_standard_validation" }; create_info.enabledLayerCount = 1; create_info.ppEnabledLayerNames = layers; // Enable debug report extension (we need additional storage, so we duplicate the user array to add our new extension to it) const char** extensions_ext = (const char**)malloc(sizeof(const char*) * (extensions_count + 1)); memcpy(extensions_ext, extensions, extensions_count * sizeof(const char*)); extensions_ext[extensions_count] = "VK_EXT_debug_report"; create_info.enabledExtensionCount = extensions_count + 1; create_info.ppEnabledExtensionNames = extensions_ext; // Create Vulkan Instance err = vkCreateInstance(&create_info, g_Allocator, &g_Instance); check_vk_result(err); free(extensions_ext); // Get the function pointer (required for any extensions) auto vkCreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(g_Instance, "vkCreateDebugReportCallbackEXT"); IM_ASSERT(vkCreateDebugReportCallbackEXT != NULL); // Setup the debug report callback VkDebugReportCallbackCreateInfoEXT debug_report_ci = {}; debug_report_ci.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT; debug_report_ci.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; debug_report_ci.pfnCallback = debug_report; debug_report_ci.pUserData = NULL; err = vkCreateDebugReportCallbackEXT(g_Instance, &debug_report_ci, g_Allocator, &g_DebugReport); check_vk_result(err); #else // Create Vulkan Instance without any debug feature err = vkCreateInstance(&create_info, g_Allocator, &g_Instance); check_vk_result(err); #endif } // Select GPU { uint32_t gpu_count; err = vkEnumeratePhysicalDevices(g_Instance, &gpu_count, NULL); check_vk_result(err); VkPhysicalDevice* gpus = (VkPhysicalDevice*)malloc(sizeof(VkPhysicalDevice) * gpu_count); err = vkEnumeratePhysicalDevices(g_Instance, &gpu_count, gpus); check_vk_result(err); // If a number >1 of GPUs got reported, you should find the best fit GPU for your purpose // e.g. VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU if available, or with the greatest memory available, etc. // for sake of simplicity we'll just take the first one, assuming it has a graphics queue family. 
g_PhysicalDevice = gpus[0]; free(gpus); } // Select graphics queue family { uint32_t count; vkGetPhysicalDeviceQueueFamilyProperties(g_PhysicalDevice, &count, NULL); VkQueueFamilyProperties* queues = (VkQueueFamilyProperties*)malloc(sizeof(VkQueueFamilyProperties) * count); vkGetPhysicalDeviceQueueFamilyProperties(g_PhysicalDevice, &count, queues); for (uint32_t i = 0; i < count; i++) if (queues[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { g_QueueFamily = i; break; } free(queues); IM_ASSERT(g_QueueFamily != -1); } // Create Logical Device (with 1 queue) { int device_extension_count = 1; const char* device_extensions[] = { "VK_KHR_swapchain" }; const float queue_priority[] = { 1.0f }; VkDeviceQueueCreateInfo queue_info[1] = {}; queue_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info[0].queueFamilyIndex = g_QueueFamily; queue_info[0].queueCount = 1; queue_info[0].pQueuePriorities = queue_priority; VkDeviceCreateInfo create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; create_info.queueCreateInfoCount = sizeof(queue_info) / sizeof(queue_info[0]); create_info.pQueueCreateInfos = queue_info; create_info.enabledExtensionCount = device_extension_count; create_info.ppEnabledExtensionNames = device_extensions; err = vkCreateDevice(g_PhysicalDevice, &create_info, g_Allocator, &g_Device); check_vk_result(err); vkGetDeviceQueue(g_Device, g_QueueFamily, 0, &g_Queue); } // Create Descriptor Pool { VkDescriptorPoolSize pool_sizes[] = { { VK_DESCRIPTOR_TYPE_SAMPLER, 1000 }, { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1000 }, { VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1000 }, { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1000 }, { VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1000 }, { VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1000 }, { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000 }, { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1000 }, { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1000 }, { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1000 }, { VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1000 } }; VkDescriptorPoolCreateInfo pool_info = {}; pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; pool_info.maxSets = 1000 * IM_ARRAYSIZE(pool_sizes); pool_info.poolSizeCount = (uint32_t)IM_ARRAYSIZE(pool_sizes); pool_info.pPoolSizes = pool_sizes; err = vkCreateDescriptorPool(g_Device, &pool_info, g_Allocator, &g_DescriptorPool); check_vk_result(err); } }
// Devices void Renderer::_InitDevice() { { uint32_t gpu_count = 0; // Read number of GPU's vkEnumeratePhysicalDevices(_instance, &gpu_count, nullptr); std::vector<VkPhysicalDevice> gpu_list(gpu_count); // Populate list vkEnumeratePhysicalDevices(_instance, &gpu_count, gpu_list.data()); _gpu = gpu_list[0]; // Get the first available list vkGetPhysicalDeviceProperties(_gpu, &_gpu_properties); vkGetPhysicalDeviceMemoryProperties(_gpu, &_gpu_memory_properties); } { uint32_t family_count = 0; // Read number of GPU queue family properties vkGetPhysicalDeviceQueueFamilyProperties(_gpu, &family_count, nullptr); std::vector<VkQueueFamilyProperties> family_property_list(family_count); // Populate list vkGetPhysicalDeviceQueueFamilyProperties(_gpu, &family_count, family_property_list.data()); // Find the graphics family bool found = false; for (uint32_t i = 0; i < family_count; ++i) { if (family_property_list[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { found = true; _graphics_family_index = i; } } if (!found) { assert(0 && "Vulkan ERROR: Queue family supporting graphics not found."); std::exit(-1); } } // Instance Layers { uint32_t layer_count = 0; // Read the number of layers vkEnumerateInstanceLayerProperties(&layer_count, nullptr); std::vector<VkLayerProperties> layer_property_list(layer_count); // Populate list vkEnumerateInstanceLayerProperties(&layer_count, layer_property_list.data()); #if BUILD_ENABLE_VULKAN_RUNTIME_DEBUG std::cout << "Instance layers: \n"; for (auto &i : layer_property_list) { std::cout << " " << i.layerName << "\t\t | " << i.description << std::endl; } std::cout << std::endl; #endif } // Instance Extensions { uint32_t extension_count = 0; // Read the number of extensions vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, nullptr); std::vector<VkExtensionProperties> extension_property_list(extension_count); // Populate list vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, extension_property_list.data()); #if BUILD_ENABLE_VULKAN_RUNTIME_DEBUG std::cout << "Instance extensions: \n"; for (auto &i : extension_property_list) { std::cout << " " << i.extensionName << "\t\t | " << i.specVersion << std::endl; } std::cout << std::endl; #endif } // Device Layers { uint32_t layer_count = 0; // Read the number of layers vkEnumerateDeviceLayerProperties(_gpu, &layer_count, nullptr); std::vector<VkLayerProperties> layer_property_list(layer_count); // Populate list vkEnumerateDeviceLayerProperties(_gpu, &layer_count, layer_property_list.data()); #if BUILD_ENABLE_VULKAN_RUNTIME_DEBUG std::cout << "Device layers: \n"; for (auto &i : layer_property_list) { std::cout << " " << i.layerName << "\t\t | " << i.description << std::endl; } std::cout << std::endl; #endif } // Device Extensions { uint32_t extension_count = 0; // Read the number of extensions vkEnumerateDeviceExtensionProperties(_gpu, nullptr, &extension_count, nullptr); std::vector<VkExtensionProperties> extension_property_list(extension_count); // Populate list vkEnumerateDeviceExtensionProperties(_gpu, nullptr, &extension_count, extension_property_list.data()); #if BUILD_ENABLE_VULKAN_RUNTIME_DEBUG std::cout << "Device extensions: \n"; for (auto &i : extension_property_list) { std::cout << " " << i.extensionName << "\t\t | " << i.specVersion << std::endl; } std::cout << std::endl; #endif } float queue_priorities[] {1.0f}; VkDeviceQueueCreateInfo device_queue_create_info {}; device_queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; device_queue_create_info.queueFamilyIndex = 
_graphics_family_index; device_queue_create_info.queueCount = 1; device_queue_create_info.pQueuePriorities = queue_priorities; VkDeviceCreateInfo device_create_info = {}; device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.queueCreateInfoCount = 1; device_create_info.pQueueCreateInfos = &device_queue_create_info; device_create_info.enabledLayerCount = (uint32_t) _device_layer_list.size(); device_create_info.ppEnabledLayerNames = _device_layer_list.data(); device_create_info.enabledExtensionCount = (uint32_t) _device_extension_list.size(); device_create_info.ppEnabledExtensionNames = _device_extension_list.data(); ErrorCheck(vkCreateDevice(_gpu, &device_create_info, nullptr, &_device)); vkGetDeviceQueue(_device, _graphics_family_index, 0, &_queue); }
Context::Context() { if(!loadVulkanLibrary()) { CV_Error(Error::StsError, "loadVulkanLibrary failed"); return; } else if (!loadVulkanEntry()) { CV_Error(Error::StsError, "loadVulkanEntry failed"); return; } else if (!loadVulkanGlobalFunctions()) { CV_Error(Error::StsError, "loadVulkanGlobalFunctions failed"); return; } // create VkInstance, VkPhysicalDevice std::vector<const char *> enabledExtensions; if (enableValidationLayers) { uint32_t layerCount; vkEnumerateInstanceLayerProperties(&layerCount, NULL); std::vector<VkLayerProperties> layerProperties(layerCount); vkEnumerateInstanceLayerProperties(&layerCount, layerProperties.data()); bool foundLayer = false; for (VkLayerProperties prop : layerProperties) { if (strcmp("VK_LAYER_LUNARG_standard_validation", prop.layerName) == 0) { foundLayer = true; break; } } if (!foundLayer) { throw std::runtime_error("Layer VK_LAYER_LUNARG_standard_validation not supported\n"); } kEnabledLayers.push_back("VK_LAYER_LUNARG_standard_validation"); uint32_t extensionCount; vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, NULL); std::vector<VkExtensionProperties> extensionProperties(extensionCount); vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensionProperties.data()); bool foundExtension = false; for (VkExtensionProperties prop : extensionProperties) { if (strcmp(VK_EXT_DEBUG_REPORT_EXTENSION_NAME, prop.extensionName) == 0) { foundExtension = true; break; } } if (!foundExtension) { throw std::runtime_error("Extension VK_EXT_DEBUG_REPORT_EXTENSION_NAME not supported\n"); } enabledExtensions.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); } VkApplicationInfo applicationInfo = {}; applicationInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; applicationInfo.pApplicationName = "VkCom Library"; applicationInfo.applicationVersion = 0; applicationInfo.pEngineName = "vkcom"; applicationInfo.engineVersion = 0; applicationInfo.apiVersion = VK_API_VERSION_1_0;; VkInstanceCreateInfo createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; createInfo.flags = 0; createInfo.pApplicationInfo = &applicationInfo; // Give our desired layers and extensions to vulkan. createInfo.enabledLayerCount = kEnabledLayers.size(); createInfo.ppEnabledLayerNames = kEnabledLayers.data(); createInfo.enabledExtensionCount = enabledExtensions.size(); createInfo.ppEnabledExtensionNames = enabledExtensions.data(); VK_CHECK_RESULT(vkCreateInstance(&createInfo, NULL, &kInstance)); if (!loadVulkanFunctions(kInstance)) { CV_Error(Error::StsError, "loadVulkanFunctions failed"); return; } if (enableValidationLayers && vkCreateDebugReportCallbackEXT) { VkDebugReportCallbackCreateInfoEXT createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT; createInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; createInfo.pfnCallback = &debugReportCallbackFn; // Create and register callback. 
VK_CHECK_RESULT(vkCreateDebugReportCallbackEXT(kInstance, &createInfo, NULL, &kDebugReportCallback)); } // find physical device uint32_t deviceCount; vkEnumeratePhysicalDevices(kInstance, &deviceCount, NULL); if (deviceCount == 0) { throw std::runtime_error("could not find a device with vulkan support"); } std::vector<VkPhysicalDevice> devices(deviceCount); vkEnumeratePhysicalDevices(kInstance, &deviceCount, devices.data()); for (VkPhysicalDevice device : devices) { if (true) { kPhysicalDevice = device; break; } } kQueueFamilyIndex = getComputeQueueFamilyIndex(); // create device, queue, command pool VkDeviceQueueCreateInfo queueCreateInfo = {}; queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queueCreateInfo.queueFamilyIndex = kQueueFamilyIndex; queueCreateInfo.queueCount = 1; // create one queue in this family. We don't need more. float queuePriorities = 1.0; // we only have one queue, so this is not that imporant. queueCreateInfo.pQueuePriorities = &queuePriorities; VkDeviceCreateInfo deviceCreateInfo = {}; // Specify any desired device features here. We do not need any for this application, though. VkPhysicalDeviceFeatures deviceFeatures = {}; deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; deviceCreateInfo.enabledLayerCount = kEnabledLayers.size(); deviceCreateInfo.ppEnabledLayerNames = kEnabledLayers.data(); deviceCreateInfo.pQueueCreateInfos = &queueCreateInfo; deviceCreateInfo.queueCreateInfoCount = 1; deviceCreateInfo.pEnabledFeatures = &deviceFeatures; VK_CHECK_RESULT(vkCreateDevice(kPhysicalDevice, &deviceCreateInfo, NULL, &kDevice)); // Get a handle to the only member of the queue family. vkGetDeviceQueue(kDevice, kQueueFamilyIndex, 0, &kQueue); // create command pool VkCommandPoolCreateInfo commandPoolCreateInfo = {}; commandPoolCreateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; commandPoolCreateInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; // the queue family of this command pool. All command buffers allocated from this command pool, // must be submitted to queues of this family ONLY. commandPoolCreateInfo.queueFamilyIndex = kQueueFamilyIndex; VK_CHECK_RESULT(vkCreateCommandPool(kDevice, &commandPoolCreateInfo, NULL, &kCmdPool)); }
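The Context constructor above calls a getComputeQueueFamilyIndex() helper that is not shown. A minimal sketch under the assumption that it simply returns the first compute-capable family on kPhysicalDevice (some implementations prefer a dedicated compute family, i.e. one without VK_QUEUE_GRAPHICS_BIT, before falling back to any compute-capable one):

// Hypothetical sketch of the getComputeQueueFamilyIndex() helper used above.
uint32_t getComputeQueueFamilyIndex()
{
    uint32_t queueFamilyCount = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(kPhysicalDevice, &queueFamilyCount, NULL);
    std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
    vkGetPhysicalDeviceQueueFamilyProperties(kPhysicalDevice, &queueFamilyCount, queueFamilies.data());

    for (uint32_t i = 0; i < queueFamilyCount; ++i)
    {
        if (queueFamilies[i].queueCount > 0 &&
            (queueFamilies[i].queueFlags & VK_QUEUE_COMPUTE_BIT))
        {
            return i;
        }
    }
    throw std::runtime_error("could not find a queue family that supports compute operations");
}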
void VulkanBase::createInstance() { // Application info init const VkApplicationInfo applicationInfo = { .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, .pNext = NULL, .pApplicationName = name.c_str(), .applicationVersion = 1, .pEngineName = engineName.c_str(), .engineVersion = 1, .apiVersion = VK_API_VERSION, //FIXME Nvidia driver not updated to latest Vulkan Version }; VkInstanceCreateInfo instanceCreateInfo = { .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, .pNext = NULL, .flags = VK_FLAGS_NONE, .pApplicationInfo = &applicationInfo, .enabledLayerCount = 0, .ppEnabledLayerNames = NULL, .enabledExtensionCount = 0, .ppEnabledExtensionNames = NULL, }; std::vector<const char*> enabledExtensions = { VK_KHR_SURFACE_EXTENSION_NAME, VK_KHR_XCB_SURFACE_EXTENSION_NAME}; //Check if extensions are present vkUtils::checkGlobalExtensionPresent(VK_KHR_SURFACE_EXTENSION_NAME); vkUtils::checkGlobalExtensionPresent(VK_KHR_XCB_SURFACE_EXTENSION_NAME); #ifdef _DEBUG if (enableValidation) { //Extensions management enabledExtensions.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); vkUtils::checkGlobalExtensionPresent(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); // Layer management instanceCreateInfo.enabledLayerCount = vkDebug::validationLayerCount; instanceCreateInfo.ppEnabledLayerNames = vkDebug::validationLayerNames; // Check standard debug layers are present for(uint32_t i = 0; i < instanceCreateInfo.enabledLayerCount; i++) { vkUtils::checkGlobalLayerPresent(vkDebug::validationLayerNames[i]); } } #endif // DEBUG instanceCreateInfo.ppEnabledExtensionNames = enabledExtensions.data(); instanceCreateInfo.enabledExtensionCount = (uint32_t) enabledExtensions.size(); CHECK_RESULT(vkCreateInstance(&instanceCreateInfo, nullptr, &instance)); } void VulkanBase::selectVkPhysicalDevice() { uint32_t physicalDeviceCount = 0; CHECK_RESULT(vkEnumeratePhysicalDevices(instance,&physicalDeviceCount,nullptr)); if (physicalDeviceCount<=0) { ERROR("No physical device found"); } std::vector<VkPhysicalDevice> physicalDevicesVector(physicalDeviceCount); CHECK_RESULT(vkEnumeratePhysicalDevices(instance,&physicalDeviceCount,physicalDevicesVector.data())); #ifdef _DEBUG int deviceIndex = 0; for(const auto & phyDev : physicalDevicesVector) { VkPhysicalDeviceProperties phyDevProperties; vkGetPhysicalDeviceProperties(phyDev, &phyDevProperties); std::cout << "--- Physical device: " << phyDevProperties.deviceName << " (index: " << (deviceIndex++) << ")" << std::endl; std::cout << " apiVersion: " << phyDevProperties.apiVersion << std::endl; std::cout << " driverVersion: " << phyDevProperties.driverVersion << std::endl; std::cout << " vendorID: " << phyDevProperties.vendorID << std::endl; std::cout << " deviceID: " << phyDevProperties.deviceID << std::endl; std::cout << " deviceType: "; switch(phyDevProperties.deviceType) { case VK_PHYSICAL_DEVICE_TYPE_OTHER: std::cout << "OTHER"; break; case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: std::cout << "INTEGRATED_GPU"; break; case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: std::cout << "DISCRETE_GPU"; break; case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU: std::cout << "VIRTUAL_GPU"; break; case VK_PHYSICAL_DEVICE_TYPE_CPU: std::cout << "CPU"; break; default: std::cout << "UNKNOWN!!!"; break; } std::cout << std::endl; } #endif // _DEBUG physicalDevice = physicalDevicesVector.at(0); // Gather Physical Device Memory Properties vkGetPhysicalDeviceMemoryProperties(physicalDevice,&physicalDeviceMemoryProperties); } void VulkanBase::selectQueue() { uint32_t queueFamilyPropertyCount = 0; 
vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice,&queueFamilyPropertyCount,nullptr); if (queueFamilyPropertyCount<=0) ERROR("Physical device has no queue families"); std::vector<VkQueueFamilyProperties> queueFamilyPropertiesVector(queueFamilyPropertyCount); vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice,&queueFamilyPropertyCount,queueFamilyPropertiesVector.data()); uint32_t queueFamilyIndex = 0; int32_t selectedQueueFamilyIndex = -1; VkBool32 presentSupport = VK_FALSE; #ifdef _DEBUG std::cout << std::endl << "--- Number of queue families " << queueFamilyPropertyCount << std::endl; #endif // _DEBUG for(const auto & queueFamProp : queueFamilyPropertiesVector) { CHECK_RESULT(vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, swapchain.surface, &presentSupport)); #ifdef _DEBUG std::cout << "--- Properties for queue family " << queueFamilyIndex << std::endl; std::cout << " queueFlags:"; if(queueFamProp.queueFlags & VK_QUEUE_GRAPHICS_BIT) std::cout << " G"; if(queueFamProp.queueFlags & VK_QUEUE_COMPUTE_BIT) std::cout << " C"; if(queueFamProp.queueFlags & VK_QUEUE_TRANSFER_BIT) std::cout << " T"; if(queueFamProp.queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) std::cout << " S"; std::cout << '\n'; std::cout << " queueCount: " << queueFamProp.queueCount << std::endl; std::cout << " timestampValidBits: " << queueFamProp.timestampValidBits << std::endl; std::cout << " minImageTransferGranularity: " << queueFamProp.minImageTransferGranularity.width << ", " << queueFamProp.minImageTransferGranularity.height << ", " << queueFamProp.minImageTransferGranularity.depth << std::endl; std::cout << " Supports present?: " << std::boolalpha << bool(presentSupport) << std::endl << std::endl; #endif // _DEBUG if (bool(queueFamProp.queueFlags & VK_QUEUE_GRAPHICS_BIT) && presentSupport == VK_TRUE) { if (selectedQueueFamilyIndex < 0) selectedQueueFamilyIndex = queueFamilyIndex; } queueFamilyIndex++; } if (selectedQueueFamilyIndex<0) ERROR("No queue with both graphics and present capabilities found"); // Create device after selecting the queue std::array<float,1> queuePriorities = {0.0f}; VkDeviceQueueCreateInfo queueCreateInfo = { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = nullptr, .flags = VK_FLAGS_NONE, .queueFamilyIndex = (uint32_t) selectedQueueFamilyIndex, .queueCount = 1, //Number of queues to create .pQueuePriorities = queuePriorities.data() }; // Call to createDevice createDevice(queueCreateInfo,1); //Get a handle to the selected queue vkGetDeviceQueue(device, (uint32_t) selectedQueueFamilyIndex, 0, &queue); //TODO get handle if using multiple queues queueFamilyIndex = (uint32_t) selectedQueueFamilyIndex; } void VulkanBase::createDevice(VkDeviceQueueCreateInfo requestedQueues, uint32_t requestedQueuesCount) { //Check extensions available on the selected physical device before creating it // Check swap chain extension vkUtils::checkDeviceExtensionPresent(physicalDevice,VK_KHR_SWAPCHAIN_EXTENSION_NAME); std::vector<const char*> enabledExtensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME}; VkDeviceCreateInfo deviceCreateInfo = { .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, .pNext = nullptr, .flags = VK_FLAGS_NONE, .queueCreateInfoCount = requestedQueuesCount, .pQueueCreateInfos = &requestedQueues, .enabledLayerCount = 0, .ppEnabledLayerNames = nullptr, .enabledExtensionCount = (uint32_t) enabledExtensions.size(), .ppEnabledExtensionNames = enabledExtensions.data(), .pEnabledFeatures = NULL }; #ifdef _DEBUG if (enableValidation) { deviceCreateInfo.enabledLayerCount = 
vkDebug::validationLayerCount; deviceCreateInfo.ppEnabledLayerNames = vkDebug::validationLayerNames; // Check standard debug layers are present on the device for(uint32_t i = 0; i < deviceCreateInfo.enabledLayerCount; i++) { vkUtils::checkGlobalLayerPresent(vkDebug::validationLayerNames[i]); } } #endif // _DEBUG CHECK_RESULT(vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &device)); } void VulkanBase::createCommandPool(const uint32_t queueFamilyIndex, const VkCommandPoolCreateFlagBits createFlagBits) { const VkCommandPoolCreateInfo commandPoolCreateInfo= { .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, .pNext = NULL, .flags = createFlagBits, .queueFamilyIndex = queueFamilyIndex }; CHECK_RESULT(vkCreateCommandPool(device, &commandPoolCreateInfo,nullptr,&commandPool)); #ifdef _DEBUG std::cout << "\n+++ Created command pool" << std::endl; #endif // _DEBUG } void VulkanBase::createSynchroItems() { // Semaphores VkSemaphoreCreateInfo semaphoreCreateInfo = { .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, .pNext = NULL, .flags = VK_FLAGS_NONE }; // Semaphore signaled on swapchain image ready to use and wait on the queue before rendering/present CHECK_RESULT(vkCreateSemaphore(device, &semaphoreCreateInfo, nullptr, &imageAcquiredSemaphore)); // Semaphore signaled on queue rendering termination and waited on present operation CHECK_RESULT(vkCreateSemaphore(device, &semaphoreCreateInfo, nullptr, &renderingCompletedSemaphore)); // Fences VkFenceCreateInfo fenceCreateInfo = { .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = NULL, .flags = VK_FLAGS_NONE }; CHECK_RESULT(vkCreateFence(device, &fenceCreateInfo, nullptr, &presentFence)); #ifdef _DEBUG std::cout << "\n+++ Created semaphores and fences\n"; #endif // _DEBUG } void VulkanBase::createCommandBuffers(VkCommandBuffer* cmdBuffer, uint32_t commandBufferCount, VkCommandBufferLevel cmdBufferLevel) { const VkCommandBufferAllocateInfo commandBufferAllocateInfo = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, .pNext = NULL, .commandPool = commandPool, .level = cmdBufferLevel, .commandBufferCount = commandBufferCount }; CHECK_RESULT(vkAllocateCommandBuffers(device, &commandBufferAllocateInfo, cmdBuffer)); #ifdef _DEBUG std::cout << "\n+++ Allocated " << commandBufferCount << " command buffers" << std::endl; #endif // _DEBUG } void VulkanBase::setupInitCommandBuffer() { VkCommandBufferBeginInfo commandBufferBeginInfo = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, .pNext = NULL, .flags = VK_FLAGS_NONE, .pInheritanceInfo = NULL }; CHECK_RESULT(vkBeginCommandBuffer(initCommandBuffer, &commandBufferBeginInfo)); // Creates an image memory barrier to change the layout for every image on the swapchain VkImageMemoryBarrier imageMemoryBarrier = { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .pNext = NULL, .srcAccessMask = VK_FLAGS_NONE, .dstAccessMask = VK_FLAGS_NONE, .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .image = 0, .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} }; // Pipeline Barrier for each swapchain image for (const auto& image: swapchain.swapchainImagesVector){ imageMemoryBarrier.image = image; vkCmdPipelineBarrier(initCommandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, //Put barrier on top of the pipeline VK_FLAGS_NONE, 0, nullptr, // memoryBarrier 0, nullptr, // bufferMemoryBarrier 1, &imageMemoryBarrier); // 
imageMemoryBarrier } CHECK_RESULT(vkEndCommandBuffer(initCommandBuffer)); #ifdef _DEBUG std::cout << "\n+++ Finished recording initCommandBuffer\n"; #endif // _DEBUG } void VulkanBase::setupPresentCommandBuffer(const VkImage currentSwapchainImage, const float* clearColors) { VkCommandBufferBeginInfo commandBufferBeginInfo = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, .pNext = NULL, .flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, .pInheritanceInfo = NULL }; CHECK_RESULT(vkBeginCommandBuffer(presentCommandBuffer, &commandBufferBeginInfo)); VkImageMemoryBarrier imageMemoryBarrier = { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, .pNext = NULL, .srcAccessMask = VK_ACCESS_MEMORY_READ_BIT, .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, .image = currentSwapchainImage, .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} }; //Set barrier on top to change layout and access vkCmdPipelineBarrier(presentCommandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_FLAGS_NONE, 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier); VkClearColorValue clearColorValue; VkImageSubresourceRange imageSubresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}; clearColorValue.float32[0] = clearColors[0]; clearColorValue.float32[1] = clearColors[1]; clearColorValue.float32[2] = clearColors[2]; clearColorValue.float32[3] = 1.0f; // Command to clear the swapchain image vkCmdClearColorImage(presentCommandBuffer,currentSwapchainImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearColorValue, 1, &imageSubresourceRange); /* * Transition the swapchain image from VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL * to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR */ imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; imageMemoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT; imageMemoryBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; // Set barrier end of pipeline vkCmdPipelineBarrier(presentCommandBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier ); CHECK_RESULT(vkEndCommandBuffer(presentCommandBuffer)); #ifdef _DEBUG //std::cout << "\n+++ Finished recording presentCommandBuffer\n"; #endif // _DEBUG } void VulkanBase::renderFrame(const float* clearColors) { // Wait on previous frame fence (render too fast) //CHECK_RESULT(vkWaitForFences(device, 1, &presentFence, VK_TRUE, UINT64_MAX)); //CHECK_RESULT(vkResetFences(device, 1, &presentFence)); // Acquire next image on the swapchain uint32_t imageIndex = UINT64_MAX; CHECK_RESULT(vkAcquireNextImageKHR(device, swapchain.swapchain, UINT64_MAX, imageAcquiredSemaphore, VK_NULL_HANDLE, &imageIndex)); // Setup the present command buffer setupPresentCommandBuffer(swapchain.swapchainImagesVector.at(imageIndex),clearColors); // Submit present command buffer to the queue // Waits on imageAcquiredSemaphore so it doesnt start rendering until the image from the swapchain is ready and // it also signals the renderingCompletedSemaphore used by the later present VkPipelineStageFlags pipelineStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submitInfo = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pNext = NULL, .waitSemaphoreCount = 1, .pWaitSemaphores = 
&imageAcquiredSemaphore, .pWaitDstStageMask = &pipelineStageFlags, .commandBufferCount = 1, .pCommandBuffers = &presentCommandBuffer, .signalSemaphoreCount = 1, .pSignalSemaphores = &renderingCompletedSemaphore }; CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE)); // Present the rendered image VkPresentInfoKHR presentInfo = { .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, .pNext = NULL, .waitSemaphoreCount = 1, .pWaitSemaphores = &renderingCompletedSemaphore, .swapchainCount = 1, .pSwapchains = &swapchain.swapchain, .pImageIndices = &imageIndex, .pResults = nullptr }; CHECK_RESULT(vkQueuePresentKHR(queue,&presentInfo)); CHECK_RESULT(vkQueueWaitIdle(queue)); //TODO Not sure this is the correct way... } void VulkanBase::prepare() { //Allocate command Buffers createCommandBuffers(&initCommandBuffer, 1, VK_COMMAND_BUFFER_LEVEL_PRIMARY); createCommandBuffers(&presentCommandBuffer, 1, VK_COMMAND_BUFFER_LEVEL_PRIMARY); commandBuffersVector.push_back(initCommandBuffer); commandBuffersVector.push_back(presentCommandBuffer); //Initialize command Buffers setupInitCommandBuffer(); // Submit initialization command buffer to the queue VkSubmitInfo submitInfo = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pNext = nullptr, .waitSemaphoreCount = 0, .pWaitSemaphores = nullptr, .pWaitDstStageMask = VK_FLAGS_NONE, .commandBufferCount = 1, .pCommandBuffers = &initCommandBuffer, .signalSemaphoreCount = 0, .pSignalSemaphores = nullptr }; CHECK_RESULT(vkQueueSubmit(queue,1,&submitInfo, VK_NULL_HANDLE)); CHECK_RESULT(vkQueueWaitIdle(queue)); vkFreeCommandBuffers(device, commandPool, 1, &initCommandBuffer); #ifdef _DEBUG std::cout << "\n+++ initCommandBuffer work complete!\n"; std::cout << "\n******* Rendering Start ******\n"; #endif // _DEBUG }
VkResult create_surface(VkInstance vk_instance, VkPhysicalDevice gpu, VkDevice* out_device, DrawCommandBuffer* out_draw_command_buffer, SwapChain* out_swap_chain) { if ((out_swap_chain == nullptr) || (out_draw_command_buffer == nullptr)) { return VK_ERROR_INITIALIZATION_FAILED; } VkWin32SurfaceCreateInfoKHR surfaceCreateInfo; surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; surfaceCreateInfo.hinstance = out_swap_chain->instance; surfaceCreateInfo.hwnd = out_swap_chain->window; VK_THROW(vkCreateWin32SurfaceKHR(vk_instance, &surfaceCreateInfo, NULL, &(out_swap_chain->surface))); uint32_t queue_count; vkGetPhysicalDeviceQueueFamilyProperties(gpu, &queue_count, nullptr); assert(queue_count > 0); std::vector<VkBool32> support_presentable_swap_chain(queue_count); std::vector<VkQueueFamilyProperties> properties(queue_count); vkGetPhysicalDeviceQueueFamilyProperties(gpu, &queue_count, properties.data()); for (uint32_t qidx = 0; qidx < queue_count; ++qidx) { vkGetPhysicalDeviceSurfaceSupportKHR(gpu, qidx, out_swap_chain->surface, &(support_presentable_swap_chain[qidx])); } uint32_t graphics_queue = UINT32_MAX; uint32_t swap_chain_queue = UINT32_MAX; for (uint32_t qidx = 0; qidx < queue_count; ++qidx) { if (check_flag(properties[qidx].queueFlags, VK_QUEUE_GRAPHICS_BIT) && properties[qidx].queueCount > 0) { graphics_queue = qidx; if (support_presentable_swap_chain[qidx]) { swap_chain_queue = qidx; break; } } } if (swap_chain_queue == UINT32_MAX) // Can't find a graphic queue that also support swap chain. Select two different queue. { for (uint32_t qidx = 0; qidx < queue_count; ++qidx) { if (support_presentable_swap_chain[qidx] && (properties[qidx].queueCount > 0)) { swap_chain_queue = qidx; break; } } } // Generate error if could not find both a graphics and a present queue if ((graphics_queue == UINT32_MAX) || (swap_chain_queue == UINT32_MAX)) { vkDestroySurfaceKHR(vk_instance, out_swap_chain->surface, nullptr); out_swap_chain->surface = nullptr; return VK_ERROR_INITIALIZATION_FAILED; } uint32_t format_count; VK_THROW(vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, out_swap_chain->surface, &format_count, NULL)); std::vector<VkSurfaceFormatKHR> surface_formats(format_count); VK_THROW(vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, out_swap_chain->surface, &format_count, surface_formats.data())); // If the format list includes just one entry of VK_FORMAT_UNDEFINED, // the surface has no preferred format. Otherwise, at least one // supported format will be returned if (format_count == 1 && surface_formats[0].format == VK_FORMAT_UNDEFINED) { out_swap_chain->surface_format.format = VK_FORMAT_B8G8R8A8_UNORM; out_swap_chain->surface_format.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; } else { assert(format_count > 0); out_swap_chain->surface_format = surface_formats[0]; } VK_THROW(create_device(gpu, graphics_queue, swap_chain_queue, out_device)); vkGetDeviceQueue(*out_device, graphics_queue, 0, &out_draw_command_buffer->draw_queue); vkGetDeviceQueue(*out_device, swap_chain_queue, 0, &out_swap_chain->present_queue); out_draw_command_buffer->queue_family_idx = graphics_queue; out_swap_chain->queue_family_idx = swap_chain_queue; out_swap_chain->gpu = gpu; return VK_SUCCESS; }
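create_surface above delegates logical-device creation to a create_device(gpu, graphics_queue, swap_chain_queue, out_device) helper that is not shown. A hypothetical sketch of it: one VkDeviceQueueCreateInfo per distinct family (graphics and, if different, present), with the swapchain extension enabled:

// Hypothetical sketch of the create_device() helper called above.
VkResult create_device(VkPhysicalDevice gpu, uint32_t graphics_queue,
                       uint32_t swap_chain_queue, VkDevice* out_device)
{
    float priority = 1.0f;
    std::vector<VkDeviceQueueCreateInfo> queue_infos;

    VkDeviceQueueCreateInfo queue_info = {};
    queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queue_info.queueFamilyIndex = graphics_queue;
    queue_info.queueCount = 1;
    queue_info.pQueuePriorities = &priority;
    queue_infos.push_back(queue_info);

    // Present on a second family only if it differs from the graphics family
    if (swap_chain_queue != graphics_queue)
    {
        queue_info.queueFamilyIndex = swap_chain_queue;
        queue_infos.push_back(queue_info);
    }

    const char* extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };

    VkDeviceCreateInfo device_info = {};
    device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_info.queueCreateInfoCount = (uint32_t)queue_infos.size();
    device_info.pQueueCreateInfos = queue_infos.data();
    device_info.enabledExtensionCount = 1;
    device_info.ppEnabledExtensionNames = extensions;

    return vkCreateDevice(gpu, &device_info, nullptr, out_device);
}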
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Swapchain Initialization Sample"; /* * Set up swapchain: * - Get supported uses for all queues * - Try to find a queue that supports both graphics and present * - If no queue supports both, find a present queue and make sure we have a * graphics queue * - Get a list of supported formats and use the first one * - Get surface properties and present modes and use them to create a swap * chain * - Create swap chain buffers * - For each buffer, create a color attachment view and set its layout to * color attachment */ init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_connection(info); init_window_size(info, 50, 50); init_window(info); /* VULKAN_KEY_START */ // Construct the surface description: #ifdef _WIN32 VkWin32SurfaceCreateInfoKHR createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; createInfo.pNext = NULL; createInfo.hinstance = info.connection; createInfo.hwnd = info.window; res = vkCreateWin32SurfaceKHR(info.inst, &createInfo, NULL, &info.surface); #else // _WIN32 VkXcbSurfaceCreateInfoKHR createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR; createInfo.pNext = NULL; createInfo.connection = info.connection; createInfo.window = info.window; res = vkCreateXcbSurfaceKHR(info.inst, &createInfo, NULL, &info.surface); #endif // _WIN32 assert(res == VK_SUCCESS); // Iterate over each queue to learn whether it supports presenting: VkBool32 *supportsPresent = (VkBool32 *)malloc(info.queue_count * sizeof(VkBool32)); for (uint32_t i = 0; i < info.queue_count; i++) { vkGetPhysicalDeviceSurfaceSupportKHR(info.gpus[0], i, info.surface, &supportsPresent[i]); } // Search for a graphics queue and a present queue in the array of queue // families, try to find one that supports both uint32_t graphicsQueueNodeIndex = UINT32_MAX; for (uint32_t i = 0; i < info.queue_count; i++) { if ((info.queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) { if (supportsPresent[i] == VK_TRUE) { graphicsQueueNodeIndex = i; break; } } } free(supportsPresent); // Generate error if could not find a queue that supports both a graphics // and present if (graphicsQueueNodeIndex == UINT32_MAX) { std::cout << "Could not find a queue that supports both graphics and " "present\n"; exit(-1); } info.graphics_queue_family_index = graphicsQueueNodeIndex; init_device(info); // Get the list of VkFormats that are supported: uint32_t formatCount; res = vkGetPhysicalDeviceSurfaceFormatsKHR(info.gpus[0], info.surface, &formatCount, NULL); assert(res == VK_SUCCESS); VkSurfaceFormatKHR *surfFormats = (VkSurfaceFormatKHR *)malloc(formatCount * sizeof(VkSurfaceFormatKHR)); res = vkGetPhysicalDeviceSurfaceFormatsKHR(info.gpus[0], info.surface, &formatCount, surfFormats); assert(res == VK_SUCCESS); // If the format list includes just one entry of VK_FORMAT_UNDEFINED, // the surface has no preferred format. Otherwise, at least one // supported format will be returned. 
if (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED) { info.format = VK_FORMAT_B8G8R8A8_UNORM; } else { assert(formatCount >= 1); info.format = surfFormats[0].format; } VkSurfaceCapabilitiesKHR surfCapabilities; res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(info.gpus[0], info.surface, &surfCapabilities); assert(res == VK_SUCCESS); uint32_t presentModeCount; res = vkGetPhysicalDeviceSurfacePresentModesKHR(info.gpus[0], info.surface, &presentModeCount, NULL); assert(res == VK_SUCCESS); VkPresentModeKHR *presentModes = (VkPresentModeKHR *)malloc(presentModeCount * sizeof(VkPresentModeKHR)); res = vkGetPhysicalDeviceSurfacePresentModesKHR( info.gpus[0], info.surface, &presentModeCount, presentModes); assert(res == VK_SUCCESS); VkExtent2D swapChainExtent; // width and height are either both -1, or both not -1. if (surfCapabilities.currentExtent.width == (uint32_t)-1) { // If the surface size is undefined, the size is set to // the size of the images requested. swapChainExtent.width = info.width; swapChainExtent.height = info.height; } else { // If the surface size is defined, the swap chain size must match swapChainExtent = surfCapabilities.currentExtent; } // If mailbox mode is available, use it, as is the lowest-latency non- // tearing mode. If not, try IMMEDIATE which will usually be available, // and is fastest (though it tears). If not, fall back to FIFO which is // always available. VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR; for (size_t i = 0; i < presentModeCount; i++) { if (presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) { swapchainPresentMode = VK_PRESENT_MODE_MAILBOX_KHR; break; } if ((swapchainPresentMode != VK_PRESENT_MODE_MAILBOX_KHR) && (presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)) { swapchainPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; } } // Determine the number of VkImage's to use in the swap chain (we desire to // own only 1 image at a time, besides the images being displayed and // queued for display): uint32_t desiredNumberOfSwapChainImages = surfCapabilities.minImageCount + 1; if ((surfCapabilities.maxImageCount > 0) && (desiredNumberOfSwapChainImages > surfCapabilities.maxImageCount)) { // Application must settle for fewer images than desired: desiredNumberOfSwapChainImages = surfCapabilities.maxImageCount; } VkSurfaceTransformFlagBitsKHR preTransform; if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) { preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; } else { preTransform = surfCapabilities.currentTransform; } VkSwapchainCreateInfoKHR swap_chain = {}; swap_chain.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; swap_chain.pNext = NULL; swap_chain.surface = info.surface; swap_chain.minImageCount = desiredNumberOfSwapChainImages; swap_chain.imageFormat = info.format; swap_chain.imageExtent.width = swapChainExtent.width; swap_chain.imageExtent.height = swapChainExtent.height; swap_chain.preTransform = preTransform; swap_chain.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; swap_chain.imageArrayLayers = 1; swap_chain.presentMode = swapchainPresentMode; swap_chain.oldSwapchain = NULL; swap_chain.clipped = true; swap_chain.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; swap_chain.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; swap_chain.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; swap_chain.queueFamilyIndexCount = 0; swap_chain.pQueueFamilyIndices = NULL; res = vkCreateSwapchainKHR(info.device, &swap_chain, NULL, &info.swap_chain); assert(res == VK_SUCCESS); res = 
vkGetSwapchainImagesKHR(info.device, info.swap_chain, &info.swapchainImageCount, NULL); assert(res == VK_SUCCESS); VkImage *swapchainImages = (VkImage *)malloc(info.swapchainImageCount * sizeof(VkImage)); assert(swapchainImages); res = vkGetSwapchainImagesKHR(info.device, info.swap_chain, &info.swapchainImageCount, swapchainImages); assert(res == VK_SUCCESS); info.buffers.resize(info.swapchainImageCount); // Going to need a command buffer to send the memory barriers in // set_image_layout but we couldn't have created one before we knew // what our graphics_queue_family_index is, but now that we have it, // create the command buffer init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); vkGetDeviceQueue(info.device, info.graphics_queue_family_index, 0, &info.queue); for (uint32_t i = 0; i < info.swapchainImageCount; i++) { VkImageViewCreateInfo color_image_view = {}; color_image_view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; color_image_view.pNext = NULL; color_image_view.format = info.format; color_image_view.components.r = VK_COMPONENT_SWIZZLE_R; color_image_view.components.g = VK_COMPONENT_SWIZZLE_G; color_image_view.components.b = VK_COMPONENT_SWIZZLE_B; color_image_view.components.a = VK_COMPONENT_SWIZZLE_A; color_image_view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_image_view.subresourceRange.baseMipLevel = 0; color_image_view.subresourceRange.levelCount = 1; color_image_view.subresourceRange.baseArrayLayer = 0; color_image_view.subresourceRange.layerCount = 1; color_image_view.viewType = VK_IMAGE_VIEW_TYPE_2D; color_image_view.flags = 0; info.buffers[i].image = swapchainImages[i]; set_image_layout(info, info.buffers[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); color_image_view.image = info.buffers[i].image; res = vkCreateImageView(info.device, &color_image_view, NULL, &info.buffers[i].view); assert(res == VK_SUCCESS); } free(swapchainImages); execute_end_command_buffer(info); execute_queue_command_buffer(info); /* VULKAN_KEY_END */ /* Clean Up */ VkCommandBuffer cmd_bufs[1] = {info.cmd}; vkFreeCommandBuffers(info.device, info.cmd_pool, 1, cmd_bufs); vkDestroyCommandPool(info.device, info.cmd_pool, NULL); for (uint32_t i = 0; i < info.swapchainImageCount; i++) { vkDestroyImageView(info.device, info.buffers[i].view, NULL); } vkDestroySwapchainKHR(info.device, info.swap_chain, NULL); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
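The sample above records the layout transitions for every swapchain image through a set_image_layout() helper that is not shown in this listing. As a rough sketch of what such a helper typically records (the function name, stage masks, and access masks here are assumptions, not the sample's actual code), it boils down to a single VkImageMemoryBarrier:

// Minimal sketch of a set_image_layout()-style helper, assuming the command
// buffer is already in the recording state. Access/stage masks are illustrative.
static void set_image_layout_sketch(VkCommandBuffer cmd, VkImage image,
                                    VkImageAspectFlags aspectMask,
                                    VkImageLayout oldLayout, VkImageLayout newLayout) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.oldLayout = oldLayout;
    barrier.newLayout = newLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = {aspectMask, 0, 1, 0, 1};
    barrier.srcAccessMask = 0;
    if (newLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
        barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    // The transition only takes effect once the command buffer is submitted and
    // executed, which is why the sample submits the buffer right after the loop.
    vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         0, 0, NULL, 0, NULL, 1, &barrier);
}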
bool VulkanDevice::Init(VulkanInstance * vulkanInstance, HWND hwnd) { VkResult result; // GPU uint32_t numGPUs = 0; vkEnumeratePhysicalDevices(vulkanInstance->GetInstance(), &numGPUs, VK_NULL_HANDLE); if (numGPUs == 0) { gLogManager->AddMessage("ERROR: No GPUs found!"); return false; } std::vector<VkPhysicalDevice> pGPUs(numGPUs); vkEnumeratePhysicalDevices(vulkanInstance->GetInstance(), &numGPUs, pGPUs.data()); gpu = pGPUs[0]; vkGetPhysicalDeviceProperties(gpu, &gpuProperties); vkGetPhysicalDeviceMemoryProperties(gpu, &memoryProperties); gLogManager->AddMessage("Rendering with: " + std::string(gpuProperties.deviceName)); // Queue family uint32_t numQueueFamily = 0; vkGetPhysicalDeviceQueueFamilyProperties(gpu, &numQueueFamily, VK_NULL_HANDLE); if (numQueueFamily == 0) { gLogManager->AddMessage("ERROR: No Queue Families were found!"); return false; } queueFamiliyProperties.resize(numQueueFamily); vkGetPhysicalDeviceQueueFamilyProperties(gpu, &numQueueFamily, queueFamiliyProperties.data()); // Surface VkWin32SurfaceCreateInfoKHR win32SurfaceCI{}; win32SurfaceCI.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; win32SurfaceCI.hinstance = GetModuleHandle(NULL); win32SurfaceCI.hwnd = hwnd; result = vkCreateWin32SurfaceKHR(vulkanInstance->GetInstance(), &win32SurfaceCI, VK_NULL_HANDLE, &surface); if (result != VK_SUCCESS) { gLogManager->AddMessage("ERROR: Couldn't create Win32 Surface!"); return false; } VkBool32 * supportsPresent = new VkBool32[queueFamiliyProperties.size()]; for (uint32_t i = 0; i < queueFamiliyProperties.size(); i++) vkGetPhysicalDeviceSurfaceSupportKHR(gpu, i, surface, &supportsPresent[i]); graphicsQueueFamilyIndex = UINT32_MAX; for (uint32_t i = 0; i < queueFamiliyProperties.size(); i++) { if ((queueFamiliyProperties[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) { if (supportsPresent[i] == VK_TRUE) { graphicsQueueFamilyIndex = i; break; } } } delete[] supportsPresent; if (graphicsQueueFamilyIndex == UINT32_MAX) { gLogManager->AddMessage("ERROR: Couldn't find a graphics queue family index!"); return false; } uint32_t numFormats; result = vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &numFormats, VK_NULL_HANDLE); if (result != VK_SUCCESS) { gLogManager->AddMessage("ERROR: Couldn't get surface formats!"); return false; } VkSurfaceFormatKHR * pSurfaceFormats = new VkSurfaceFormatKHR[numFormats]; result = vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &numFormats, pSurfaceFormats); if (numFormats == 1 && pSurfaceFormats[0].format == VK_FORMAT_UNDEFINED) format = VK_FORMAT_B8G8R8A8_UNORM; else format = pSurfaceFormats[0].format; // Device queue float pQueuePriorities[] = { 1.0f }; VkDeviceQueueCreateInfo deviceQueueCI{}; deviceQueueCI.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; deviceQueueCI.queueCount = 1; deviceQueueCI.queueFamilyIndex = graphicsQueueFamilyIndex; deviceQueueCI.pQueuePriorities = pQueuePriorities; VkPhysicalDeviceFeatures deviceFeatures{}; deviceFeatures.shaderClipDistance = VK_TRUE; deviceFeatures.shaderCullDistance = VK_TRUE; deviceFeatures.geometryShader = VK_TRUE; deviceFeatures.shaderTessellationAndGeometryPointSize = VK_TRUE; deviceFeatures.fillModeNonSolid = VK_TRUE; // Device VkDeviceCreateInfo deviceCI{}; deviceCI.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; deviceCI.queueCreateInfoCount = 1; deviceCI.pQueueCreateInfos = &deviceQueueCI; deviceCI.enabledExtensionCount = (uint32_t)deviceExtensions.size(); deviceCI.ppEnabledExtensionNames = deviceExtensions.data(); deviceCI.pEnabledFeatures = &deviceFeatures; result = 
vkCreateDevice(gpu, &deviceCI, VK_NULL_HANDLE, &device); if (result != VK_SUCCESS) { gLogManager->AddMessage("ERROR: vkCreateDevice() failed!"); return false; } vkGetDeviceQueue(device, graphicsQueueFamilyIndex, 0, &deviceQueue); return true; }
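Note that Init() above enables geometryShader, shaderTessellationAndGeometryPointSize, and fillModeNonSolid unconditionally; on a GPU that lacks one of them, vkCreateDevice() fails with VK_ERROR_FEATURE_NOT_PRESENT. A small, hedged sketch of guarding the request (the helper is illustrative and not part of the sample; only the feature names follow it):

// Illustrative helper: build a feature set that only requests what the
// physical device reports as supported.
static VkPhysicalDeviceFeatures SelectSupportedFeatures(VkPhysicalDevice gpu)
{
    VkPhysicalDeviceFeatures supported{};
    vkGetPhysicalDeviceFeatures(gpu, &supported);

    VkPhysicalDeviceFeatures enabled{};
    enabled.geometryShader = supported.geometryShader;
    enabled.fillModeNonSolid = supported.fillModeNonSolid;
    enabled.shaderClipDistance = supported.shaderClipDistance;
    enabled.shaderCullDistance = supported.shaderCullDistance;
    enabled.shaderTessellationAndGeometryPointSize = supported.shaderTessellationAndGeometryPointSize;
    return enabled; // the caller stores this and points deviceCI.pEnabledFeatures at it
}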
// Create the base Vulkan objects needed by the GrVkGpu object const GrVkBackendContext* GrVkBackendContext::Create(uint32_t* presentQueueIndexPtr, bool(*canPresent)(VkInstance, VkPhysicalDevice, uint32_t queueIndex)) { VkPhysicalDevice physDev; VkDevice device; VkInstance inst; VkResult err; const VkApplicationInfo app_info = { VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType nullptr, // pNext "vktest", // pApplicationName 0, // applicationVersion "vktest", // pEngineName 0, // engineVerison kGrVkMinimumVersion, // apiVersion }; GrVkExtensions extensions; extensions.initInstance(kGrVkMinimumVersion); SkTArray<const char*> instanceLayerNames; SkTArray<const char*> instanceExtensionNames; uint32_t extensionFlags = 0; #ifdef ENABLE_VK_LAYERS for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) { if (extensions.hasInstanceLayer(kDebugLayerNames[i])) { instanceLayerNames.push_back(kDebugLayerNames[i]); } } if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) { instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); extensionFlags |= kEXT_debug_report_GrVkExtensionFlag; } #endif if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) { instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME); extensionFlags |= kKHR_surface_GrVkExtensionFlag; } if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); extensionFlags |= kKHR_swapchain_GrVkExtensionFlag; } #ifdef SK_BUILD_FOR_WIN if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) { instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag; } #elif SK_BUILD_FOR_ANDROID if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) { instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); extensionFlags |= kKHR_android_surface_GrVkExtensionFlag; } #elif SK_BUILD_FOR_UNIX if (extensions.hasInstanceExtension(VK_KHR_XLIB_SURFACE_EXTENSION_NAME)) { instanceExtensionNames.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME); extensionFlags |= kKHR_xlib_surface_GrVkExtensionFlag; } #endif const VkInstanceCreateInfo instance_create = { VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType nullptr, // pNext 0, // flags &app_info, // pApplicationInfo (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount instanceLayerNames.begin(), // ppEnabledLayerNames (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount instanceExtensionNames.begin(), // ppEnabledExtensionNames }; err = vkCreateInstance(&instance_create, nullptr, &inst); if (err < 0) { SkDebugf("vkCreateInstance failed: %d\n", err); return nullptr; } uint32_t gpuCount; err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr); if (err) { SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); vkDestroyInstance(inst, nullptr); return nullptr; } SkASSERT(gpuCount > 0); // Just returning the first physical device instead of getting the whole array. 
// TODO: find best match for our needs gpuCount = 1; err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev); if (err) { SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); vkDestroyInstance(inst, nullptr); return nullptr; } // query to get the initial queue props size uint32_t queueCount; vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr); SkASSERT(queueCount >= 1); SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties)); // now get the actual queue props VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get(); vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps); // iterate to find the graphics queue uint32_t graphicsQueueIndex = queueCount; for (uint32_t i = 0; i < queueCount; i++) { if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { graphicsQueueIndex = i; break; } } SkASSERT(graphicsQueueIndex < queueCount); // iterate to find the present queue, if needed uint32_t presentQueueIndex = graphicsQueueIndex; if (presentQueueIndexPtr && canPresent) { for (uint32_t i = 0; i < queueCount; i++) { if (canPresent(inst, physDev, i)) { presentQueueIndex = i; break; } } SkASSERT(presentQueueIndex < queueCount); *presentQueueIndexPtr = presentQueueIndex; } extensions.initDevice(kGrVkMinimumVersion, inst, physDev); SkTArray<const char*> deviceLayerNames; SkTArray<const char*> deviceExtensionNames; #ifdef ENABLE_VK_LAYERS for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) { if (extensions.hasDeviceLayer(kDebugLayerNames[i])) { deviceLayerNames.push_back(kDebugLayerNames[i]); } } #endif if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); extensionFlags |= kKHR_swapchain_GrVkExtensionFlag; } if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) { deviceExtensionNames.push_back("VK_NV_glsl_shader"); extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag; } // query to get the physical device properties VkPhysicalDeviceFeatures deviceFeatures; vkGetPhysicalDeviceFeatures(physDev, &deviceFeatures); // this looks like it would slow things down, // and we can't depend on it on all platforms deviceFeatures.robustBufferAccess = VK_FALSE; uint32_t featureFlags = 0; if (deviceFeatures.geometryShader) { featureFlags |= kGeometryShader_GrVkFeatureFlag; } if (deviceFeatures.dualSrcBlend) { featureFlags |= kDualSrcBlend_GrVkFeatureFlag; } if (deviceFeatures.sampleRateShading) { featureFlags |= kSampleRateShading_GrVkFeatureFlag; } float queuePriorities[1] = { 0.0 }; // Here we assume no need for swapchain queue // If one is needed, the client will need its own setup code const VkDeviceQueueCreateInfo queueInfo[2] = { { VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType nullptr, // pNext 0, // VkDeviceQueueCreateFlags graphicsQueueIndex, // queueFamilyIndex 1, // queueCount queuePriorities, // pQueuePriorities }, { VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType nullptr, // pNext 0, // VkDeviceQueueCreateFlags presentQueueIndex, // queueFamilyIndex 1, // queueCount queuePriorities, // pQueuePriorities } }; uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 
2 : 1; const VkDeviceCreateInfo deviceInfo = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType nullptr, // pNext 0, // VkDeviceCreateFlags queueInfoCount, // queueCreateInfoCount queueInfo, // pQueueCreateInfos (uint32_t) deviceLayerNames.count(), // layerCount deviceLayerNames.begin(), // ppEnabledLayerNames (uint32_t) deviceExtensionNames.count(), // extensionCount deviceExtensionNames.begin(), // ppEnabledExtensionNames &deviceFeatures // ppEnabledFeatures }; err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device); if (err) { SkDebugf("CreateDevice failed: %d\n", err); vkDestroyInstance(inst, nullptr); return nullptr; } VkQueue queue; vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue); GrVkBackendContext* ctx = new GrVkBackendContext(); ctx->fInstance = inst; ctx->fPhysicalDevice = physDev; ctx->fDevice = device; ctx->fQueue = queue; ctx->fGraphicsQueueIndex = graphicsQueueIndex; ctx->fMinAPIVersion = kGrVkMinimumVersion; ctx->fExtensions = extensionFlags; ctx->fFeatures = featureFlags; ctx->fInterface.reset(GrVkCreateInterface(inst, device, extensionFlags)); return ctx; }
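GrVkBackendContext::Create() delegates the present-capability check to the canPresent callback, which it never defines. A plausible, hedged client-side implementation queries vkGetPhysicalDeviceSurfaceSupportKHR against the surface the client intends to present to; the gSurface global below is an assumption for illustration, since the callback type carries no surface parameter:

// Hypothetical callback matching bool(*)(VkInstance, VkPhysicalDevice, uint32_t).
static VkSurfaceKHR gSurface = VK_NULL_HANDLE; // assumed to be created by the client beforehand

static bool CanPresentToSurface(VkInstance /*instance*/, VkPhysicalDevice physDev, uint32_t queueIndex) {
    VkBool32 supported = VK_FALSE;
    vkGetPhysicalDeviceSurfaceSupportKHR(physDev, queueIndex, gSurface, &supported);
    return supported == VK_TRUE;
}

// Usage sketch:
//   uint32_t presentQueueIndex;
//   const GrVkBackendContext* ctx = GrVkBackendContext::Create(&presentQueueIndex, CanPresentToSurface);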
bool VulkanContext::CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer) { u32 queue_family_count; vkGetPhysicalDeviceQueueFamilyProperties(m_physical_device, &queue_family_count, nullptr); if (queue_family_count == 0) { ERROR_LOG(VIDEO, "No queue families found on specified vulkan physical device."); return false; } std::vector<VkQueueFamilyProperties> queue_family_properties(queue_family_count); vkGetPhysicalDeviceQueueFamilyProperties(m_physical_device, &queue_family_count, queue_family_properties.data()); INFO_LOG(VIDEO, "%u vulkan queue families", queue_family_count); // Find graphics and present queues. m_graphics_queue_family_index = queue_family_count; m_present_queue_family_index = queue_family_count; for (uint32_t i = 0; i < queue_family_count; i++) { VkBool32 graphics_supported = queue_family_properties[i].queueFlags & VK_QUEUE_GRAPHICS_BIT; if (graphics_supported) { m_graphics_queue_family_index = i; // Quit now, no need for a present queue. if (!surface) { break; } } if (surface) { VkBool32 present_supported; VkResult res = vkGetPhysicalDeviceSurfaceSupportKHR(m_physical_device, i, surface, &present_supported); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: "); return false; } if (present_supported) { m_present_queue_family_index = i; } // Prefer one queue family index that does both graphics and present. if (graphics_supported && present_supported) { break; } } } if (m_graphics_queue_family_index == queue_family_count) { ERROR_LOG(VIDEO, "Vulkan: Failed to find an acceptable graphics queue."); return false; } if (surface && m_present_queue_family_index == queue_family_count) { ERROR_LOG(VIDEO, "Vulkan: Failed to find an acceptable present queue."); return false; } VkDeviceCreateInfo device_info = {}; device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_info.pNext = nullptr; device_info.flags = 0; static constexpr float queue_priorities[] = {1.0f}; VkDeviceQueueCreateInfo graphics_queue_info = {}; graphics_queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; graphics_queue_info.pNext = nullptr; graphics_queue_info.flags = 0; graphics_queue_info.queueFamilyIndex = m_graphics_queue_family_index; graphics_queue_info.queueCount = 1; graphics_queue_info.pQueuePriorities = queue_priorities; VkDeviceQueueCreateInfo present_queue_info = {}; present_queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; present_queue_info.pNext = nullptr; present_queue_info.flags = 0; present_queue_info.queueFamilyIndex = m_present_queue_family_index; present_queue_info.queueCount = 1; present_queue_info.pQueuePriorities = queue_priorities; std::array<VkDeviceQueueCreateInfo, 2> queue_infos = {{ graphics_queue_info, present_queue_info, }}; device_info.queueCreateInfoCount = 1; if (m_graphics_queue_family_index != m_present_queue_family_index) { device_info.queueCreateInfoCount = 2; } device_info.pQueueCreateInfos = queue_infos.data(); ExtensionList enabled_extensions; if (!SelectDeviceExtensions(&enabled_extensions, surface != VK_NULL_HANDLE)) return false; device_info.enabledLayerCount = 0; device_info.ppEnabledLayerNames = nullptr; device_info.enabledExtensionCount = static_cast<uint32_t>(enabled_extensions.size()); device_info.ppEnabledExtensionNames = enabled_extensions.data(); // Check for required features before creating. 
if (!SelectDeviceFeatures()) return false; device_info.pEnabledFeatures = &m_device_features; // Enable debug layer on debug builds if (enable_validation_layer) { static const char* layer_names[] = {"VK_LAYER_LUNARG_standard_validation"}; device_info.enabledLayerCount = 1; device_info.ppEnabledLayerNames = layer_names; } VkResult res = vkCreateDevice(m_physical_device, &device_info, nullptr, &m_device); if (res != VK_SUCCESS) { LOG_VULKAN_ERROR(res, "vkCreateDevice failed: "); return false; } // With the device created, we can fill the remaining entry points. if (!LoadVulkanDeviceFunctions(m_device)) return false; // Grab the graphics and present queues. vkGetDeviceQueue(m_device, m_graphics_queue_family_index, 0, &m_graphics_queue); if (surface) { vkGetDeviceQueue(m_device, m_present_queue_family_index, 0, &m_present_queue); } return true; }
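When CreateDevice() ends up with different graphics and present queue family indices, the swapchain created later has to allow both families to access its images. That swapchain code is not shown here; as a hedged sketch of the usual pattern (an illustrative helper, not Dolphin's actual code), the sharing mode is switched to concurrent when the indices differ:

// Fill the sharing-mode fields of a swapchain create info for one or two queue families.
static void ConfigureSwapchainSharing(VkSwapchainCreateInfoKHR& info,
                                      uint32_t graphics_family, uint32_t present_family,
                                      const uint32_t* both_families /* pointer to 2 indices */)
{
  if (graphics_family != present_family)
  {
    // Both families may access the images without explicit ownership transfers.
    info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
    info.queueFamilyIndexCount = 2;
    info.pQueueFamilyIndices = both_families;
  }
  else
  {
    info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    info.queueFamilyIndexCount = 0;
    info.pQueueFamilyIndices = nullptr;
  }
}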
bool VulkanContext::InitQueue() { // Iterate over each queue to learn whether it supports presenting: VkBool32 *supportsPresent = new VkBool32[queue_count]; for (uint32_t i = 0; i < queue_count; i++) { vkGetPhysicalDeviceSurfaceSupportKHR(physical_devices_[physical_device_], i, surface_, &supportsPresent[i]); } // Search for a graphics queue and a present queue in the array of queue // families, try to find one that supports both uint32_t graphicsQueueNodeIndex = UINT32_MAX; uint32_t presentQueueNodeIndex = UINT32_MAX; for (uint32_t i = 0; i < queue_count; i++) { if ((queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) { if (graphicsQueueNodeIndex == UINT32_MAX) { graphicsQueueNodeIndex = i; } if (supportsPresent[i] == VK_TRUE) { graphicsQueueNodeIndex = i; presentQueueNodeIndex = i; break; } } } if (presentQueueNodeIndex == UINT32_MAX) { // If didn't find a queue that supports both graphics and present, then // find a separate present queue. for (uint32_t i = 0; i < queue_count; ++i) { if (supportsPresent[i] == VK_TRUE) { presentQueueNodeIndex = i; break; } } } delete[] supportsPresent; // Generate error if could not find both a graphics and a present queue if (graphicsQueueNodeIndex == UINT32_MAX || presentQueueNodeIndex == UINT32_MAX) { ELOG("Could not find a graphics and a present queue"); return false; } graphics_queue_family_index_ = graphicsQueueNodeIndex; // Get the list of VkFormats that are supported: uint32_t formatCount = 0; VkResult res = vkGetPhysicalDeviceSurfaceFormatsKHR(physical_devices_[physical_device_], surface_, &formatCount, nullptr); _assert_msg_(G3D, res == VK_SUCCESS, "Failed to get formats for device %p: %d surface: %p", physical_devices_[physical_device_], (int)res, surface_); if (res != VK_SUCCESS) { return false; } std::vector<VkSurfaceFormatKHR> surfFormats(formatCount); res = vkGetPhysicalDeviceSurfaceFormatsKHR(physical_devices_[physical_device_], surface_, &formatCount, surfFormats.data()); assert(res == VK_SUCCESS); if (res != VK_SUCCESS) { return false; } // If the format list includes just one entry of VK_FORMAT_UNDEFINED, // the surface has no preferred format. Otherwise, at least one // supported format will be returned. if (formatCount == 0 || (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED)) { ILOG("swapchain_format: Falling back to B8G8R8A8_UNORM"); swapchainFormat_ = VK_FORMAT_B8G8R8A8_UNORM; } else { swapchainFormat_ = VK_FORMAT_UNDEFINED; for (uint32_t i = 0; i < formatCount; ++i) { if (surfFormats[i].colorSpace != VK_COLORSPACE_SRGB_NONLINEAR_KHR) { continue; } if (surfFormats[i].format == VK_FORMAT_B8G8R8A8_UNORM || surfFormats[i].format == VK_FORMAT_R8G8B8A8_UNORM) { swapchainFormat_ = surfFormats[i].format; break; } } if (swapchainFormat_ == VK_FORMAT_UNDEFINED) { // Okay, take the first one then. swapchainFormat_ = surfFormats[0].format; } ILOG("swapchain_format: %d (/%d)", swapchainFormat_, formatCount); } vkGetDeviceQueue(device_, graphics_queue_family_index_, 0, &gfx_queue_); ILOG("gfx_queue_: %p", gfx_queue_); return true; }
bool Tutorial01::GetDeviceQueue() { vkGetDeviceQueue( Vulkan.Device, Vulkan.QueueFamilyIndex, 0, &Vulkan.Queue ); return true; }
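GetDeviceQueue() assumes Vulkan.QueueFamilyIndex was picked earlier, during physical-device selection. A minimal sketch of that earlier step (the helper name and signature are made up for illustration, not part of the tutorial):

// Requires <vector>. Returns the first queue family that exposes graphics support.
bool SelectGraphicsQueueFamily(VkPhysicalDevice physicalDevice, uint32_t &familyIndex) {
    uint32_t count = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
    if (count == 0) {
        return false;
    }
    std::vector<VkQueueFamilyProperties> families(count);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, families.data());
    for (uint32_t i = 0; i < count; ++i) {
        if ((families[i].queueCount > 0) && (families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)) {
            familyIndex = i;
            return true;
        }
    }
    return false;
}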
bool initSwapChains() { std::cout << "initing swapchain..."; if( !getSurfaceFormats() || !getSurfacePresentModes() ) { return false; } VkResult res; res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR( gDevices[0], gSurface, &gSurfaceCaps ); if( res != VK_SUCCESS ) { std::cout << "error getting surface capabilities\n"; return false; } VkExtent2D swapChainExtent = gSurfaceCaps.currentExtent; if( std::find( gPresentModes.begin(), gPresentModes.end(), VK_PRESENT_MODE_MAILBOX_KHR ) != gPresentModes.end() ) gPresentMode = VK_PRESENT_MODE_MAILBOX_KHR; else if( std::find( gPresentModes.begin(), gPresentModes.end(), VK_PRESENT_MODE_IMMEDIATE_KHR ) != gPresentModes.end() ) gPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; else gPresentMode = VK_PRESENT_MODE_FIFO_KHR; uint32_t desiredNumberOfSwapChainImages = gSurfaceCaps.minImageCount + 1; desiredNumberOfSwapChainImages = gSurfaceCaps.maxImageCount ? min( desiredNumberOfSwapChainImages, gSurfaceCaps.maxImageCount ) : desiredNumberOfSwapChainImages; VkSurfaceTransformFlagBitsKHR preTransform; preTransform = gSurfaceCaps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR ? VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR : gSurfaceCaps.currentTransform; VkSwapchainCreateInfoKHR swapChain = {}; swapChain.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; swapChain.pNext = nullptr; swapChain.surface = gSurface; swapChain.minImageCount = desiredNumberOfSwapChainImages; swapChain.imageFormat = gFormat; swapChain.imageExtent = swapChainExtent; swapChain.preTransform = preTransform; swapChain.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; swapChain.imageArrayLayers = 1; swapChain.presentMode = gPresentMode; swapChain.oldSwapchain = VK_NULL_HANDLE; swapChain.clipped = true; swapChain.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; swapChain.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; swapChain.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; swapChain.queueFamilyIndexCount = 0; swapChain.pQueueFamilyIndices = nullptr; res = vkCreateSwapchainKHR( gDevice, &swapChain, nullptr, &gSwapchain ); if( res != VK_SUCCESS ) { std::cout << "error creating swapchain " << res << std::endl; return false; } std::vector<VkImage> images; u32 imagesCount = 0; HR( vkGetSwapchainImagesKHR( gDevice, gSwapchain, &imagesCount, nullptr ) ); images.resize( imagesCount ); HR( vkGetSwapchainImagesKHR( gDevice, gSwapchain, &imagesCount, images.data() ) ); beginCommandBuffer( gCmd ); vkGetDeviceQueue( gDevice, gQueueFamilyIndex, 0, &gQueue ); gSwapBuffers.resize( imagesCount ); for( u32 i = 0; i < gSwapBuffers.size(); ++i ) { gSwapBuffers[i].image = images[i]; VkImageViewCreateInfo imageView = {}; imageView.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; imageView.pNext = nullptr; imageView.format = gFormat; imageView.components.r = VK_COMPONENT_SWIZZLE_R; imageView.components.g = VK_COMPONENT_SWIZZLE_G; imageView.components.b = VK_COMPONENT_SWIZZLE_B; imageView.components.a = VK_COMPONENT_SWIZZLE_A; imageView.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; imageView.subresourceRange.baseMipLevel = 0; imageView.subresourceRange.levelCount = 1; imageView.subresourceRange.baseArrayLayer = 0; imageView.subresourceRange.layerCount = 1; imageView.viewType = VK_IMAGE_VIEW_TYPE_2D; imageView.flags = 0; imageView.image = gSwapBuffers[i].image; setImageLayout( gCmd, gSwapBuffers[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ); HR( vkCreateImageView( gDevice, &imageView, nullptr, &gSwapBuffers[i].view ) ); } endCommandBuffer( gCmd ); executeQueue( gCmd ); std::cout << "inited\n"; return true; }
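initSwapChains() wraps several calls in an HR() macro that is not shown in this listing. A hedged guess at its shape, assuming it simply logs the failing VkResult and bails out of the calling function (so it only works in functions returning bool, like the one above):

// Hypothetical definition of the HR() convenience macro used above.
#define HR(call)                                                       \
    do {                                                               \
        VkResult _hr = (call);                                         \
        if (_hr != VK_SUCCESS) {                                       \
            std::cout << "Vulkan call failed: " << _hr << std::endl;   \
            return false;                                              \
        }                                                              \
    } while (0)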
bool create_device_queue() { vkGetDeviceQueue(vk_globals::device, vk_globals::graphics_queue.index, 0, &(vk_globals::graphics_queue.handle)); vkGetDeviceQueue(vk_globals::device, vk_globals::present_queue.index, 0, &(vk_globals::present_queue.handle)); return true; }
void VulkanExampleBase::initVulkan(bool enableValidation) { VkResult err; // Vulkan instance err = createInstance(enableValidation); if (err) { vkTools::exitFatal("Could not create Vulkan instance : \n" + vkTools::errorString(err), "Fatal error"); } #if defined(__ANDROID__) loadVulkanFunctions(instance); #endif // Physical device uint32_t gpuCount = 0; // Get number of available physical devices err = vkEnumeratePhysicalDevices(instance, &gpuCount, nullptr); assert(!err); assert(gpuCount > 0); // Enumerate devices std::vector<VkPhysicalDevice> physicalDevices(gpuCount); err = vkEnumeratePhysicalDevices(instance, &gpuCount, physicalDevices.data()); if (err) { vkTools::exitFatal("Could not enumerate phyiscal devices : \n" + vkTools::errorString(err), "Fatal error"); } // Note : // This example will always use the first physical device reported, // change the vector index if you have multiple Vulkan devices installed // and want to use another one physicalDevice = physicalDevices[0]; // Find a queue that supports graphics operations uint32_t graphicsQueueIndex = 0; uint32_t queueCount; vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueCount, NULL); assert(queueCount >= 1); std::vector<VkQueueFamilyProperties> queueProps; queueProps.resize(queueCount); vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueCount, queueProps.data()); for (graphicsQueueIndex = 0; graphicsQueueIndex < queueCount; graphicsQueueIndex++) { if (queueProps[graphicsQueueIndex].queueFlags & VK_QUEUE_GRAPHICS_BIT) break; } assert(graphicsQueueIndex < queueCount); // Vulkan device std::array<float, 1> queuePriorities = { 0.0f }; VkDeviceQueueCreateInfo queueCreateInfo = {}; queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queueCreateInfo.queueFamilyIndex = graphicsQueueIndex; queueCreateInfo.queueCount = 1; queueCreateInfo.pQueuePriorities = queuePriorities.data(); err = createDevice(queueCreateInfo, enableValidation); assert(!err); // Store properties (including limits) and features of the phyiscal device // So examples can check against them and see if a feature is actually supported vkGetPhysicalDeviceProperties(physicalDevice, &deviceProperties); vkGetPhysicalDeviceFeatures(physicalDevice, &deviceFeatures); #if defined(__ANDROID__) LOGD(deviceProperties.deviceName); #endif // Gather physical device memory properties vkGetPhysicalDeviceMemoryProperties(physicalDevice, &deviceMemoryProperties); // Get the graphics queue vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue); // Find a suitable depth format VkBool32 validDepthFormat = vkTools::getSupportedDepthFormat(physicalDevice, &depthFormat); assert(validDepthFormat); swapChain.connect(instance, physicalDevice, device); // Create synchronization objects VkSemaphoreCreateInfo semaphoreCreateInfo = vkTools::initializers::semaphoreCreateInfo(); // Create a semaphore used to synchronize image presentation // Ensures that the image is displayed before we start submitting new commands to the queu err = vkCreateSemaphore(device, &semaphoreCreateInfo, nullptr, &semaphores.presentComplete); assert(!err); // Create a semaphore used to synchronize command submission // Ensures that the image is not presented until all commands have been sumbitted and executed err = vkCreateSemaphore(device, &semaphoreCreateInfo, nullptr, &semaphores.renderComplete); assert(!err); // Set up submit info structure // Semaphores will stay the same during application lifetime // Command buffer submission info is set by each example submitInfo = 
vkTools::initializers::submitInfo(); submitInfo.pWaitDstStageMask = &submitPipelineStages; submitInfo.waitSemaphoreCount = 1; submitInfo.pWaitSemaphores = &semaphores.presentComplete; submitInfo.signalSemaphoreCount = 1; submitInfo.pSignalSemaphores = &semaphores.renderComplete; }
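The submitInfo prepared at the end of initVulkan() is reused for every frame; only the command buffer changes per submission. A minimal sketch of the per-frame submission the examples build on (the helper name is invented; queue and the semaphores come from the setup above, and <cassert> is assumed):

// Submit one draw command buffer, waiting on presentComplete and signalling
// renderComplete as configured in initVulkan().
void submitFrame(VkQueue queue, VkSubmitInfo &submitInfo, VkCommandBuffer drawCmdBuffer)
{
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &drawCmdBuffer;
    VkResult err = vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);
    assert(!err);
}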
static void setup_vulkan(GLFWwindow* window) { VkResult err; // Create Vulkan Instance { uint32_t extensions_count; const char** glfw_extensions = glfwGetRequiredInstanceExtensions(&extensions_count); VkInstanceCreateInfo create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; create_info.enabledExtensionCount = extensions_count; create_info.ppEnabledExtensionNames = glfw_extensions; #ifdef IMGUI_VULKAN_DEBUG_REPORT // enabling multiple validation layers grouped as lunarg standard validation const char* layers[] = {"VK_LAYER_LUNARG_standard_validation"}; create_info.enabledLayerCount = 1; create_info.ppEnabledLayerNames = layers; // need additional storage for char pointer to debug report extension const char** extensions = (const char**)malloc(sizeof(const char*) * (extensions_count + 1)); for (size_t i = 0; i < extensions_count; i++) extensions[i] = glfw_extensions[i]; extensions[ extensions_count ] = "VK_EXT_debug_report"; create_info.enabledExtensionCount = extensions_count+1; create_info.ppEnabledExtensionNames = extensions; #endif // IMGUI_VULKAN_DEBUG_REPORT err = vkCreateInstance(&create_info, g_Allocator, &g_Instance); check_vk_result(err); #ifdef IMGUI_VULKAN_DEBUG_REPORT free(extensions); // create the debug report callback VkDebugReportCallbackCreateInfoEXT debug_report_ci ={}; debug_report_ci.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT; debug_report_ci.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; debug_report_ci.pfnCallback = debug_report; debug_report_ci.pUserData = NULL; // get the proc address of the function pointer, required for used extensions PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(g_Instance, "vkCreateDebugReportCallbackEXT"); err = vkCreateDebugReportCallbackEXT( g_Instance, &debug_report_ci, g_Allocator, &g_Debug_Report ); check_vk_result(err); #endif // IMGUI_VULKAN_DEBUG_REPORT } // Create Window Surface { err = glfwCreateWindowSurface(g_Instance, window, g_Allocator, &g_Surface); check_vk_result(err); } // Get GPU { uint32_t gpu_count; err = vkEnumeratePhysicalDevices(g_Instance, &gpu_count, NULL); check_vk_result(err); VkPhysicalDevice* gpus = (VkPhysicalDevice*)malloc(sizeof(VkPhysicalDevice) * gpu_count); err = vkEnumeratePhysicalDevices(g_Instance, &gpu_count, gpus); check_vk_result(err); // If a number >1 of GPUs got reported, you should find the best fit GPU for your purpose // e.g. VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU if available, or with the greatest memory available, etc. // for sake of simplicity we'll just take the first one, assuming it has a graphics queue family. 
g_Gpu = gpus[0]; free(gpus); } // Get queue { uint32_t count; vkGetPhysicalDeviceQueueFamilyProperties(g_Gpu, &count, NULL); VkQueueFamilyProperties* queues = (VkQueueFamilyProperties*)malloc(sizeof(VkQueueFamilyProperties) * count); vkGetPhysicalDeviceQueueFamilyProperties(g_Gpu, &count, queues); for (uint32_t i = 0; i < count; i++) { if (queues[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { g_QueueFamily = i; break; } } free(queues); } // Check for WSI support { VkBool32 res; vkGetPhysicalDeviceSurfaceSupportKHR(g_Gpu, g_QueueFamily, g_Surface, &res); if (res != VK_TRUE) { fprintf(stderr, "Error no WSI support on physical device 0\n"); exit(-1); } } // Get Surface Format { // Per Spec Format and View Format are expected to be the same unless VK_IMAGE_CREATE_MUTABLE_BIT was set at image creation // Assuming that the default behavior is without setting this bit, there is no need for separate swapchain image and image view formats // additionally several new color spaces were introduced with Vulkan Spec v1.0.40 // hence we must make sure that a format with the most widely available color space, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, is found and used uint32_t count; vkGetPhysicalDeviceSurfaceFormatsKHR(g_Gpu, g_Surface, &count, NULL); VkSurfaceFormatKHR *formats = (VkSurfaceFormatKHR*)malloc(sizeof(VkSurfaceFormatKHR) * count); vkGetPhysicalDeviceSurfaceFormatsKHR(g_Gpu, g_Surface, &count, formats); // first check if only one format, VK_FORMAT_UNDEFINED, is available, which would imply that any format is available if (count == 1) { if( formats[0].format == VK_FORMAT_UNDEFINED ) { g_SurfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM; g_SurfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; } else { // no point in searching another format g_SurfaceFormat = formats[0]; } } else { // request several formats, the first found will be used VkFormat requestSurfaceImageFormat[] = {VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_B8G8R8_UNORM, VK_FORMAT_R8G8B8_UNORM}; VkColorSpaceKHR requestSurfaceColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; bool requestedFound = false; for (size_t i = 0; i < sizeof(requestSurfaceImageFormat) / sizeof(requestSurfaceImageFormat[0]); i++) { if( requestedFound ) { break; } for (uint32_t j = 0; j < count; j++) { if (formats[j].format == requestSurfaceImageFormat[i] && formats[j].colorSpace == requestSurfaceColorSpace) { g_SurfaceFormat = formats[j]; requestedFound = true; } } } // if none of the requested image formats could be found, use the first available if (!requestedFound) g_SurfaceFormat = formats[0]; } free(formats); } // Get Present Mode { // Request a certain mode and confirm that it is available.
// If it is not, fall back to VK_PRESENT_MODE_FIFO_KHR, which is always available #ifdef IMGUI_UNLIMITED_FRAME_RATE g_PresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; #else g_PresentMode = VK_PRESENT_MODE_FIFO_KHR; #endif uint32_t count = 0; vkGetPhysicalDeviceSurfacePresentModesKHR(g_Gpu, g_Surface, &count, nullptr); VkPresentModeKHR* presentModes = (VkPresentModeKHR*)malloc(sizeof(VkPresentModeKHR) * count); vkGetPhysicalDeviceSurfacePresentModesKHR(g_Gpu, g_Surface, &count, presentModes); bool presentModeAvailable = false; for (size_t i = 0; i < count; i++) { if (presentModes[i] == g_PresentMode) { presentModeAvailable = true; break; } } if( !presentModeAvailable ) g_PresentMode = VK_PRESENT_MODE_FIFO_KHR; // always available } // Create Logical Device { int device_extension_count = 1; const char* device_extensions[] = {"VK_KHR_swapchain"}; const uint32_t queue_index = 0; const uint32_t queue_count = 1; const float queue_priority[] = {1.0f}; VkDeviceQueueCreateInfo queue_info[1] = {}; queue_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info[0].queueFamilyIndex = g_QueueFamily; queue_info[0].queueCount = queue_count; queue_info[0].pQueuePriorities = queue_priority; VkDeviceCreateInfo create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; create_info.queueCreateInfoCount = sizeof(queue_info)/sizeof(queue_info[0]); create_info.pQueueCreateInfos = queue_info; create_info.enabledExtensionCount = device_extension_count; create_info.ppEnabledExtensionNames = device_extensions; err = vkCreateDevice(g_Gpu, &create_info, g_Allocator, &g_Device); check_vk_result(err); vkGetDeviceQueue(g_Device, g_QueueFamily, queue_index, &g_Queue); } // Create Framebuffers { int w, h; glfwGetFramebufferSize(window, &w, &h); resize_vulkan(window, w, h); glfwSetFramebufferSizeCallback(window, resize_vulkan); } // Create Command Buffers for (int i = 0; i < IMGUI_VK_QUEUED_FRAMES; i++) { { VkCommandPoolCreateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; info.queueFamilyIndex = g_QueueFamily; err = vkCreateCommandPool(g_Device, &info, g_Allocator, &g_CommandPool[i]); check_vk_result(err); } { VkCommandBufferAllocateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; info.commandPool = g_CommandPool[i]; info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; info.commandBufferCount = 1; err = vkAllocateCommandBuffers(g_Device, &info, &g_CommandBuffer[i]); check_vk_result(err); } { VkFenceCreateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; info.flags = VK_FENCE_CREATE_SIGNALED_BIT; err = vkCreateFence(g_Device, &info, g_Allocator, &g_Fence[i]); check_vk_result(err); } { VkSemaphoreCreateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; err = vkCreateSemaphore(g_Device, &info, g_Allocator, &g_PresentCompleteSemaphore[i]); check_vk_result(err); err = vkCreateSemaphore(g_Device, &info, g_Allocator, &g_RenderCompleteSemaphore[i]); check_vk_result(err); } } // Create Descriptor Pool { VkDescriptorPoolSize pool_size[11] = { { VK_DESCRIPTOR_TYPE_SAMPLER, 1000 }, { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1000 }, { VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1000 }, { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1000 }, { VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1000 }, { VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1000 }, { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000 }, { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1000 }, { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1000 }, {
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1000 }, { VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1000 } }; VkDescriptorPoolCreateInfo pool_info = {}; pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; pool_info.maxSets = 1000 * 11; pool_info.poolSizeCount = 11; pool_info.pPoolSizes = pool_size; err = vkCreateDescriptorPool(g_Device, &pool_info, g_Allocator, &g_DescriptorPool); check_vk_result(err); } }
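The pool created above allows up to 1000 descriptors of each type and, because of VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, lets individual sets be freed again. As a small usage sketch (the helper name and the layout parameter are assumptions; the globals come from the setup code above), allocating a set from it looks like this:

// Allocate a single descriptor set from g_DescriptorPool; the layout is assumed
// to have been created elsewhere by the application.
static VkDescriptorSet allocate_descriptor_set(VkDescriptorSetLayout layout)
{
    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorPool = g_DescriptorPool;
    alloc_info.descriptorSetCount = 1;
    alloc_info.pSetLayouts = &layout;
    VkDescriptorSet set = VK_NULL_HANDLE;
    VkResult err = vkAllocateDescriptorSets(g_Device, &alloc_info, &set);
    check_vk_result(err);
    // Sets can later be returned individually with vkFreeDescriptorSets()
    // because the pool was created with the FREE_DESCRIPTOR_SET flag.
    return set;
}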