/** Please see header for specification */ bool Anvil::DescriptorSetLayout::init() { bool result = false; VkResult result_vk; anvil_assert(m_layout == VK_NULL_HANDLE); /* Bake the Vulkan object */ auto create_info_ptr = m_create_info_ptr->create_descriptor_set_layout_create_info(m_device_ptr); if (create_info_ptr == nullptr) { anvil_assert(create_info_ptr != nullptr); goto end; } result_vk = Anvil::Vulkan::vkCreateDescriptorSetLayout(m_device_ptr->get_device_vk(), create_info_ptr->struct_chain_ptr->get_root_struct(), nullptr, /* pAllocator */ &m_layout); anvil_assert_vk_call_succeeded(result_vk); if (is_vk_call_successful(result_vk) ) { set_vk_handle(m_layout); } result = is_vk_call_successful(result_vk); end: return result; }
/** Destroys the underlying Vulkan Semaphore instance. */
void Anvil::Semaphore::release_semaphore()
{
    /* Nothing to do if the semaphore has already been released (or was never created). */
    if (m_semaphore == VK_NULL_HANDLE)
    {
        return;
    }

    lock();
    {
        Anvil::Vulkan::vkDestroySemaphore(m_device_ptr->get_device_vk(),
                                          m_semaphore,
                                          nullptr /* pAllocator */);
    }
    unlock();

    /* Forget the destroyed handle and propagate the null handle to the debug-marker machinery. */
    m_semaphore = VK_NULL_HANDLE;

    set_vk_handle(m_semaphore);
}
/** Please see header for specification */
Anvil::PipelineCache::PipelineCache(const Anvil::BaseDevice* in_device_ptr,
                                    bool                     in_mt_safe,
                                    size_t                   in_initial_data_size,
                                    const void*              in_initial_data)
    :DebugMarkerSupportProvider(in_device_ptr, Anvil::ObjectType::PIPELINE_CACHE),
     MTSafetySupportProvider   (in_mt_safe),
     m_device_ptr              (in_device_ptr),
     m_pipeline_cache          (VK_NULL_HANDLE)
{
    VkResult result_vk(VK_ERROR_INITIALIZATION_FAILED);

    ANVIL_REDUNDANT_VARIABLE(result_vk);

    /* Fill the create-info descriptor, optionally seeding the cache with caller-provided data. */
    VkPipelineCacheCreateInfo cache_create_info;

    cache_create_info.sType           = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    cache_create_info.pNext           = nullptr;
    cache_create_info.flags           = 0;
    cache_create_info.initialDataSize = in_initial_data_size;
    cache_create_info.pInitialData    = in_initial_data;

    /* Bake the Vulkan pipeline cache object */
    result_vk = Anvil::Vulkan::vkCreatePipelineCache(m_device_ptr->get_device_vk(),
                                                     &cache_create_info,
                                                     nullptr, /* pAllocator */
                                                     &m_pipeline_cache);

    anvil_assert_vk_call_succeeded(result_vk);

    if (is_vk_call_successful(result_vk) )
    {
        set_vk_handle(m_pipeline_cache);
    }

    anvil_assert(m_pipeline_cache != VK_NULL_HANDLE);

    /* Register the instance */
    Anvil::ObjectTracker::get()->register_object(Anvil::ObjectType::PIPELINE_CACHE,
                                                 this);
}
bool Anvil::Event::init() { VkEventCreateInfo event_create_info; VkResult result (VK_ERROR_INITIALIZATION_FAILED); /* Spawn a new event */ event_create_info.flags = 0; event_create_info.pNext = nullptr; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; result = vkCreateEvent(m_device_ptr->get_device_vk(), &event_create_info, nullptr, /* pAllocator */ &m_event); anvil_assert_vk_call_succeeded(result); if (is_vk_call_successful(result) ) { set_vk_handle(m_event); } return is_vk_call_successful(result); }
/** Spawns the underlying Vulkan fence object, optionally chaining export-related
 *  create-info structs when the fence is requested to be exportable.
 *
 *  @return true if vkCreateFence() reported success, false otherwise.
 **/
bool Anvil::Fence::init()
{
    VkFenceCreateInfo                              fence_create_info;
    VkResult                                       result          (VK_ERROR_INITIALIZATION_FAILED);
    Anvil::StructChainer<VkFenceCreateInfo>        struct_chainer;
    Anvil::StructChainUniquePtr<VkFenceCreateInfo> struct_chain_ptr;

    /* Sanity checks */
    if (m_create_info_ptr->get_exportable_external_fence_handle_types() != Anvil::EXTERNAL_FENCE_HANDLE_TYPE_NONE)
    {
        /* Exportable fences require VK_KHR_external_fence to have been enabled on the device. */
        if (!m_device_ptr->get_extension_info()->khr_external_fence() )
        {
            anvil_assert(m_device_ptr->get_extension_info()->khr_external_fence() );

            goto end;
        }
    }

    /* Spawn a new fence */
    {
        /* Fence may optionally start life in the signalled state. */
        fence_create_info.flags = (m_create_info_ptr->should_create_signalled() ) ? VK_FENCE_CREATE_SIGNALED_BIT
                                                                                  : 0u;
        fence_create_info.pNext = nullptr;
        fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

        struct_chainer.append_struct(fence_create_info);
    }

    if (m_create_info_ptr->get_exportable_external_fence_handle_types() != Anvil::EXTERNAL_FENCE_HANDLE_TYPE_NONE)
    {
        /* Chain a VkExportFenceCreateInfo so the resulting fence can be exported
         * using the requested external handle types. */
        VkExportFenceCreateInfo create_info;

        create_info.handleTypes = Anvil::Utils::convert_external_fence_handle_type_bits_to_vk_external_fence_handle_type_flags(m_create_info_ptr->get_exportable_external_fence_handle_types() );
        create_info.pNext       = nullptr;
        create_info.sType       = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR;

        struct_chainer.append_struct(create_info);
    }

#if defined(_WIN32)
    {
        /* On Windows, NT handle export additionally needs a VkExportFenceWin32HandleInfoKHR
         * describing access rights, an optional name and security attributes. */
        const Anvil::ExternalNTHandleInfo* nt_handle_info_ptr = nullptr;

        if (m_create_info_ptr->get_exportable_nt_handle_info(&nt_handle_info_ptr) )
        {
            VkExportFenceWin32HandleInfoKHR handle_info;

            anvil_assert(nt_handle_info_ptr != nullptr);
            anvil_assert(m_create_info_ptr->get_exportable_external_fence_handle_types() & Anvil::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT);

            handle_info.dwAccess    = nt_handle_info_ptr->access;
            /* An empty name maps to a nullptr (anonymous handle). */
            handle_info.name        = (nt_handle_info_ptr->name.size() > 0) ? &nt_handle_info_ptr->name.at(0)
                                                                            : nullptr;
            handle_info.pAttributes = nt_handle_info_ptr->attributes_ptr;
            handle_info.pNext       = nullptr;
            handle_info.sType       = VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR;

            struct_chainer.append_struct(handle_info);
        }
    }
#endif

    /* Bake the final pNext chain. */
    struct_chain_ptr = struct_chainer.create_chain();

    if (struct_chain_ptr == nullptr)
    {
        anvil_assert(struct_chain_ptr != nullptr);

        goto end;
    }

    result = vkCreateFence(m_device_ptr->get_device_vk(),
                           struct_chain_ptr->get_root_struct(),
                           nullptr, /* pAllocator */
                          &m_fence);

    anvil_assert_vk_call_succeeded(result);

    if (is_vk_call_successful(result) )
    {
        set_vk_handle(m_fence);
    }

end:
    /* On any early-out above, result still holds VK_ERROR_INITIALIZATION_FAILED. */
    return is_vk_call_successful(result);
}
/* Please see header for specification
 *
 * Creates the platform-specific VkSurfaceKHR (Win32 or XCB, unless a dummy window
 * platform is in use) and then determines, per physical device, which queue families
 * can present to the surface. For dummy (offscreen) platforms no surface is created
 * and universal queues are treated as "present-capable".
 */
bool Anvil::RenderingSurface::init()
{
    const Anvil::DeviceType& device_type        (m_device_ptr->get_type() );
    bool                     init_successful    (false);
    auto                     instance_ptr       (m_create_info_ptr->get_instance_ptr() );
    uint32_t                 n_physical_devices (0);
    VkResult                 result             (VK_SUCCESS);
    const WindowPlatform     window_platform    (m_create_info_ptr->get_window_ptr()->get_platform());

    /* Dummy platforms render offscreen - no actual window system surface exists. */
    const bool is_dummy_window_platform(window_platform == WINDOW_PLATFORM_DUMMY ||
                                        window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

    /* Determine how many physical devices we need to query presentation support for. */
    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:
        {
            const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

            n_physical_devices = mgpu_device_ptr->get_n_physical_devices();

            break;
        }

        case Anvil::DeviceType::SINGLE_GPU:
        {
            n_physical_devices = 1;

            break;
        }

        default:
        {
            anvil_assert_fail();

            goto end;
        }
    }

    if (!is_dummy_window_platform)
    {
        auto window_ptr = m_create_info_ptr->get_window_ptr();

        /* Spawn the platform-specific surface. Exactly one of the blocks below is compiled in. */
#if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT) && defined(_WIN32)
        {
            VkWin32SurfaceCreateInfoKHR surface_create_info;

            surface_create_info.flags     = 0;
            surface_create_info.hinstance = GetModuleHandle(nullptr);
            surface_create_info.hwnd      = window_ptr->get_handle();
            surface_create_info.pNext     = nullptr;
            surface_create_info.sType     = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;

            result = instance_ptr->get_extension_khr_win32_surface_entrypoints().vkCreateWin32SurfaceKHR(instance_ptr->get_instance_vk(),
                                                                                                         &surface_create_info,
                                                                                                         nullptr, /* pAllocator */
                                                                                                         &m_surface);
        }
#endif
#if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT) && !defined(_WIN32)
        {
            VkXcbSurfaceCreateInfoKHR surface_create_info;

            surface_create_info.flags      = 0;
            surface_create_info.window     = window_ptr->get_handle();
            surface_create_info.connection = static_cast<xcb_connection_t*>(window_ptr->get_connection());
            surface_create_info.pNext      = nullptr;
            surface_create_info.sType      = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;

            result = instance_ptr->get_extension_khr_xcb_surface_entrypoints().vkCreateXcbSurfaceKHR(instance_ptr->get_instance_vk(),
                                                                                                     &surface_create_info,
                                                                                                     nullptr, /* pAllocator */
                                                                                                     &m_surface);
        }
#endif

        anvil_assert_vk_call_succeeded(result);

        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_surface);
        }
    }
    else
    {
        anvil_assert(window_platform != WINDOW_PLATFORM_UNKNOWN);
    }

    if (is_dummy_window_platform == false)
    {
        /* Is there at least one queue fam that can be used together with at least one physical device associated with
         * the logical device to present using the surface we've just spawned and the physical device user has specified?
         */
        const auto& queue_families(m_device_ptr->get_physical_device_queue_families() );

        for (uint32_t n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
        {
            Anvil::RenderingSurface::PhysicalDeviceCapabilities* physical_device_caps_ptr = nullptr;
            const Anvil::PhysicalDevice*                         physical_device_ptr      = nullptr;

            /* Resolve the physical device and the capability slot it maps to. */
            switch (device_type)
            {
                case Anvil::DeviceType::MULTI_GPU:
                {
                    const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

                    physical_device_ptr      = mgpu_device_ptr->get_physical_device(n_physical_device);
                    physical_device_caps_ptr = &m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                    break;
                }

                case Anvil::DeviceType::SINGLE_GPU:
                {
                    const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                    physical_device_ptr      = sgpu_device_ptr->get_physical_device();
                    physical_device_caps_ptr = &m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                    break;
                }

                default:
                {
                    anvil_assert_fail();

                    goto end;
                }
            }

            /* Probe every queue family for presentation support against the new surface. */
            for (uint32_t n_queue_family = 0;
                          n_queue_family < static_cast<uint32_t>(queue_families.size() );
                        ++n_queue_family)
            {
                VkBool32 is_presentation_supported = VK_FALSE;

                {
                    const auto& khr_surface_entrypoints = instance_ptr->get_extension_khr_surface_entrypoints();

                    result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceSupportKHR(physical_device_ptr->get_physical_device(),
                                                                                          n_queue_family,
                                                                                          m_surface,
                                                                                          &is_presentation_supported);
                }

                if (is_vk_call_successful(result)          &&
                    is_presentation_supported == VK_TRUE)
                {
                    physical_device_caps_ptr->present_capable_queue_fams.push_back(n_queue_family);
                }
            }
        }
    }
    else
    {
        /* offscreen rendering. Any physical device that offers universal queue can be used to "present" */
        for (uint32_t n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
        {
            switch (device_type)
            {
                case Anvil::DeviceType::MULTI_GPU:
                {
                    const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

                    if (mgpu_device_ptr->get_n_universal_queues() > 0)
                    {
                        const Anvil::PhysicalDevice* physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device);
                        auto&                        result_caps         = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                        /* Use the first universal queue's family as the "present-capable" family. */
                        result_caps.present_capable_queue_fams.push_back(mgpu_device_ptr->get_universal_queue(0)->get_queue_family_index() );
                    }

                    break;
                }

                case Anvil::DeviceType::SINGLE_GPU:
                {
                    const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                    if (sgpu_device_ptr->get_n_universal_queues() > 0)
                    {
                        const Anvil::PhysicalDevice* physical_device_ptr = sgpu_device_ptr->get_physical_device();
                        auto&                        result_caps         = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                        result_caps.present_capable_queue_fams.push_back(sgpu_device_ptr->get_universal_queue(0)->get_queue_family_index() );
                    }

                    break;
                }

                default:
                {
                    anvil_assert_fail();

                    goto end;
                }
            }
        }

        /* No VK calls were made on this path - force success. */
        result = VK_SUCCESS;
    }

    if (!is_vk_call_successful(result) )
    {
        anvil_assert_vk_call_succeeded(result);

        init_successful = false;
    }
    else
    {
        /* Retrieve Vulkan object capabilities and cache them */
        cache_surface_properties();

        init_successful = true;
    }

end:
    return init_successful;
}
/** Initializes the swapchain object.
 *
 *  For on-screen platforms: creates the VkSwapchainKHR, retrieves its images and
 *  wraps each one in an Anvil::Image + Anvil::ImageView pair. For dummy (offscreen)
 *  platforms: allocates regular 2D images instead, one per requested "swapchain" image.
 *  Finally, signs up for queue present-request and window close notifications.
 *
 *  @return true if all Vulkan calls made along the taken path succeeded.
 **/
bool Anvil::Swapchain::init()
{
    uint32_t                                              n_swapchain_images             = 0;
    auto                                                  parent_surface_ptr             = m_create_info_ptr->get_rendering_surface();
    VkResult                                              result                         = VK_ERROR_INITIALIZATION_FAILED;
    Anvil::StructChainUniquePtr<VkSwapchainCreateInfoKHR> struct_chain_ptr;
    std::vector<VkImage>                                  swapchain_images;
    const VkSurfaceTransformFlagBitsKHR                   swapchain_transformation       = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    const WindowPlatform                                  window_platform                = m_create_info_ptr->get_window()->get_platform();

    /* Dummy platforms have no real window system surface - we render offscreen instead. */
    const bool                                            is_offscreen_rendering_enabled = (window_platform == WINDOW_PLATFORM_DUMMY ||
                                                                                            window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

    /* Swapchain size follows the parent rendering surface. */
    m_size.width  = parent_surface_ptr->get_width ();
    m_size.height = parent_surface_ptr->get_height();

    /* not doing offscreen rendering */
    if (!is_offscreen_rendering_enabled)
    {
        const auto&                                    khr_swapchain_entrypoints = m_device_ptr->get_extension_khr_swapchain_entrypoints();
        Anvil::StructChainer<VkSwapchainCreateInfoKHR> struct_chainer;

#ifdef _DEBUG
        /* Debug-only validation: confirm the surface supports everything the
         * create-info below is about to request. */
        {
            const Anvil::SGPUDevice*   sgpu_device_ptr                (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );
            const Anvil::DeviceType    device_type                     = m_device_ptr->get_type();
            uint32_t                   n_physical_devices              = 0;
            bool                       result_bool                     = false;
            const char*                required_surface_extension_name = nullptr;
            VkSurfaceCapabilitiesKHR   surface_caps;
            VkCompositeAlphaFlagsKHR   supported_composite_alpha_flags = static_cast<VkCompositeAlphaFlagsKHR>(0);
            VkSurfaceTransformFlagsKHR supported_surface_transform_flags;

#ifdef _WIN32
    #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
            required_surface_extension_name = VK_KHR_WIN32_SURFACE_EXTENSION_NAME;
    #endif
#else
    #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
            required_surface_extension_name = VK_KHR_XCB_SURFACE_EXTENSION_NAME;
    #endif
#endif

            anvil_assert(required_surface_extension_name == nullptr                                                                   ||
                         m_device_ptr->get_parent_instance()->is_instance_extension_supported(required_surface_extension_name) );

            switch (device_type)
            {
                case Anvil::DEVICE_TYPE_SINGLE_GPU: n_physical_devices = 1; break;

                default:
                {
                    anvil_assert_fail();
                }
            }

            for (uint32_t n_physical_device = 0;
                          n_physical_device < n_physical_devices;
                        ++n_physical_device)
            {
                const Anvil::PhysicalDevice* current_physical_device_ptr = nullptr;

                switch (device_type)
                {
                    case Anvil::DEVICE_TYPE_SINGLE_GPU: current_physical_device_ptr = sgpu_device_ptr->get_physical_device(); break;

                    default:
                    {
                        anvil_assert_fail();
                    }
                }

                /* Ensure opaque composite alpha mode is supported */
                anvil_assert(parent_surface_ptr->get_supported_composite_alpha_flags(&supported_composite_alpha_flags) );

                anvil_assert(supported_composite_alpha_flags & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR);

                /* Ensure we can use the swapchain image format */
                anvil_assert(parent_surface_ptr->is_compatible_with_image_format(m_create_info_ptr->get_format(),
                                                                                &result_bool) );
                anvil_assert(result_bool);

                /* Ensure the transformation we're about to request is supported by the rendering surface */
                anvil_assert(parent_surface_ptr->get_supported_transformations(&supported_surface_transform_flags) );

                anvil_assert(supported_surface_transform_flags & swapchain_transformation);

                /* Ensure the requested number of swapchain images is reasonable*/
                anvil_assert(parent_surface_ptr->get_capabilities(&surface_caps) );

                anvil_assert(surface_caps.maxImageCount == 0                                 ||
                             surface_caps.maxImageCount >= m_create_info_ptr->get_n_images() );
            }
        }
#endif

        /* Fill the swapchain create-info descriptor. */
        {
            VkSwapchainCreateInfoKHR create_info;

            create_info.clipped               = true; /* we won't be reading from the presentable images */
            create_info.compositeAlpha        = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
            create_info.flags                 = m_create_info_ptr->get_flags();
            create_info.imageArrayLayers      = 1;
            create_info.imageColorSpace       = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
            create_info.imageExtent.height    = parent_surface_ptr->get_height();
            create_info.imageExtent.width     = parent_surface_ptr->get_width ();
            create_info.imageFormat           = m_create_info_ptr->get_format ();
            create_info.imageSharingMode      = VK_SHARING_MODE_EXCLUSIVE;
            create_info.imageUsage            = m_create_info_ptr->get_usage_flags();
            create_info.minImageCount         = m_create_info_ptr->get_n_images  ();
            create_info.oldSwapchain          = VK_NULL_HANDLE;
            create_info.pNext                 = nullptr;
            create_info.pQueueFamilyIndices   = nullptr;
            create_info.presentMode           = m_create_info_ptr->get_present_mode();
            create_info.preTransform          = swapchain_transformation;
            create_info.queueFamilyIndexCount = 0;
            create_info.sType                 = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
            create_info.surface               = parent_surface_ptr->get_surface();

            struct_chainer.append_struct(create_info);
        }

        struct_chain_ptr = struct_chainer.create_chain();

        /* The surface is locked for the duration of the create call (MT safety). */
        parent_surface_ptr->lock();
        {
            result = khr_swapchain_entrypoints.vkCreateSwapchainKHR(m_device_ptr->get_device_vk(),
                                                                    struct_chain_ptr->get_root_struct(),
                                                                    nullptr, /* pAllocator */
                                                                   &m_swapchain);
        }
        parent_surface_ptr->unlock();

        anvil_assert_vk_call_succeeded(result);

        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_swapchain);
        }

        /* Retrieve swap-chain images */
        /* NOTE(review): execution proceeds here even if vkCreateSwapchainKHR failed -
         *               m_swapchain would then be passed null/undefined to the calls below.
         *               Presumably acceptable because the asserts above fire in debug builds;
         *               verify for release-build error handling. */
        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                   nullptr); /* pSwapchainImages */

        anvil_assert_vk_call_succeeded(result);
        anvil_assert                  (n_swapchain_images >  0);

        swapchain_images.resize(n_swapchain_images);

        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                  &swapchain_images[0]);

        anvil_assert_vk_call_succeeded(result);
    }
    else /* offscreen rendering */
    {
        /* Offscreen images must be blittable/copyable out, so force TRANSFER_SRC usage on. */
        m_create_info_ptr->set_usage_flags(m_create_info_ptr->get_usage_flags() | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);

        n_swapchain_images = m_create_info_ptr->get_n_images();
    }

    for (uint32_t n_result_image = 0;
                  n_result_image < n_swapchain_images;
                ++n_result_image)
    {
        /* Spawn an Image wrapper class for the swap-chain image.
         */
        if (!is_offscreen_rendering_enabled)
        {
            /* Wrap the driver-owned swapchain image; Anvil does not own its memory. */
            auto create_info_ptr = Anvil::ImageCreateInfo::create_swapchain_wrapper(m_device_ptr,
                                                                                    this,
                                                                                    swapchain_images[n_result_image],
                                                                                    n_result_image);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }
        else
        {
            /* Offscreen path: allocate a regular 2D image instead of a swapchain wrapper. */
            auto create_info_ptr = Anvil::ImageCreateInfo::create_nonsparse_alloc(m_device_ptr,
                                                                                  VK_IMAGE_TYPE_2D,
                                                                                  m_create_info_ptr->get_format(),
                                                                                  VK_IMAGE_TILING_OPTIMAL,
                                                                                  m_create_info_ptr->get_usage_flags(),
                                                                                  m_size.width,
                                                                                  m_size.height,
                                                                                  1, /* base_mipmap_depth */
                                                                                  1,
                                                                                  VK_SAMPLE_COUNT_1_BIT,
                                                                                  QUEUE_FAMILY_GRAPHICS_BIT,
                                                                                  VK_SHARING_MODE_EXCLUSIVE,
                                                                                  false, /* in_use_full_mipmap_chain */
                                                                                  0,     /* in_memory_features       */
                                                                                  0,     /* in_create_flags          */
                                                                                  VK_IMAGE_LAYOUT_GENERAL,
                                                                                  nullptr);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }

        /* For each swap-chain image, create a relevant view */
        {
            auto create_info_ptr = Anvil::ImageViewCreateInfo::create_2D(m_device_ptr,
                                                                         m_image_ptrs[n_result_image].get(),
                                                                         0, /* n_base_layer        */
                                                                         0, /* n_base_mipmap_level */
                                                                         1, /* n_mipmaps           */
                                                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                                                         m_create_info_ptr->get_format(),
                                                                         VK_COMPONENT_SWIZZLE_R,
                                                                         VK_COMPONENT_SWIZZLE_G,
                                                                         VK_COMPONENT_SWIZZLE_B,
                                                                         VK_COMPONENT_SWIZZLE_A);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_view_ptrs[n_result_image] = Anvil::ImageView::create(std::move(create_info_ptr) );
        }

        result = VK_SUCCESS;
    }

    /* Sign up for present submission notifications. This is needed to ensure that number of presented frames ==
     * number of acquired frames at destruction time.
     */
    {
        std::vector<Anvil::Queue*> queues;

        switch (m_device_ptr->get_type() )
        {
            case Anvil::DEVICE_TYPE_SINGLE_GPU:
            {
                const std::vector<uint32_t>* queue_fams_with_present_support_ptr(nullptr);
                const auto                   rendering_surface_ptr              (m_create_info_ptr->get_rendering_surface() );
                const Anvil::SGPUDevice*     sgpu_device_ptr                    (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                if (!rendering_surface_ptr->get_queue_families_with_present_support(&queue_fams_with_present_support_ptr) )
                {
                    break;
                }

                if (queue_fams_with_present_support_ptr == nullptr)
                {
                    anvil_assert(queue_fams_with_present_support_ptr != nullptr);
                }
                else
                {
                    /* Collect every unique queue belonging to a present-capable family. */
                    for (const auto queue_fam : *queue_fams_with_present_support_ptr)
                    {
                        const uint32_t n_queues = sgpu_device_ptr->get_n_queues(queue_fam);

                        for (uint32_t n_queue = 0;
                                      n_queue < n_queues;
                                    ++n_queue)
                        {
                            auto queue_ptr = sgpu_device_ptr->get_queue_for_queue_family_index(queue_fam,
                                                                                               n_queue);

                            anvil_assert(queue_ptr != nullptr);

                            if (std::find(queues.begin(),
                                          queues.end(),
                                          queue_ptr) == queues.end() )
                            {
                                queues.push_back(queue_ptr);
                            }
                        }
                    }
                }

                break;
            }
        }

        for (auto queue_ptr : queues)
        {
            queue_ptr->register_for_callbacks(
                QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                std::bind(&Swapchain::on_present_request_issued,
                          this,
                          std::placeholders::_1),
                this
            );

            m_observed_queues.push_back(queue_ptr);
        }
    }

    /* Sign up for "about to close the parent window" notifications. Swapchain instance SHOULD be deinitialized
     * before the window is destroyed, so we're going to act as nice citizens.
     */
    m_create_info_ptr->get_window()->register_for_callbacks(
        WINDOW_CALLBACK_ID_ABOUT_TO_CLOSE,
        std::bind(&Swapchain::on_parent_window_about_to_close,
                  this),
        this
    );

    return is_vk_call_successful(result);
}
/** Performs a number of image view type-specific sanity checks and creates the requested * Vulkan image view instance. * * For argument discussion, please see documentation for the constructors above. * * @return true if the function executed successfully, false otherwise. **/ bool Anvil::ImageView::init() { const auto aspect_mask = m_create_info_ptr->get_aspect (); const auto format = m_create_info_ptr->get_format (); const auto image_view_type = m_create_info_ptr->get_type (); const auto n_base_layer = m_create_info_ptr->get_base_layer (); const auto n_base_mip = m_create_info_ptr->get_base_mipmap_level(); const auto n_layers = m_create_info_ptr->get_n_layers (); const auto n_mips = m_create_info_ptr->get_n_mipmaps (); VkFormat parent_image_format = VK_FORMAT_UNDEFINED; uint32_t parent_image_n_layers = 0; uint32_t parent_image_n_mipmaps = 0; auto parent_image_ptr = m_create_info_ptr->get_parent_image(); bool result = false; VkResult result_vk; Anvil::StructChainer<VkImageViewCreateInfo> struct_chainer; const auto& swizzle_array = m_create_info_ptr->get_swizzle_array(); parent_image_format = parent_image_ptr->get_create_info_ptr()->get_format(); parent_image_n_mipmaps = parent_image_ptr->get_n_mipmaps (); if (parent_image_ptr->get_create_info_ptr()->get_type_vk() != VK_IMAGE_TYPE_3D) { parent_image_n_layers = parent_image_ptr->get_create_info_ptr()->get_n_layers(); } else { parent_image_ptr->get_image_mipmap_size(0, /* in_n_mipmap */ nullptr, /* out_opt_width_ptr */ nullptr, /* out_opt_height_ptr */ &parent_image_n_layers); } if (!(parent_image_n_layers >= n_base_layer + n_layers)) { anvil_assert(parent_image_n_layers >= n_base_layer + n_layers); goto end; } if (!(parent_image_n_mipmaps >= n_base_mip + n_mips)) { anvil_assert(parent_image_n_mipmaps >= n_base_mip + n_mips); goto end; } if (((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_MUTABLE_FORMAT_BIT) == 0) && (parent_image_format != format)) { 
anvil_assert(parent_image_format == format); goto end; } if (parent_image_ptr->get_create_info_ptr()->get_type_vk() == VK_IMAGE_TYPE_3D) { if (image_view_type == VK_IMAGE_VIEW_TYPE_2D || image_view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY) { if (!m_device_ptr->get_extension_info()->khr_maintenance1() ) { anvil_assert(m_device_ptr->get_extension_info()->khr_maintenance1()); goto end; } if ((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_2D_ARRAY_COMPATIBLE_BIT) == 0) { anvil_assert((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_2D_ARRAY_COMPATIBLE_BIT) != 0); goto end; } } } /* Create the image view instance */ { VkImageViewCreateInfo image_view_create_info; image_view_create_info.components.a = swizzle_array[3]; image_view_create_info.components.b = swizzle_array[2]; image_view_create_info.components.g = swizzle_array[1]; image_view_create_info.components.r = swizzle_array[0]; image_view_create_info.flags = 0; image_view_create_info.format = format; image_view_create_info.image = parent_image_ptr->get_image(); image_view_create_info.pNext = nullptr; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.subresourceRange.aspectMask = aspect_mask; image_view_create_info.subresourceRange.baseArrayLayer = n_base_layer; image_view_create_info.subresourceRange.baseMipLevel = n_base_mip; image_view_create_info.subresourceRange.layerCount = n_layers; image_view_create_info.subresourceRange.levelCount = n_mips; image_view_create_info.viewType = image_view_type; struct_chainer.append_struct(image_view_create_info); } { auto chain_ptr = struct_chainer.create_chain(); result_vk = vkCreateImageView(m_device_ptr->get_device_vk(), chain_ptr->get_root_struct(), nullptr, /* pAllocator */ &m_image_view); } if (!is_vk_call_successful(result_vk) ) { anvil_assert_vk_call_succeeded(result_vk); goto end; } /* Cache the properties */ set_vk_handle(m_image_view); /* All done 
*/ result = true; end: return result; }
/* Please see header for specification */ bool Anvil::RenderPass::init() { std::vector<VkAttachmentDescription> renderpass_attachments_vk; VkRenderPassCreateInfo render_pass_create_info; bool result (false); VkResult result_vk; std::vector<VkSubpassDependency> subpass_dependencies_vk; std::vector<VkSubpassDescription> subpass_descriptions_vk; /* NOTE: We need to reserve storage in advance for each of the vectors below, * so that it is guaranteed the push_back() calls do not cause a realloc() * and invalidate already cached pointers to filled Vulkan descriptors. * To achieve this, we could encapsulate the code below in a two-iteration loop, * whose first iteration would count how many elements we need for each vector, * and the second one would reserve that space and proceed with inserting the elements. * * That would look ugly though. * * In order to keep things clean & simple, we instantiate the following structure on heap * for each subpass. On subpass level, we can easily predict how many elements in the worst * case scenario we're going to insert, so that will do the trick. Slight performance cost, * but baking is an offline task, so we should be OK. **/ typedef struct SubPassAttachmentSet { /** Constructor. * * @param in_n_max_color_attachments Maximum number of color attachments the subpass will define. * @param in_n_max_input_attachments Maximum number of input attachments the subpass will define. * @param in_n_max_preserve_attachments Maximum number of preserve attachments the subpass will define. 
**/ explicit SubPassAttachmentSet(uint32_t in_n_max_color_attachments, uint32_t in_n_max_input_attachments, uint32_t in_n_max_preserve_attachments) :n_max_color_attachments (in_n_max_color_attachments), n_max_input_attachments (in_n_max_input_attachments), n_max_preserve_attachments(in_n_max_preserve_attachments) { color_attachments_vk.reserve (n_max_color_attachments); input_attachments_vk.reserve (n_max_input_attachments); preserve_attachments_vk.reserve (n_max_preserve_attachments); resolve_color_attachments_vk.reserve(n_max_color_attachments); } /** Helper function which verifies the maximum number of attachments specified at * creation time is not exceeded. **/ void do_sanity_checks() { anvil_assert(color_attachments_vk.size() <= n_max_color_attachments); anvil_assert(input_attachments_vk.size() <= n_max_input_attachments); anvil_assert(preserve_attachments_vk.size() <= n_max_preserve_attachments); anvil_assert(resolve_color_attachments_vk.size() <= n_max_color_attachments); } std::vector<VkAttachmentReference> color_attachments_vk; VkAttachmentReference depth_attachment_vk; std::vector<VkAttachmentReference> input_attachments_vk; std::vector<uint32_t> preserve_attachments_vk; std::vector<VkAttachmentReference> resolve_color_attachments_vk; private: uint32_t n_max_color_attachments; uint32_t n_max_input_attachments; uint32_t n_max_preserve_attachments; } SubPassAttachmentSet; std::vector<std::unique_ptr<SubPassAttachmentSet> > subpass_attachment_sets; anvil_assert(m_render_pass == VK_NULL_HANDLE); /* Set up helper descriptor storage space */ subpass_dependencies_vk.reserve(m_render_pass_create_info_ptr->m_subpass_dependencies.size() ); subpass_descriptions_vk.reserve(m_render_pass_create_info_ptr->m_subpasses.size() ); for (auto renderpass_attachment_iterator = m_render_pass_create_info_ptr->m_attachments.cbegin(); renderpass_attachment_iterator != m_render_pass_create_info_ptr->m_attachments.cend(); ++renderpass_attachment_iterator) { VkAttachmentDescription 
attachment_vk; attachment_vk.finalLayout = renderpass_attachment_iterator->final_layout; attachment_vk.flags = (renderpass_attachment_iterator->may_alias) ? VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT : 0u; attachment_vk.format = renderpass_attachment_iterator->format; attachment_vk.initialLayout = renderpass_attachment_iterator->initial_layout; attachment_vk.loadOp = renderpass_attachment_iterator->color_depth_load_op; attachment_vk.samples = static_cast<VkSampleCountFlagBits>(renderpass_attachment_iterator->sample_count); attachment_vk.stencilLoadOp = renderpass_attachment_iterator->stencil_load_op; attachment_vk.stencilStoreOp = renderpass_attachment_iterator->stencil_store_op; attachment_vk.storeOp = renderpass_attachment_iterator->color_depth_store_op; renderpass_attachments_vk.push_back(attachment_vk); } for (auto subpass_dependency_iterator = m_render_pass_create_info_ptr->m_subpass_dependencies.cbegin(); subpass_dependency_iterator != m_render_pass_create_info_ptr->m_subpass_dependencies.cend(); ++subpass_dependency_iterator) { VkSubpassDependency dependency_vk; dependency_vk.dependencyFlags = ((subpass_dependency_iterator->by_region) ? VK_DEPENDENCY_BY_REGION_BIT : 0u); dependency_vk.dstAccessMask = subpass_dependency_iterator->destination_access_mask; dependency_vk.dstStageMask = subpass_dependency_iterator->destination_stage_mask; dependency_vk.dstSubpass = (subpass_dependency_iterator->destination_subpass_ptr != nullptr) ? subpass_dependency_iterator->destination_subpass_ptr->index : VK_SUBPASS_EXTERNAL; dependency_vk.srcAccessMask = subpass_dependency_iterator->source_access_mask; dependency_vk.srcStageMask = subpass_dependency_iterator->source_stage_mask; dependency_vk.srcSubpass = (subpass_dependency_iterator->source_subpass_ptr != nullptr) ? subpass_dependency_iterator->source_subpass_ptr->index : VK_SUBPASS_EXTERNAL; subpass_dependencies_vk.push_back(dependency_vk); } /* We now have all the data needed to create Vulkan subpass instances. 
*/ for (auto subpass_iterator = m_render_pass_create_info_ptr->m_subpasses.cbegin(); subpass_iterator != m_render_pass_create_info_ptr->m_subpasses.cend(); ++subpass_iterator) { std::unique_ptr<SubPassAttachmentSet> current_subpass_attachment_set_ptr; uint32_t highest_subpass_color_attachment_location = UINT32_MAX; uint32_t highest_subpass_input_attachment_index = UINT32_MAX; bool need_color_resolve_attachments = false; VkSubpassDescription subpass_vk; VkAttachmentReference unused_reference; unused_reference.attachment = VK_ATTACHMENT_UNUSED; unused_reference.layout = VK_IMAGE_LAYOUT_UNDEFINED; /* Determine whether any of the color attachments are going to be resolved. */ for (auto subpass_color_attachment_iterator = (*subpass_iterator)->color_attachments_map.cbegin(); subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend(); ++subpass_color_attachment_iterator) { if (subpass_color_attachment_iterator->second.resolve_attachment_index != UINT32_MAX) { need_color_resolve_attachments = true; break; } } /* Determine the highest color attachment location & input attachment index. 
*/ for (auto subpass_color_attachment_iterator = (*subpass_iterator)->color_attachments_map.cbegin(); subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend(); ++subpass_color_attachment_iterator) { if (highest_subpass_color_attachment_location == UINT32_MAX || subpass_color_attachment_iterator->first > highest_subpass_color_attachment_location) { highest_subpass_color_attachment_location = subpass_color_attachment_iterator->first; } } for (auto subpass_input_attachment_iterator = (*subpass_iterator)->input_attachments_map.cbegin(); subpass_input_attachment_iterator != (*subpass_iterator)->input_attachments_map.cend(); ++subpass_input_attachment_iterator) { if (highest_subpass_input_attachment_index == UINT32_MAX || subpass_input_attachment_iterator->first > highest_subpass_input_attachment_index) { highest_subpass_input_attachment_index = subpass_input_attachment_iterator->first; } } /* Instantiate a new subpass attachment set for current subpass */ current_subpass_attachment_set_ptr.reset( new SubPassAttachmentSet(highest_subpass_color_attachment_location + 1, /* n_max_color_attachments */ static_cast<uint32_t>((*subpass_iterator)->input_attachments_map.size() ), /* n_max_input_attachments */ static_cast<uint32_t>((*subpass_iterator)->preserved_attachments.size() ) /* n_max_preserved_attachments */) ); /* Prepare unused VK color, depth, input & resolve attachment descriptors */ for (uint32_t n_color_attachment = 0; n_color_attachment < static_cast<uint32_t>(highest_subpass_color_attachment_location + 1); ++n_color_attachment) { current_subpass_attachment_set_ptr->color_attachments_vk.push_back(unused_reference); if (need_color_resolve_attachments) { current_subpass_attachment_set_ptr->resolve_color_attachments_vk.push_back(unused_reference); } } for (uint32_t n_input_attachment = 0; n_input_attachment < static_cast<uint32_t>(highest_subpass_input_attachment_index + 1); ++n_input_attachment) { 
current_subpass_attachment_set_ptr->input_attachments_vk.push_back(unused_reference); } /* Update those of the color/depth/input references, for which we have been provided actual descriptors */ for (auto subpass_color_attachment_iterator = (*subpass_iterator)->color_attachments_map.cbegin(); subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend(); ++subpass_color_attachment_iterator) { current_subpass_attachment_set_ptr->color_attachments_vk[subpass_color_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment(subpass_color_attachment_iterator->second); if (need_color_resolve_attachments) { if (subpass_color_attachment_iterator->second.resolve_attachment_index != UINT32_MAX) { current_subpass_attachment_set_ptr->resolve_color_attachments_vk[subpass_color_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_for_resolve_attachment(subpass_iterator, subpass_color_attachment_iterator); } } } if ((*subpass_iterator)->depth_stencil_attachment.attachment_index != UINT32_MAX) { current_subpass_attachment_set_ptr->depth_attachment_vk = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment((*subpass_iterator)->depth_stencil_attachment); } else { current_subpass_attachment_set_ptr->depth_attachment_vk = unused_reference; } for (auto subpass_input_attachment_iterator = (*subpass_iterator)->input_attachments_map.cbegin(); subpass_input_attachment_iterator != (*subpass_iterator)->input_attachments_map.cend(); ++subpass_input_attachment_iterator) { current_subpass_attachment_set_ptr->input_attachments_vk[subpass_input_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment(subpass_input_attachment_iterator->second); } /* Fill the preserved attachments vector. 
These do not use indices or locations, so the process is much simpler */ for (auto subpass_preserve_attachment_iterator = (*subpass_iterator)->preserved_attachments.cbegin(); subpass_preserve_attachment_iterator != (*subpass_iterator)->preserved_attachments.cend(); ++subpass_preserve_attachment_iterator) { current_subpass_attachment_set_ptr->preserve_attachments_vk.push_back( m_render_pass_create_info_ptr->m_attachments.at(subpass_preserve_attachment_iterator->attachment_index).index ); } /* Prepare the VK subpass descriptor */ const uint32_t n_color_attachments = highest_subpass_color_attachment_location + 1; const uint32_t n_input_attachments = highest_subpass_input_attachment_index + 1; const uint32_t n_preserved_attachments = static_cast<uint32_t>((*subpass_iterator)->preserved_attachments.size() ); const uint32_t n_resolved_attachments = ((*subpass_iterator)->resolved_attachments_map.size() == 0) ? 0 : n_color_attachments; subpass_vk.colorAttachmentCount = n_color_attachments; subpass_vk.flags = 0; subpass_vk.inputAttachmentCount = n_input_attachments; subpass_vk.pColorAttachments = (n_color_attachments > 0) ? &current_subpass_attachment_set_ptr->color_attachments_vk.at(0) : nullptr; subpass_vk.pDepthStencilAttachment = ((*subpass_iterator)->depth_stencil_attachment.attachment_index != UINT32_MAX) ? &current_subpass_attachment_set_ptr->depth_attachment_vk : nullptr; subpass_vk.pInputAttachments = (n_input_attachments > 0) ? &current_subpass_attachment_set_ptr->input_attachments_vk.at(0) : nullptr; subpass_vk.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass_vk.pPreserveAttachments = (n_preserved_attachments > 0) ? &current_subpass_attachment_set_ptr->preserve_attachments_vk.at(0) : nullptr; subpass_vk.preserveAttachmentCount = n_preserved_attachments; subpass_vk.pResolveAttachments = (n_resolved_attachments > 0) ?
&current_subpass_attachment_set_ptr->resolve_color_attachments_vk.at(0) : nullptr; current_subpass_attachment_set_ptr->do_sanity_checks(); subpass_attachment_sets.push_back( std::move(current_subpass_attachment_set_ptr) ); subpass_descriptions_vk.push_back(subpass_vk); } /* Set up a create info descriptor and spawn a new Vulkan RenderPass object. */ render_pass_create_info.attachmentCount = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_attachments.size () ); render_pass_create_info.dependencyCount = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_subpass_dependencies.size() ); render_pass_create_info.subpassCount = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_subpasses.size () ); render_pass_create_info.flags = 0; render_pass_create_info.pAttachments = (render_pass_create_info.attachmentCount > 0) ? &renderpass_attachments_vk.at(0) : nullptr; render_pass_create_info.pDependencies = (render_pass_create_info.dependencyCount > 0) ? &subpass_dependencies_vk.at(0) : nullptr; render_pass_create_info.pNext = nullptr; render_pass_create_info.pSubpasses = (render_pass_create_info.subpassCount > 0) ? &subpass_descriptions_vk.at(0) : nullptr; render_pass_create_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; result_vk = vkCreateRenderPass(m_device_ptr->get_device_vk(), &render_pass_create_info, nullptr, /* pAllocator */ &m_render_pass); if (!is_vk_call_successful(result_vk) ) { anvil_assert_vk_call_succeeded(result_vk); goto end; } set_vk_handle(m_render_pass); result = true; end: return result; }
/* Please see header for specification */ bool Anvil::Semaphore::reset() { VkResult result (VK_ERROR_INITIALIZATION_FAILED); Anvil::StructChainer<VkSemaphoreCreateInfo> struct_chainer; Anvil::StructChainUniquePtr<VkSemaphoreCreateInfo> struct_chain_ptr; release_semaphore(); /* Sanity checks */ if (m_create_info_ptr->get_exportable_external_semaphore_handle_types() != Anvil::ExternalSemaphoreHandleTypeFlagBits::NONE) { if (!m_device_ptr->get_extension_info()->khr_external_semaphore() ) { anvil_assert(m_device_ptr->get_extension_info()->khr_external_semaphore() ); goto end; } } /* Spawn a new semaphore */ { VkSemaphoreCreateInfo semaphore_create_info; semaphore_create_info.flags = 0; semaphore_create_info.pNext = nullptr; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; struct_chainer.append_struct(semaphore_create_info); } if (m_create_info_ptr->get_exportable_external_semaphore_handle_types() != Anvil::ExternalSemaphoreHandleTypeFlagBits::NONE) { VkExportSemaphoreCreateInfo create_info; create_info.handleTypes = m_create_info_ptr->get_exportable_external_semaphore_handle_types().get_vk(); create_info.pNext = nullptr; create_info.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR; struct_chainer.append_struct(create_info); } #if defined(_WIN32) { const Anvil::ExternalNTHandleInfo* nt_handle_info_ptr = nullptr; if (m_create_info_ptr->get_exportable_nt_handle_info(&nt_handle_info_ptr) ) { VkExportSemaphoreWin32HandleInfoKHR handle_info; anvil_assert( nt_handle_info_ptr != nullptr); anvil_assert(((m_create_info_ptr->get_exportable_external_semaphore_handle_types() & Anvil::ExternalSemaphoreHandleTypeFlagBits::OPAQUE_WIN32_BIT) != 0) || ((m_create_info_ptr->get_exportable_external_semaphore_handle_types() & Anvil::ExternalSemaphoreHandleTypeFlagBits::D3D12_FENCE_BIT) != 0)); handle_info.dwAccess = nt_handle_info_ptr->access; handle_info.name = (nt_handle_info_ptr->name.size() > 0) ? 
&nt_handle_info_ptr->name.at(0) : nullptr; handle_info.pAttributes = nt_handle_info_ptr->attributes_ptr; handle_info.pNext = nullptr; handle_info.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR; struct_chainer.append_struct(handle_info); } } #endif struct_chain_ptr = struct_chainer.create_chain(); if (struct_chain_ptr == nullptr) { anvil_assert(struct_chain_ptr != nullptr); goto end; } result = Anvil::Vulkan::vkCreateSemaphore(m_device_ptr->get_device_vk(), struct_chain_ptr->get_root_struct(), nullptr, /* pAllocator */ &m_semaphore); anvil_assert_vk_call_succeeded(result); if (is_vk_call_successful(result) ) { set_vk_handle(m_semaphore); } end: return is_vk_call_successful(result); }