Example #1
void VulkanContext::PopulateBackendInfoMultisampleModes(
    VideoConfig* config, VkPhysicalDevice gpu, const VkPhysicalDeviceProperties& properties)
{
  // Query image support for the EFB texture formats.
  VkImageFormatProperties efb_color_properties = {};
  vkGetPhysicalDeviceImageFormatProperties(
      gpu, EFB_COLOR_TEXTURE_FORMAT, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, &efb_color_properties);
  VkImageFormatProperties efb_depth_properties = {};
  vkGetPhysicalDeviceImageFormatProperties(
      gpu, EFB_DEPTH_TEXTURE_FORMAT, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
      VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, 0, &efb_depth_properties);

  // We can only support MSAA if it's supported on our render target formats.
  VkSampleCountFlags supported_sample_counts = properties.limits.framebufferColorSampleCounts &
                                               properties.limits.framebufferDepthSampleCounts &
                                               efb_color_properties.sampleCounts &
                                               efb_depth_properties.sampleCounts;

  // No AA
  config->backend_info.AAModes.clear();
  config->backend_info.AAModes.emplace_back(1);

  // 2xMSAA/SSAA
  if (supported_sample_counts & VK_SAMPLE_COUNT_2_BIT)
    config->backend_info.AAModes.emplace_back(2);

  // 4xMSAA/SSAA
  if (supported_sample_counts & VK_SAMPLE_COUNT_4_BIT)
    config->backend_info.AAModes.emplace_back(4);

  // 8xMSAA/SSAA
  if (supported_sample_counts & VK_SAMPLE_COUNT_8_BIT)
    config->backend_info.AAModes.emplace_back(8);

  // 16xMSAA/SSAA
  if (supported_sample_counts & VK_SAMPLE_COUNT_16_BIT)
    config->backend_info.AAModes.emplace_back(16);

  // 32xMSAA/SSAA
  if (supported_sample_counts & VK_SAMPLE_COUNT_32_BIT)
    config->backend_info.AAModes.emplace_back(32);

  // 64xMSAA/SSAA
  if (supported_sample_counts & VK_SAMPLE_COUNT_64_BIT)
    config->backend_info.AAModes.emplace_back(64);
}
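The six bit tests above follow one pattern; when only the single best mode is wanted, the same intersected mask can instead be scanned from the top. A minimal C sketch against the Vulkan C API only (the helper is hypothetical, not part of the code above):

static VkSampleCountFlagBits MaxSupportedSamples(VkSampleCountFlags supported_sample_counts)
{
  // Scan from 64x down to 2x; fall back to single-sampled rendering.
  for (VkSampleCountFlagBits bit = VK_SAMPLE_COUNT_64_BIT; bit > VK_SAMPLE_COUNT_1_BIT;
       bit = (VkSampleCountFlagBits)(bit >> 1))
  {
    if (supported_sample_counts & bit)
      return bit;
  }
  return VK_SAMPLE_COUNT_1_BIT;
}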
Example #2
std::unique_ptr<StagingTexture2D> StagingTexture2D::Create(STAGING_BUFFER_TYPE type, u32 width,
                                                           u32 height, VkFormat format)
{
// TODO: Using a buffer here as opposed to a linear texture is faster on AMD.
// NVIDIA also seems faster with buffers over textures.
#if 0
  // Check for support for this format as a linear texture.
  // Some drivers don't support this (e.g. adreno).
  VkImageFormatProperties properties;
  VkResult res = vkGetPhysicalDeviceImageFormatProperties(
      g_object_cache->GetPhysicalDevice(), format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_LINEAR,
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0, &properties);
  if (res == VK_SUCCESS && width <= properties.maxExtent.width &&
      height <= properties.maxExtent.height)
  {
    return StagingTexture2DLinear::Create(type, width, height, format);
  }
#endif

  // Fall back to a buffer copy.
  return StagingTexture2DBuffer::Create(type, width, height, format);
}
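A cheaper first-pass probe for the disabled linear-texture path is vkGetPhysicalDeviceFormatProperties, which reports what each tiling mode supports for a format, without extent limits. A sketch, assuming the caller owns a VkPhysicalDevice handle (VK_FORMAT_FEATURE_TRANSFER_SRC_BIT is core since Vulkan 1.1; on 1.0 it comes from VK_KHR_maintenance1):

static bool FormatSupportsLinearTransferSrc(VkPhysicalDevice physical_device, VkFormat format)
{
  VkFormatProperties fp;
  vkGetPhysicalDeviceFormatProperties(physical_device, format, &fp);
  // linearTilingFeatures describes VK_IMAGE_TILING_LINEAR images of this format;
  // extent and mip limits still require vkGetPhysicalDeviceImageFormatProperties.
  return (fp.linearTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) != 0;
}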
Example #3
File: tut7.c Project: ShabbyX/vktut
tut1_error tut7_create_images(struct tut1_physical_device *phy_dev, struct tut2_device *dev,
		struct tut7_image *images, uint32_t image_count)
{
	/*
	 * In this function, we will create a bunch of images.  Images in graphics serve essentially two purposes.  One
	 * is to provide data to shaders, traditionally known as textures.  The other is to render into, either as the
	 * final result or for further use, traditionally also known as textures.  Vulkan calls all of these "images",
	 * which are just glorified "buffers".  We already worked with a Vulkan buffer in Tutorial 4, which was just an
	 * array of data.  Images on the other hand can have up to 3 dimensions, a format (such as BGRA), multisampling
	 * properties, tiling properties and a layout.  They are glorified buffers because all of these features can be
	 * emulated with buffers, although besides requiring more work in the shaders, using images also allows a lot
	 * more optimization by the device and its driver.
	 *
	 * That said, creating images is fairly similar to buffers.  You create an image, allocate memory to it, create
	 * an image view for access to the image, you bind it to a command buffer through a descriptor set and go on
	 * using it in the shaders.  Like buffers, you can choose to initialize the image.  The data sent through
	 * images could be anything, such as textures used to draw objects, patterns used by a shader to apply an
	 * effect, or just general data for the shaders.  The image can be written to as well.  The data written to an
	 * image could also be for anything, such as the final colors that go on to be displayed on the screen, the
	 * depth or stencil data, the output of a filter used for further processing, the processed image to be
	 * retrieved by the application (e.g. used by gimp), a texture that evolves over time, etc.
	 *
	 * Loading the image data is outside the scope of this tutorial, so we'll leave that for another time.  Once
	 * the image is created, its device memory can be mapped, loaded and unmapped, so it is not necessary to do
	 * that in this function either.
	 */
	uint32_t successful = 0;
	tut1_error retval = TUT1_ERROR_NONE;
	VkResult res;

	for (uint32_t i = 0; i < image_count; ++i)
	{
		images[i].image = NULL;
		images[i].image_mem = NULL;
		images[i].view = NULL;
		images[i].sampler = NULL;

		/*
		 * To create an image, we need a CreateInfo struct as usual.  Some parts of this struct are similar to
		 * VkBufferCreateInfo from Tutorial 3.  The ones that need explanation are explained here.  The image
		 * type specifies the dimensionality of the image.  In this tutorial series, we will use 2D
		 * images for simplicity.  Also for simplicity, let's ignore mipmapping and image layers.  The image
		 * format is one of VK_FORMAT.  A normal format could be VK_FORMAT_B8G8R8A8_UNORM, but it might make
		 * sense to use other formats, especially for images that would get their data from a texture file.
		 *
		 * If the image is going to be initialized, for example from a texture file, then the structure of the
		 * image data, otherwise known as "tiling", must be set to linear.  This means that the image is stored
		 * as a normal row-major array, so that when its memory is mapped by the application, it would make
		 * sense!  If the image is not to be initialized on the other hand, it is better to keep the tiling as
		 * optimal, which means whatever format the GPU likes best.  It is necessary for the application to
		 * copy a linear image to an optimal one for GPU usage.  If the application wants to read the image
		 * back, it must copy it from an optimal image to a linear one.  This also means that the `usage` of
		 * the linear images can contain only TRANSFER_SRC and TRANSFER_DST bits.  More on image copies when we
		 * actually start using them.
		 *
		 * Linear images can only be single-sampled.  Optimal images can be multisampled.  You can read about
		 * multisampling online (from OpenGL), but in short it asks for each pixel to be sampled at multiple
		 * locations inside the pixel, which helps with antialiasing.  Here, we will simply choose a higher
		 * number of samples as allowed by the GPU (retrieved with vkGetPhysicalDeviceImageFormatProperties
		 * below).
		 *
		 * Linear images are also restricted to 2D, no mipmapping and no layers, which is fortunate because we
		 * wanted those for simplicity anyway!  There is also a restriction on the format of the image, which
		 * cannot be depth/stencil.
		 *
		 * In Tutorial 3, we specified the buffer usage as storage, which was a rather generic specification.
		 * For the image, we have more options to specify the usage.  Choosing the minimum usage bits for each
		 * image, specifying only what we actually want to do with it, allows the GPU to possibly place
		 * the image in the most optimal memory location, or load/unload the image at necessary times.  This is
		 * left to the application to provide as it varies from case to case.  The usages in short are:
		 *
		 * Transfer src/dst: whether the image can be used as a source/destination of an image copy.
		 * Sampled: whether the image can be sampled by a shader.
		 * Storage: whether the image can be read from and written to by a shader.
		 * Color attachment: whether the image can be used as a render target (for color).
		 * Depth/stencil attachment: whether the image can be used as a render target (for depth/stencil).
		 * Transient attachment: whether the image is lazily allocated (ignored for now).
		 * Input attachment: whether the image can be read (unfiltered) by a shader (ignored for now).
		 *
		 * If the image was to be shared between queue families, it should be declared with a special
		 * `sharingMode` specifying that there would be concurrent access to the image, and by which queue
		 * families.  We are going to use the images and views created here in multiple pipelines, one for each
		 * swapchain image.  Since those pipelines may be created on top of different queue families, we need
		 * to tell Vulkan that these images would be shared.  At the time of this writing, on Nvidia cards
		 * there is only one queue family and sharing is meaningless.  However, it is legal for a driver to
		 * expose multiple similar queue families instead of one queue family with multiple queues.  The
		 * application is expected to provide the queue families that would use this image.  Most likely, the
		 * result of `tut7_get_presentable_queues` is what you would want.
		 *
		 * Finally, an image has a layout.  Each layout is limited in what operations can be done in it, but
		 * in exchange is optimal for a particular task.  The possible image layouts are:
		 *
		 * Undefined: no device access is allowed.  This is used as an initial layout and must be transitioned
		 *   away from before use.
		 * Preinitialized: no device access is allowed.  Similar to undefined, this is only an initial layout
		 *   and must be transitioned away from before use.  The only difference is that the contents of the
		 *   image are kept during the transition.
		 * General: supports all types of device access.
		 * Color attachment optimal: only usable with color attachment images.
		 * Depth/stencil attachment optimal: only usable with depth/stencil attachment images.
		 * Depth/stencil read-only optimal: only usable with depth/stencil attachment images.  The difference
		 *   between this and the depth/stencil attachment optimal layout is that this image can also be used
		 *   as a read-only sampled image or input attachment for use by the shaders.
		 * Shader read-only optimal: only usable with sampled and input attachment images.  Similar to
		 *   depth/stencil read-only optimal, this layout can be used as a read-only image or input attachment
		 *   for use by the shaders.
		 * Transfer src/dst optimal: only usable with transfer src/dst images and must only be used as the
		 *   source or destination of an image transfer.
		 * Present src (extension): used for presenting an image to a swapchain.  An image taken from the
		 *   swapchain with vkAcquireNextImageKHR is in this layout and must be transitioned away from
		 *   before use.  Before giving the image back with vkQueuePresentKHR, it must be transitioned
		 *   to this layout again.
		 *
		 * For linear images, which are going to be initialized by the application, we will use the
		 * preinitialized layout.  Otherwise, the layout must be undefined and later transitioned to the
		 * desired layout using a pipeline barrier (more on this later).
		 */
		VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;
		VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_1_BIT;
		VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
		if (images[i].will_be_initialized || images[i].host_visible)
		{
			images[i].usage &= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
			layout = VK_IMAGE_LAYOUT_PREINITIALIZED;
			tiling = VK_IMAGE_TILING_LINEAR;
		}
		else if (images[i].multisample)
		{
			/*
			 * To get the format properties for an image, we need to tell Vulkan how we expect to create the image,
			 * i.e. what is its format, type, tiling, usage and flags (which we didn't use).  We could check many
			 * of the parameters given to this function with the properties returned from this function, but we'll
			 * just take a possible sampling count out of it, and assume the parameters are fine.  In a real
			 * application, you would want to do more validity checks.
			 */
			VkImageFormatProperties format_properties;
			res = vkGetPhysicalDeviceImageFormatProperties(phy_dev->physical_device, images[i].format, VK_IMAGE_TYPE_2D,
					tiling, images[i].usage, 0, &format_properties);
			tut1_error_sub_set_vkresult(&retval, res);
			if (res == VK_SUCCESS)
			{
				for (uint32_t s = VK_SAMPLE_COUNT_16_BIT; s != 0; s >>= 1)
					if ((format_properties.sampleCounts & s))
					{
						samples = s;
						break;
					}
			}
		}

		/*
		 * Create the image with the above description as usual.  The CreateInfo struct carries the
		 * parameters described above; memory allocation callbacks are not used.
		 */
		bool shared = images[i].sharing_queue_count > 1;
		struct VkImageCreateInfo image_info = {
			.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			.imageType = VK_IMAGE_TYPE_2D,
			.format = images[i].format,
			.extent = {images[i].extent.width, images[i].extent.height, 1},
			.mipLevels = 1,
			.arrayLayers = 1,
			.samples = samples,
			.tiling = tiling,
			.usage = images[i].usage,
			.sharingMode = shared?VK_SHARING_MODE_CONCURRENT:VK_SHARING_MODE_EXCLUSIVE,
			.queueFamilyIndexCount = shared?images[i].sharing_queue_count:0,
			.pQueueFamilyIndices = shared?images[i].sharing_queues:NULL,
			.initialLayout = layout,
		};
		res = vkCreateImage(dev->device, &image_info, NULL, &images[i].image);
		tut1_error_sub_set_vkresult(&retval, res);
		if (res)
			continue;

		/*
		 * In Tutorial 4, we created a buffer, allocated memory for it and bound the memory to the buffer.
		 * Images are glorified buffers and the process is similar.  The same argument regarding host-coherent
		 * memory holds here as well.  So, if the image requires device-local memory, we will look for that,
		 * otherwise we will look for memory that is not just host-visible, but also host-coherent.
		 */
		VkMemoryRequirements mem_req = {0};
		vkGetImageMemoryRequirements(dev->device, images[i].image, &mem_req);
		uint32_t mem_index = tut4_find_suitable_memory(phy_dev, dev, &mem_req,
				images[i].host_visible?
				VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT:
				VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
		if (mem_index >= phy_dev->memories.memoryTypeCount)
			continue;

		VkMemoryAllocateInfo mem_info = {
			.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
			.allocationSize = mem_req.size,
			.memoryTypeIndex = mem_index,
		};

		res = vkAllocateMemory(dev->device, &mem_info, NULL, &images[i].image_mem);
		tut1_error_sub_set_vkresult(&retval, res);
		if (res)
			continue;

		res = vkBindImageMemory(dev->device, images[i].image, images[i].image_mem, 0);
		tut1_error_sub_set_vkresult(&retval, res);
		if (res)
			continue;

		if (images[i].make_view)
		{
			/*
			 * Once we have an image, we need a view on the image to be able to use it.  This is just like in
			 * Tutorial 4 where we had a view on the buffer to work with it.  In Tutorial 4, we had divided up the
			 * buffer for concurrent processing in the shaders, and each view looked at a specific part of the
			 * buffer.  With images, this could also be useful, for example if one large image contains multiple
			 * areas of interest (such as a texture) where different shaders need to look at.  However, let's keep
			 * things as simple as possible and create a view that is as large as the image itself.
			 *
			 * The image view's CreateInfo is largely similar to the one for buffer views.  For image views, we
			 * need to specify which components of the image we want to view and the range is not a simple
			 * (offset, size) as was in the buffer view.
			 *
			 * For the components, we have the option to not only select which components (R, G, B and A) to view,
			 * but also to remap them (this operation is called swizzle).  For example to get the value of the red
			 * component in place of alpha etc.  The mapping for each component can be specified separately, and
			 * mapping 0 means identity.  We are not going to remap anything, so we'll leave all fields in
			 * `components` be 0.
			 *
			 * The range of the image asks for which mipmap levels and image array layers we are interested in,
			 * which are simply both 0 because we have only one of each.  As part of the range of the view, we also
			 * need to specify which aspect of the image we are looking at.  This could be color, depth, stencil
			 * etc.  Here, we will decide the aspect based on the image usage; if it's used as depth/stencil, we
			 * will set both depth and stencil aspects.  Otherwise we will view the color aspect.
			 */
			VkImageViewCreateInfo view_info = {
				.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				.image = images[i].image,
				.viewType = VK_IMAGE_VIEW_TYPE_2D,
				.format = images[i].format,
				.subresourceRange = {
					.aspectMask = (images[i].usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0?
							VK_IMAGE_ASPECT_COLOR_BIT:
							VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
					.baseMipLevel = 0,
					.levelCount = VK_REMAINING_MIP_LEVELS,
					.baseArrayLayer = 0,
					.layerCount = VK_REMAINING_ARRAY_LAYERS,
				},
			};

			res = vkCreateImageView(dev->device, &view_info, NULL, &images[i].view);
			tut1_error_sub_set_vkresult(&retval, res);
			if (res)
				continue;
		}

		if ((images[i].usage & VK_IMAGE_USAGE_SAMPLED_BIT))
		{
			/*
			 * If the image is going to be sampled, we can create a sampler for it as well.  A sampler
			 * specifies how to sample an image.  An image is just a glorified buffer, i.e., it's just an
			 * array, as I have said before as well.  However, the sampler is what makes using images so
			 * much more powerful.  When accessing a buffer, you can access each index individually.  With
			 * a sampler, you can access an image at non-integer indices.  The sampler then "filters" the
			 * image to provide some data for that index.
			 *
			 * The simplest example is magnification.  If you sample the image at coordinates (u+0.5,v)
			 * where u and v are integer pixel locations, then the color you get could be the average of
			 * the colors (values) at coordinates (u,v) and (u+1,v).  Vulkan uses the term `texel` to refer
			 * to these "texture" pixels.
			 *
			 * The sampler parameters are explained below:
			 *
			 * - magFilter, minFilter: what to do if asked to sample between the texels.  The options are
			 *   to take the value of the nearest texel, or interpolate between neighbors.  Think about it
			 *   as what to do if you try to zoom in or out of an image.  We'll go with interpolation,
			 *   since it's nicer.
			 * - mipmapMode: similarly, if the image has multiple mipmap levels, accessing between the
			 *   levels could either interpolate between two levels or clamp to the nearest.  We don't use
			 *   mipmaps here, so this doesn't matter, but let's tell it to interpolate anyway.
			 * - addressModeU/V/W: this specifies what happens if you access outside the image.  The
			 *   options are to:
			 *   * repeat the image as if it were tiled to infinity in each direction,
			 *   * mirrored repeat the image as if a larger image containing the image and its mirror
			 *     was tiled to infinity in each direction,
			 *   * clamp to edge so that any access out of the image boundaries returns the value at the
			 *     closest point on the edge of the image,
			 *   * clamp to border so that any access out of the image boundaries returns a special
			 *     "border" value for the image (border value defined below),
			 *   * mirrored clamp to edge so that any access out of the image boundaries returns the value
			 *     at the closest point on the edge of a larger image that is made up of the image and its
			 *     mirror.
			 *   Each of these modes is useful in different situations.  "Repeat" is probably the most
			 *   problematic as it introduces discontinuity around the edges.  "Mirrored" solves this
			 *   problem and can add some interesting effects.  "Clamp to edge" also solves this problem,
			 *   and let's just use that.  "Clamp to border" would introduce other edges, and I imagine is
			 *   most useful for debugging.
			 * - anisotropyEnable, maxAnisotropy: whether anisotropic filtering is enabled and by how
			 *   much.  Anisotropic filtering is expensive but nice, so let's enable it.  The maximum value
			 *   for anisotropic filtering can be retrieved from the device's limits.
			 * - compareEnable, compareOp: this is used with depth images to result in a reading of 0 or 1
			 *   based on the result of a compare operation.  We are not interested in this for now.
			 * - minLod, maxLod: the level-of-detail value (mip level) gets clamped to these values.  We
			 *   are not using mipmapped images, so we'll just give 0 and 1 respectively.
			 * - borderColor: if the "clamp to border" addressing mode was selected, out-of-bound accesses
			 *   to the image would return the border color, which is set here.  Options are limited:
			 *   transparent, white and black.  Since we're using "clamp to edge" addressing, this value is
			 *   not used.
			 * - unnormalizedCoordinates: with Vulkan, you can either index the image using the
			 *   unnormalized coordinates, so that u and v span from 0 to size of the image, or you can
			 *   access the image using normalized coordinates, so that u and v span from 0 to 1.
			 *   Unnormalized coordinates can be useful in some circumstances, but normalized coordinates
			 *   let you access the image without dealing with its size.  Aside from that, with
			 *   unnormalized coordinates, you are limited in the type of images you can access; only 1D
			 *   and 2D images with a single layer and single mip level are acceptable and essentially all
			 *   other features of the sampler must be disabled too.  Needless to say, we will use
			 *   normalized coordinates.
			 */
			VkSamplerCreateInfo sampler_info = {
				.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
				.magFilter = VK_FILTER_LINEAR,
				.minFilter = VK_FILTER_LINEAR,
				.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
				.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				.anisotropyEnable = true,
				.maxAnisotropy = phy_dev->properties.limits.maxSamplerAnisotropy,
				.minLod = 0,
				.maxLod = 1,
			};

			res = vkCreateSampler(dev->device, &sampler_info, NULL, &images[i].sampler);
			tut1_error_sub_set_vkresult(&retval, res);
			if (res)
				continue;
		}

		++successful;
	}

	/*
	 * Now that you have learned all about images, we're not going to use them in this tutorial.  Please don't hate
	 * me.  There is already so much here that rendering textured images can wait.  It was not all in vain though
	 * because we would need image views on the swapchain images anyway.  Now at least you understand the
	 * properties and restrictions of the swapchain images better.
	 */

	tut1_error_set_vkresult(&retval, successful == image_count?VK_SUCCESS:VK_INCOMPLETE);
	return retval;
}
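A hypothetical call site for the function above, in the same C style; the field names are exactly those the loop reads, their types are assumed from that use, and error inspection is left to the tut1_error helpers:

	struct tut7_image image = {
		.format = VK_FORMAT_B8G8R8A8_UNORM,
		.extent = { .width = 1024, .height = 1024 },
		.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
		.make_view = true,
		.multisample = true,	/* let the function pick a supported sample count */
	};
	tut1_error err = tut7_create_images(phy_dev, dev, &image, 1);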
Example #4
Error GrManagerImpl::initInternal(const GrManagerInitInfo& init)
{
	ANKI_LOGI("Initializing Vulkan backend");
	ANKI_CHECK(initInstance(init));
	ANKI_CHECK(initSurface(init));
	ANKI_CHECK(initDevice(init));
	vkGetDeviceQueue(m_device, m_queueIdx, 0, &m_queue);
	ANKI_CHECK(initSwapchain(init));

	{
		VkPipelineCacheCreateInfo ci = {};
		ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
		vkCreatePipelineCache(m_device, &ci, nullptr, &m_pplineCache);
	}

	ANKI_CHECK(initMemory(*init.m_config));
	ANKI_CHECK(m_dsetAlloc.init(getAllocator(), m_device));
	m_pplineLayFactory.init(getAllocator(), m_device, &m_dsetAlloc.getDescriptorSetLayoutFactory());

	for(PerFrame& f : m_perFrame)
	{
		resetFrame(f);
	}

	glslang::InitializeProcess();
	m_fences.init(getAllocator(), m_device);
	m_semaphores.init(getAllocator(), m_device);

	m_queryAlloc.init(getAllocator(), m_device);

	m_samplerCache = getAllocator().newInstance<GrObjectCache>(m_manager);

	// Set m_r8g8b8ImagesSupported
	{
		VkImageFormatProperties props = {};
		VkResult res = vkGetPhysicalDeviceImageFormatProperties(m_physicalDevice,
			VK_FORMAT_R8G8B8_UNORM,
			VK_IMAGE_TYPE_2D,
			VK_IMAGE_TILING_OPTIMAL,
			VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
			0,
			&props);

		if(res == VK_ERROR_FORMAT_NOT_SUPPORTED)
		{
			ANKI_LOGI("R8G8B8 Images are not supported. Will workaround this");
			m_r8g8b8ImagesSupported = false;
		}
		else
		{
			ANKI_ASSERT(res == VK_SUCCESS);
			ANKI_LOGI("R8G8B8 Images are supported");
			m_r8g8b8ImagesSupported = true;
		}
	}

	// Set m_s8ImagesSupported
	{
		VkImageFormatProperties props = {};
		VkResult res = vkGetPhysicalDeviceImageFormatProperties(m_physicalDevice,
			VK_FORMAT_S8_UINT,
			VK_IMAGE_TYPE_2D,
			VK_IMAGE_TILING_OPTIMAL,
			VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
			0,
			&props);

		if(res == VK_ERROR_FORMAT_NOT_SUPPORTED)
		{
			ANKI_LOGI("S8 Images are not supported. Will workaround this");
			m_s8ImagesSupported = false;
		}
		else
		{
			ANKI_ASSERT(res == VK_SUCCESS);
			ANKI_LOGI("S8 Images are supported");
			m_s8ImagesSupported = true;
		}
	}

	// Set m_d24S8ImagesSupported
	{
		VkImageFormatProperties props = {};
		VkResult res = vkGetPhysicalDeviceImageFormatProperties(m_physicalDevice,
			VK_FORMAT_D24_UNORM_S8_UINT,
			VK_IMAGE_TYPE_2D,
			VK_IMAGE_TILING_OPTIMAL,
			VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
			0,
			&props);

		if(res == VK_ERROR_FORMAT_NOT_SUPPORTED)
		{
			ANKI_LOGI("D24S8 Images are not supported. Will workaround this");
			m_d24S8ImagesSupported = false;
		}
		else
		{
			ANKI_ASSERT(res == VK_SUCCESS);
			ANKI_LOGI("D24S8 Images are supported");
			m_d24S8ImagesSupported = true;
		}
	}

	return ErrorCode::NONE;
}
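The three probes above differ only in the format and usage bits, so they could share one helper; a minimal sketch against the same entry point (the helper name is hypothetical, not AnKi API):

static bool imageFormatSupported(VkPhysicalDevice pdev, VkFormat format, VkImageUsageFlags usage)
{
	// Mirror the pattern above: only VK_ERROR_FORMAT_NOT_SUPPORTED means
	// unsupported; any other result is expected to be VK_SUCCESS.
	VkImageFormatProperties props;
	VkResult res = vkGetPhysicalDeviceImageFormatProperties(
		pdev, format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, usage, 0, &props);
	return res != VK_ERROR_FORMAT_NOT_SUPPORTED;
}

For example: m_r8g8b8ImagesSupported = imageFormatSupported(m_physicalDevice, VK_FORMAT_R8G8B8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);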