Error GrManagerImpl::initDevice(const GrManagerInitInfo& init) { uint32_t count = 0; vkGetPhysicalDeviceQueueFamilyProperties(m_physicalDevice, &count, nullptr); ANKI_LOGI("VK: Number of queue families: %u\n", count); DynamicArrayAuto<VkQueueFamilyProperties> queueInfos(getAllocator()); queueInfos.create(count); vkGetPhysicalDeviceQueueFamilyProperties(m_physicalDevice, &count, &queueInfos[0]); uint32_t desiredFamilyIdx = MAX_U32; const VkQueueFlags DESITED_QUEUE_FLAGS = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT; for(U i = 0; i < count; ++i) { if((queueInfos[i].queueFlags & DESITED_QUEUE_FLAGS) == DESITED_QUEUE_FLAGS) { VkBool32 supportsPresent = false; ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceSupportKHR(m_physicalDevice, i, m_surface, &supportsPresent)); if(supportsPresent) { desiredFamilyIdx = i; break; } } } if(desiredFamilyIdx == MAX_U32) { ANKI_LOGE("Couldn't find a queue family with graphics+compute+transfer+present." "The assumption was wrong. The code needs to be reworked"); return ErrorCode::FUNCTION_FAILED; } m_queueIdx = desiredFamilyIdx; F32 priority = 1.0; VkDeviceQueueCreateInfo q = {}; q.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; q.queueFamilyIndex = desiredFamilyIdx; q.queueCount = 1; q.pQueuePriorities = &priority; static Array<const char*, 1> DEV_EXTENSIONS = {{VK_KHR_SWAPCHAIN_EXTENSION_NAME}}; VkDeviceCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; ci.queueCreateInfoCount = 1; ci.pQueueCreateInfos = &q; ci.enabledExtensionCount = DEV_EXTENSIONS.getSize(); ci.ppEnabledExtensionNames = &DEV_EXTENSIONS[0]; ci.pEnabledFeatures = &m_devFeatures; ANKI_VK_CHECK(vkCreateDevice(m_physicalDevice, &ci, nullptr, &m_device)); return ErrorCode::NONE; }
Error QueryAllocator::newQuery(QueryAllocationHandle& handle) { ANKI_ASSERT(!handle); LockGuard<Mutex> lock(m_mtx); // Find a not-full chunk Chunk* chunk = nullptr; for(Chunk& c : m_chunks) { if(c.m_subAllocationCount < MAX_SUB_ALLOCATIONS_PER_QUERY_CHUNK) { // Found one if(chunk == nullptr) { chunk = &c; } else if(c.m_subAllocationCount > chunk->m_subAllocationCount) { // To decrease fragmentation use the most full chunk chunk = &c; } } } if(chunk == nullptr) { // Create new chunk chunk = m_alloc.newInstance<Chunk>(); VkQueryPoolCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; ci.queryType = VK_QUERY_TYPE_OCCLUSION; ci.queryCount = MAX_SUB_ALLOCATIONS_PER_QUERY_CHUNK; ANKI_VK_CHECK(vkCreateQueryPool(m_dev, &ci, nullptr, &chunk->m_pool)); m_chunks.pushBack(chunk); } ANKI_ASSERT(chunk); // Allocate from chunk for(U i = 0; i < MAX_SUB_ALLOCATIONS_PER_QUERY_CHUNK; ++i) { if(chunk->m_allocatedMask.get(i) == 0) { chunk->m_allocatedMask.set(i); ++chunk->m_subAllocationCount; handle.m_pool = chunk->m_pool; handle.m_queryIndex = i; handle.m_chunk = chunk; break; } } ANKI_ASSERT(handle == true); return ErrorCode::NONE; }
//============================================================================== void PipelineImpl::initGraphics(const PipelineInitInfo& init) { FilledGraphicsPipelineCreateInfo ci = FILLED; ci.pStages = &ci.m_stages[0]; initShaders(init, ci); // Init sub-states ci.pVertexInputState = initVertexStage(init.m_vertex, ci.m_vertex); ci.pInputAssemblyState = initInputAssemblyState(init.m_inputAssembler, ci.m_ia); ci.pTessellationState = initTessellationState(init.m_tessellation, ci.m_tess); ci.pViewportState = initViewportState(ci.m_vp); ci.pRasterizationState = initRasterizerState(init.m_rasterizer, ci.m_rast); ci.pMultisampleState = initMsState(ci.m_ms); ci.pDepthStencilState = initDsState(init.m_depthStencil, ci.m_ds); ci.pColorBlendState = initColorState(init.m_color, ci.m_color); ci.pDynamicState = nullptr; // No dynamic state as static at the moment // Finalize ci.layout = getGrManagerImpl().m_globalPipelineLayout; ci.renderPass = getGrManagerImpl().getOrCreateCompatibleRenderPass(init); ci.basePipelineHandle = VK_NULL_HANDLE; ANKI_VK_CHECK(vkCreateGraphicsPipelines( getDevice(), nullptr, 1, &ci, nullptr, &m_handle)); }
Error SamplerImpl::init(const SamplerInitInfo& ii) { // Fill the create cio VkSamplerCreateInfo ci; memset(&ci, 0, sizeof(ci)); ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; if(ii.m_minMagFilter == SamplingFilter::NEAREST) { ci.magFilter = ci.minFilter = VK_FILTER_NEAREST; } else { ANKI_ASSERT(ii.m_minMagFilter == SamplingFilter::LINEAR); ci.magFilter = ci.minFilter = VK_FILTER_LINEAR; } if(ii.m_mipmapFilter == SamplingFilter::BASE || ii.m_mipmapFilter == SamplingFilter::NEAREST) { ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; } else { ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; } if(ii.m_repeat) { ci.addressModeU = ci.addressModeV = ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT; } else { ci.addressModeU = ci.addressModeV = ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; } ci.mipLodBias = 0.0; if(ii.m_anisotropyLevel > 0) { ci.anisotropyEnable = VK_TRUE; ci.maxAnisotropy = ii.m_anisotropyLevel; } ci.compareOp = convertCompareOp(ii.m_compareOperation); if(ci.compareOp != VK_COMPARE_OP_ALWAYS) { ci.compareEnable = VK_TRUE; } ci.minLod = ii.m_minLod; ci.maxLod = ii.m_maxLod; ci.unnormalizedCoordinates = VK_FALSE; // Create ANKI_VK_CHECK(vkCreateSampler(getDevice(), &ci, nullptr, &m_handle)); return ErrorCode::NONE; }
//============================================================================== Error OcclusionQueryImpl::init(OcclusionQueryResultBit condRenderingBit) { m_condRenderingBit = condRenderingBit; VkQueryPoolCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; ci.queryType = VK_QUERY_TYPE_OCCLUSION; ci.queryCount = 1; ANKI_VK_CHECK(vkCreateQueryPool(getDevice(), &ci, nullptr, &m_handle)); return ErrorCode::NONE; }
Error CommandBufferFactory::init(GenericMemoryPoolAllocator<U8> alloc, VkDevice dev, uint32_t queueFamily) { m_alloc = alloc; VkCommandPoolCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; ci.queueFamilyIndex = queueFamily; ANKI_VK_CHECK(vkCreateCommandPool(dev, &ci, nullptr, &m_pool)); m_dev = dev; return ErrorCode::NONE; }
Error GrManagerImpl::initSurface(const GrManagerInitInfo& init) { SDL_SysWMinfo wminfo; SDL_VERSION(&wminfo.version); if(!SDL_GetWindowWMInfo(init.m_window->getNative().m_window, &wminfo)) { ANKI_LOGE("SDL_GetWindowWMInfo() failed"); return ErrorCode::NONE; } #if ANKI_OS == ANKI_OS_LINUX VkXcbSurfaceCreateInfoKHR ci = {}; ci.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR; ci.connection = XGetXCBConnection(wminfo.info.x11.display); ci.window = wminfo.info.x11.window; ANKI_VK_CHECK(vkCreateXcbSurfaceKHR(m_instance, &ci, nullptr, &m_surface)); #elif ANKI_OS == ANKI_OS_WINDOWS Array<TCHAR, 512> className; GetClassName(wminfo.info.win.window, &className[0], className.getSize()); WNDCLASS wce = {}; GetClassInfo(GetModuleHandle(NULL), &className[0], &wce); VkWin32SurfaceCreateInfoKHR ci = {}; ci.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; ci.hinstance = wce.hInstance; ci.hwnd = wminfo.info.win.window; ANKI_VK_CHECK(vkCreateWin32SurfaceKHR(m_instance, &ci, nullptr, &m_surface)); #else #error TODO #endif return ErrorCode::NONE; }
//============================================================================== void PipelineImpl::initCompute(const PipelineInitInfo& init) { VkComputePipelineCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; ci.pNext = nullptr; ci.layout = getGrManagerImpl().m_globalPipelineLayout; ci.basePipelineHandle = VK_NULL_HANDLE; VkPipelineShaderStageCreateInfo& stage = ci.stage; stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; stage.pNext = nullptr; stage.flags = 0; stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; stage.module = init.m_shaders[ShaderType::COMPUTE]->getImplementation().m_handle; stage.pName = "main"; stage.pSpecializationInfo = nullptr; ANKI_VK_CHECK(vkCreateComputePipelines( getDevice(), nullptr, 1, &ci, nullptr, &m_handle)); }
Error DescriptorSetAllocator::allocate(const DescriptorSetLayoutInfo& dsinf, VkDescriptorSet& out) { VkDescriptorSetLayout layout; ANKI_CHECK(m_layoutFactory.getOrCreateLayout(dsinf, layout)); out = VK_NULL_HANDLE; VkDescriptorSetAllocateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; ci.descriptorPool = m_globalDPool; ci.descriptorSetCount = 1; ci.pSetLayouts = &layout; LockGuard<Mutex> lock(m_mtx); if(++m_descriptorSetAllocationCount > MAX_RESOURCE_GROUPS) { ANKI_LOGE("Exceeded the MAX_RESOURCE_GROUPS"); return ErrorCode::OUT_OF_MEMORY; } ANKI_VK_CHECK(vkAllocateDescriptorSets(m_dev, &ci, &out)); return ErrorCode::NONE; }
Error DescriptorSetAllocator::initGlobalDsetPool() { Array<VkDescriptorPoolSize, 4> pools = {{}}; pools[0] = VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, MAX_TEXTURE_BINDINGS * MAX_RESOURCE_GROUPS}; pools[1] = VkDescriptorPoolSize{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, MAX_UNIFORM_BUFFER_BINDINGS * MAX_RESOURCE_GROUPS}; pools[2] = VkDescriptorPoolSize{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, MAX_STORAGE_BUFFER_BINDINGS * MAX_RESOURCE_GROUPS}; pools[3] = VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, MAX_IMAGE_BINDINGS * MAX_RESOURCE_GROUPS}; VkDescriptorPoolCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ci.maxSets = MAX_RESOURCE_GROUPS; ci.poolSizeCount = pools.getSize(); ci.pPoolSizes = &pools[0]; ANKI_VK_CHECK(vkCreateDescriptorPool(m_dev, &ci, nullptr, &m_globalDPool)); return ErrorCode::NONE; }
//============================================================================== Error GrManagerImpl::initSurface(const GrManagerInitInfo& init) { SDL_SysWMinfo wminfo; SDL_VERSION(&wminfo.version); if(!SDL_GetWindowWMInfo(init.m_window->getNative().m_window, &wminfo)) { ANKI_LOGE("SDL_GetWindowWMInfo() failed"); return ErrorCode::NONE; } #if ANKI_OS == ANKI_OS_LINUX VkXcbSurfaceCreateInfoKHR ci = {}; ci.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR; ci.connection = XGetXCBConnection(wminfo.info.x11.display); ci.window = wminfo.info.x11.window; ANKI_VK_CHECK(vkCreateXcbSurfaceKHR(m_instance, &ci, nullptr, &m_surface)); #else #error TODO #endif return ErrorCode::NONE; }
/// Create the VkBuffer, pick a memory type matching the requested map access
/// (with progressively weaker fallbacks), allocate and bind the memory.
Error BufferImpl::init(const BufferInitInfo& inf)
{
	ANKI_ASSERT(!isCreated());
	PtrSize size = inf.m_size;
	BufferMapAccessBit access = inf.m_access;
	BufferUsageBit usage = inf.m_usage;
	ANKI_ASSERT(size > 0);
	ANKI_ASSERT(usage != BufferUsageBit::NONE);

	// Align the size to satisfy fill buffer
	alignRoundUp(4, size);

	// Create the buffer. Exclusive sharing on the graphics queue family only.
	VkBufferCreateInfo ci = {};
	ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	ci.size = size;
	ci.usage = convertBufferUsageBit(usage);
	ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
	ci.queueFamilyIndexCount = 1;
	U32 queueIdx = getGrManagerImpl().getGraphicsQueueFamily();
	ci.pQueueFamilyIndices = &queueIdx;
	ANKI_VK_CHECK(vkCreateBuffer(getDevice(), &ci, nullptr, &m_handle));
	getGrManagerImpl().trySetVulkanHandleName(inf.getName(), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, m_handle);

	// Get mem requirements
	VkMemoryRequirements req;
	vkGetBufferMemoryRequirements(getDevice(), m_handle, &req);
	U memIdx = MAX_U32;

	// Memory type selection: each access mode tries a preferred combination of
	// property flags first, then falls back to weaker combinations.
	if(access == BufferMapAccessBit::WRITE)
	{
		// Only write, probably for uploads
		VkMemoryPropertyFlags preferDeviceLocal;
		VkMemoryPropertyFlags avoidDeviceLocal;
		if((usage & (~BufferUsageBit::TRANSFER_ALL)) != BufferUsageBit::NONE)
		{
			// Will be used for something other than transfer, try to put it in the device
			preferDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
			avoidDeviceLocal = 0;
		}
		else
		{
			// Will be used only for transfers, don't want it in the device
			preferDeviceLocal = 0;
			avoidDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
		}

		// Device & host & coherent but not cached
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | preferDeviceLocal,
			VK_MEMORY_PROPERTY_HOST_CACHED_BIT | avoidDeviceLocal);

		// Fallback: host & coherent and not cached
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
				VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
				VK_MEMORY_PROPERTY_HOST_CACHED_BIT | avoidDeviceLocal);
		}

		// Fallback: just host
		if(memIdx == MAX_U32)
		{
			ANKI_VK_LOGW("Using a fallback mode for write-only buffer");
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
		}
	}
	else if((access & BufferMapAccessBit::READ) != BufferMapAccessBit::NONE)
	{
		// Read or read/write

		// Cached & coherent
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(req.memoryTypeBits,
			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT
				| VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
			0);

		// Fallback: Just cached
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, 0);
		}

		// Fallback: Just host
		if(memIdx == MAX_U32)
		{
			ANKI_VK_LOGW("Using a fallback mode for read/write buffer");
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, 0);
		}
	}
	else
	{
		// Not mapped
		ANKI_ASSERT(access == BufferMapAccessBit::NONE);

		// Device only (prefer memory that is NOT host visible)
		memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
			req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

		// Fallback: Device with anything else
		if(memIdx == MAX_U32)
		{
			memIdx = getGrManagerImpl().getGpuMemoryManager().findMemoryType(
				req.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
		}
	}

	ANKI_ASSERT(memIdx != MAX_U32);
	// Remember the actual property flags of the chosen type (used by map/flush
	// logic elsewhere, presumably -- they are only stored here).
	const VkPhysicalDeviceMemoryProperties& props = getGrManagerImpl().getMemoryProperties();
	m_memoryFlags = props.memoryTypes[memIdx].propertyFlags;

	// Allocate
	getGrManagerImpl().getGpuMemoryManager().allocateMemory(memIdx, req.size, req.alignment, true, m_memHandle);

	// Bind mem to buffer
	{
		ANKI_TRACE_SCOPED_EVENT(VK_BIND_OBJECT);
		ANKI_VK_CHECK(vkBindBufferMemory(getDevice(), m_handle, m_memHandle.m_memory, m_memHandle.m_offset));
	}

	// m_size is the user-requested size; m_actualSize includes the alignment
	// padding added above.
	m_access = access;
	m_size = inf.m_size;
	m_actualSize = size;
	m_usage = usage;
	return Error::NONE;
}
/// Create the swapchain: query surface size and format (B8G8R8A8_UNORM only),
/// pick a present mode (FIFO with vsync, else MAILBOX/IMMEDIATE), create the
/// VkSwapchainKHR and one image view per backbuffer.
Error GrManagerImpl::initSwapchain(const GrManagerInitInfo& init)
{
	// Surface size must be reported explicitly by the surface
	VkSurfaceCapabilitiesKHR surfaceProperties;
	ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(m_physicalDevice, m_surface, &surfaceProperties));

	if(surfaceProperties.currentExtent.width == MAX_U32 || surfaceProperties.currentExtent.height == MAX_U32)
	{
		ANKI_LOGE("Wrong surface size");
		return ErrorCode::FUNCTION_FAILED;
	}
	m_surfaceWidth = surfaceProperties.currentExtent.width;
	m_surfaceHeight = surfaceProperties.currentExtent.height;

	// Surface format: scan for B8G8R8A8_UNORM and take its colorspace
	uint32_t formatCount;
	ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceFormatsKHR(m_physicalDevice, m_surface, &formatCount, nullptr));

	DynamicArrayAuto<VkSurfaceFormatKHR> formats(getAllocator());
	formats.create(formatCount);
	ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceFormatsKHR(m_physicalDevice, m_surface, &formatCount, &formats[0]));

	VkColorSpaceKHR colorspace = VK_COLOR_SPACE_MAX_ENUM_KHR;
	while(formatCount--)
	{
		if(formats[formatCount].format == VK_FORMAT_B8G8R8A8_UNORM)
		{
			m_surfaceFormat = formats[formatCount].format;
			colorspace = formats[formatCount].colorSpace;
			break;
		}
	}

	// NOTE(review): this relies on m_surfaceFormat defaulting to
	// VK_FORMAT_UNDEFINED when the loop finds nothing -- confirm the member's
	// initializer.
	if(m_surfaceFormat == VK_FORMAT_UNDEFINED)
	{
		ANKI_LOGE("Surface format not found");
		return ErrorCode::FUNCTION_FAILED;
	}

	// Chose present mode. NOTE(review): the VkResult of the two
	// vkGetPhysicalDeviceSurfacePresentModesKHR calls is ignored here.
	uint32_t presentModeCount;
	vkGetPhysicalDeviceSurfacePresentModesKHR(m_physicalDevice, m_surface, &presentModeCount, nullptr);
	presentModeCount = min(presentModeCount, 4u); // Only the first 4 are checked
	Array<VkPresentModeKHR, 4> presentModes;
	vkGetPhysicalDeviceSurfacePresentModesKHR(m_physicalDevice, m_surface, &presentModeCount, &presentModes[0]);

	VkPresentModeKHR presentMode = VK_PRESENT_MODE_MAX_ENUM_KHR;
	if(init.m_config->getNumber("vsync"))
	{
		presentMode = VK_PRESENT_MODE_FIFO_KHR;
	}
	else
	{
		// Prefer MAILBOX over IMMEDIATE; take the first of either
		for(U i = 0; i < presentModeCount; ++i)
		{
			if(presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR)
			{
				presentMode = VK_PRESENT_MODE_MAILBOX_KHR;
				break;
			}
			else if(presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)
			{
				presentMode = VK_PRESENT_MODE_IMMEDIATE_KHR;
				break;
			}
		}
	}

	if(presentMode == VK_PRESENT_MODE_MAX_ENUM_KHR)
	{
		ANKI_LOGE("VK: Couldn't find a present mode");
		return ErrorCode::FUNCTION_FAILED;
	}

	// Create swapchain. Exactly MAX_FRAMES_IN_FLIGHT images are expected.
	VkSwapchainCreateInfoKHR ci = {};
	ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
	ci.surface = m_surface;
	ci.minImageCount = MAX_FRAMES_IN_FLIGHT;
	ci.imageFormat = m_surfaceFormat;
	ci.imageColorSpace = colorspace;
	ci.imageExtent = surfaceProperties.currentExtent;
	ci.imageArrayLayers = 1;
	ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
	ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
	ci.queueFamilyIndexCount = 1;
	ci.pQueueFamilyIndices = &m_queueIdx;
	ci.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
	ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
	ci.presentMode = presentMode;
	ci.clipped = false;
	ci.oldSwapchain = VK_NULL_HANDLE;

	ANKI_VK_CHECK(vkCreateSwapchainKHR(m_device, &ci, nullptr, &m_swapchain));

	// Get images; the implementation may create more than minImageCount, which
	// this code treats as a hard error.
	uint32_t count = 0;
	ANKI_VK_CHECK(vkGetSwapchainImagesKHR(m_device, m_swapchain, &count, nullptr));
	if(count != MAX_FRAMES_IN_FLIGHT)
	{
		ANKI_LOGE("Requested a swapchain with %u images but got one with %u", MAX_FRAMES_IN_FLIGHT, count);
		return ErrorCode::FUNCTION_FAILED;
	}

	ANKI_LOGI("VK: Created a swapchain. Image count: %u, present mode: %u", count, presentMode);

	Array<VkImage, MAX_FRAMES_IN_FLIGHT> images;
	ANKI_VK_CHECK(vkGetSwapchainImagesKHR(m_device, m_swapchain, &count, &images[0]));
	for(U i = 0; i < MAX_FRAMES_IN_FLIGHT; ++i)
	{
		m_backbuffers[i].m_image = images[i];
		ANKI_ASSERT(images[i]);
	}

	// Create img views, one 2D color view per backbuffer
	for(U i = 0; i < MAX_FRAMES_IN_FLIGHT; ++i)
	{
		VkImageViewCreateInfo ci = {};
		ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
		ci.flags = 0;
		ci.image = m_backbuffers[i].m_image;
		ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
		ci.format = m_surfaceFormat;
		ci.components = {
			VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
		ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		ci.subresourceRange.baseMipLevel = 0;
		ci.subresourceRange.levelCount = 1;
		ci.subresourceRange.baseArrayLayer = 0;
		ci.subresourceRange.layerCount = 1;

		ANKI_VK_CHECK(vkCreateImageView(m_device, &ci, nullptr, &m_backbuffers[i].m_imageView));
	}

	return ErrorCode::NONE;
}
/// Create the VkInstance (with validation layers when debugContext is set),
/// pick the first physical device and cache its properties/features/vendor.
Error GrManagerImpl::initInstance(const GrManagerInitInfo& init)
{
	// Create the instance
	//
	static Array<const char*, 8> LAYERS = {{"VK_LAYER_LUNARG_core_validation",
		"VK_LAYER_LUNARG_swapchain",
		"VK_LAYER_LUNARG_image",
		"VK_LAYER_GOOGLE_threading",
		"VK_LAYER_LUNARG_parameter_validation",
		"VK_LAYER_GOOGLE_unique_objects",
		"VK_LAYER_LUNARG_object_tracker",
		"VK_LAYER_LUNARG_standard_validation"}};

	// Surface extension plus the platform-specific one
	static Array<const char*, 2> EXTENSIONS = {{VK_KHR_SURFACE_EXTENSION_NAME,
#if ANKI_OS == ANKI_OS_LINUX
		VK_KHR_XCB_SURFACE_EXTENSION_NAME
#elif ANKI_OS == ANKI_OS_WINDOWS
		VK_KHR_WIN32_SURFACE_EXTENSION_NAME
#else
#error TODO
#endif
	}};

	VkApplicationInfo app = {};
	app.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
	app.pApplicationName = "unamed";
	app.applicationVersion = 1;
	app.pEngineName = "AnKi 3D Engine";
	// NOTE(review): "<< 1" looks suspicious for version packing -- the usual
	// encoding shifts the major version by a larger amount (e.g.
	// VK_MAKE_VERSION shifts by 22/12). Confirm the intended encoding.
	app.engineVersion = (ANKI_VERSION_MAJOR << 1) | ANKI_VERSION_MINOR;
	app.apiVersion = VK_MAKE_VERSION(1, 0, 3);

	VkInstanceCreateInfo ci = {};
	ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
	ci.pApplicationInfo = &app;

	// Validation layers only when a debug context was requested
	if(init.m_config->getNumber("debugContext"))
	{
		ANKI_LOGI("VK: Will enable debug layers");
		ci.enabledLayerCount = LAYERS.getSize();
		ci.ppEnabledLayerNames = &LAYERS[0];
	}

	ci.enabledExtensionCount = EXTENSIONS.getSize();
	ci.ppEnabledExtensionNames = &EXTENSIONS[0];

#if ANKI_GR_MANAGER_DEBUG_MEMMORY
	// Route instance allocations through the engine's debug callbacks
	VkAllocationCallbacks allocCbs = {};
	VkAllocationCallbacks* pallocCbs = &allocCbs;
	allocCbs.pUserData = this;
	allocCbs.pfnAllocation = allocateCallback;
	allocCbs.pfnReallocation = reallocateCallback;
	allocCbs.pfnFree = freeCallback;
#else
	VkAllocationCallbacks* pallocCbs = nullptr;
#endif

	ANKI_VK_CHECK(vkCreateInstance(&ci, pallocCbs, &m_instance));

	// Create the physical device
	//
	uint32_t count = 0;
	ANKI_VK_CHECK(vkEnumeratePhysicalDevices(m_instance, &count, nullptr));
	ANKI_LOGI("VK: Number of physical devices: %u", count);
	if(count < 1)
	{
		ANKI_LOGE("Wrong number of physical devices");
		return ErrorCode::FUNCTION_FAILED;
	}

	// Only the first enumerated physical device is used
	count = 1;
	ANKI_VK_CHECK(vkEnumeratePhysicalDevices(m_instance, &count, &m_physicalDevice));

	vkGetPhysicalDeviceProperties(m_physicalDevice, &m_devProps);

	// Find vendor from the PCI vendor ID. An unknown ID leaves m_vendor at its
	// prior (presumably default) value -- there is no default case here.
	switch(m_devProps.vendorID)
	{
	case 0x13B5:
		m_vendor = GpuVendor::ARM;
		break;
	case 0x10DE:
		m_vendor = GpuVendor::NVIDIA;
		break;
	case 0x1002:
	case 0x1022:
		m_vendor = GpuVendor::AMD;
		break;
	}
	ANKI_LOGI("GPU vendor is %s", &GPU_VENDOR_STR[m_vendor][0]);

	vkGetPhysicalDeviceFeatures(m_physicalDevice, &m_devFeatures);

	return ErrorCode::NONE;
}
Error MicroSwapchain::initInternal() { const VkDevice dev = m_factory->m_gr->getDevice(); // Get the surface size VkSurfaceCapabilitiesKHR surfaceProperties; U surfaceWidth = 0, surfaceHeight = 0; { ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_factory->m_gr->getPhysicalDevice(), m_factory->m_gr->getSurface(), &surfaceProperties)); if(surfaceProperties.currentExtent.width == MAX_U32 || surfaceProperties.currentExtent.height == MAX_U32) { ANKI_VK_LOGE("Wrong surface size"); return Error::FUNCTION_FAILED; } surfaceWidth = surfaceProperties.currentExtent.width; surfaceHeight = surfaceProperties.currentExtent.height; } // Get the surface format VkFormat surfaceFormat = VK_FORMAT_END_RANGE; VkColorSpaceKHR colorspace = VK_COLOR_SPACE_MAX_ENUM_KHR; { uint32_t formatCount; ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceFormatsKHR( m_factory->m_gr->getPhysicalDevice(), m_factory->m_gr->getSurface(), &formatCount, nullptr)); DynamicArrayAuto<VkSurfaceFormatKHR> formats(getAllocator()); formats.create(formatCount); ANKI_VK_CHECK(vkGetPhysicalDeviceSurfaceFormatsKHR( m_factory->m_gr->getPhysicalDevice(), m_factory->m_gr->getSurface(), &formatCount, &formats[0])); while(formatCount--) { if(formats[formatCount].format == VK_FORMAT_B8G8R8A8_UNORM) { surfaceFormat = formats[formatCount].format; colorspace = formats[formatCount].colorSpace; break; } } if(surfaceFormat == VK_FORMAT_UNDEFINED) { ANKI_VK_LOGE("Surface format not found"); return Error::FUNCTION_FAILED; } } // Chose present mode VkPresentModeKHR presentMode = VK_PRESENT_MODE_MAX_ENUM_KHR; { uint32_t presentModeCount; vkGetPhysicalDeviceSurfacePresentModesKHR( m_factory->m_gr->getPhysicalDevice(), m_factory->m_gr->getSurface(), &presentModeCount, nullptr); presentModeCount = min(presentModeCount, 4u); Array<VkPresentModeKHR, 4> presentModes; vkGetPhysicalDeviceSurfacePresentModesKHR( m_factory->m_gr->getPhysicalDevice(), m_factory->m_gr->getSurface(), &presentModeCount, &presentModes[0]); if(m_factory->m_vsync) { 
presentMode = VK_PRESENT_MODE_FIFO_KHR; } else { for(U i = 0; i < presentModeCount; ++i) { if(presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) { presentMode = VK_PRESENT_MODE_MAILBOX_KHR; break; } else if(presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR) { presentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; break; } } } if(presentMode == VK_PRESENT_MODE_MAX_ENUM_KHR) { ANKI_VK_LOGE("Couldn't find a present mode"); return Error::FUNCTION_FAILED; } } // Create swapchain { VkSwapchainCreateInfoKHR ci = {}; ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; ci.surface = m_factory->m_gr->getSurface(); ci.minImageCount = MAX_FRAMES_IN_FLIGHT; ci.imageFormat = surfaceFormat; ci.imageColorSpace = colorspace; ci.imageExtent = surfaceProperties.currentExtent; ci.imageArrayLayers = 1; ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 1; U32 idx = m_factory->m_gr->getGraphicsQueueIndex(); ci.pQueueFamilyIndices = &idx; ci.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; ci.presentMode = presentMode; ci.clipped = false; ci.oldSwapchain = VK_NULL_HANDLE; ANKI_VK_CHECK(vkCreateSwapchainKHR(dev, &ci, nullptr, &m_swapchain)); } // Get images { uint32_t count = 0; ANKI_VK_CHECK(vkGetSwapchainImagesKHR(dev, m_swapchain, &count, nullptr)); if(count != MAX_FRAMES_IN_FLIGHT) { ANKI_VK_LOGE("Requested a swapchain with %u images but got one with %u", MAX_FRAMES_IN_FLIGHT, count); return Error::FUNCTION_FAILED; } ANKI_VK_LOGI("Created a swapchain. 
Image count: %u, present mode: %u, size: %ux%u, vsync: %u", count, presentMode, surfaceWidth, surfaceHeight, U32(m_factory->m_vsync)); Array<VkImage, MAX_FRAMES_IN_FLIGHT> images; ANKI_VK_CHECK(vkGetSwapchainImagesKHR(dev, m_swapchain, &count, &images[0])); for(U i = 0; i < MAX_FRAMES_IN_FLIGHT; ++i) { TextureInitInfo init("SwapchainImg"); init.m_width = surfaceWidth; init.m_height = surfaceHeight; init.m_format = Format::B8G8R8A8_UNORM; ANKI_ASSERT(surfaceFormat == VK_FORMAT_B8G8R8A8_UNORM); init.m_usage = TextureUsageBit::IMAGE_COMPUTE_WRITE | TextureUsageBit::FRAMEBUFFER_ATTACHMENT_READ_WRITE | TextureUsageBit::PRESENT; init.m_type = TextureType::_2D; TextureImpl* tex = m_factory->m_gr->getAllocator().newInstance<TextureImpl>(m_factory->m_gr, init.getName()); m_textures[i].reset(tex); ANKI_CHECK(tex->initExternal(images[i], init)); } } return Error::NONE; }
Error DescriptorSetLayoutFactory::getOrCreateLayout(const DescriptorSetLayoutInfo& dsinf, VkDescriptorSetLayout& out) { out = VK_NULL_HANDLE; LockGuard<Mutex> lock(m_mtx); auto it = m_map.find(dsinf); if(it != m_map.getEnd()) { out = *it; } else { // Create the layout VkDescriptorSetLayoutCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; const U BINDING_COUNT = MAX_TEXTURE_BINDINGS + MAX_UNIFORM_BUFFER_BINDINGS + MAX_STORAGE_BUFFER_BINDINGS + MAX_IMAGE_BINDINGS; Array<VkDescriptorSetLayoutBinding, BINDING_COUNT> bindings; memset(&bindings[0], 0, sizeof(bindings)); ci.pBindings = &bindings[0]; U count = 0; U bindingIdx = 0; // Combined image samplers for(U i = 0; i < dsinf.m_texCount; ++i) { VkDescriptorSetLayoutBinding& binding = bindings[count++]; binding.binding = bindingIdx++; binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; binding.descriptorCount = 1; binding.stageFlags = VK_SHADER_STAGE_ALL; } // Uniform buffers bindingIdx = MAX_TEXTURE_BINDINGS; for(U i = 0; i < dsinf.m_uniCount; ++i) { VkDescriptorSetLayoutBinding& binding = bindings[count++]; binding.binding = bindingIdx++; binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; binding.descriptorCount = 1; binding.stageFlags = VK_SHADER_STAGE_ALL; } // Storage buffers bindingIdx = MAX_TEXTURE_BINDINGS + MAX_UNIFORM_BUFFER_BINDINGS; for(U i = 0; i < dsinf.m_storageCount; ++i) { VkDescriptorSetLayoutBinding& binding = bindings[count++]; binding.binding = bindingIdx++; binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; binding.descriptorCount = 1; binding.stageFlags = VK_SHADER_STAGE_ALL; } // Images bindingIdx = MAX_TEXTURE_BINDINGS + MAX_UNIFORM_BUFFER_BINDINGS + MAX_STORAGE_BUFFER_BINDINGS; for(U i = 0; i < dsinf.m_imgCount; ++i) { VkDescriptorSetLayoutBinding& binding = bindings[count++]; binding.binding = bindingIdx++; binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; binding.descriptorCount = 1; 
binding.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; } ANKI_ASSERT(count <= BINDING_COUNT); ci.bindingCount = count; ANKI_VK_CHECK(vkCreateDescriptorSetLayout(m_dev, &ci, nullptr, &out)); m_map.pushBack(m_alloc, dsinf, out); } return ErrorCode::NONE; }