void intel_img_view_init(struct intel_dev *dev, const VkImageViewCreateInfo *info, struct intel_img_view *view) { VkComponentMapping state_swizzles; uint32_t mip_levels, array_size; struct intel_img *img = intel_img(info->image); mip_levels = info->subresourceRange.levelCount; if (mip_levels > img->mip_levels - info->subresourceRange.baseMipLevel) mip_levels = img->mip_levels - info->subresourceRange.baseMipLevel; array_size = info->subresourceRange.layerCount; if (array_size > img->array_size - info->subresourceRange.baseArrayLayer) array_size = img->array_size - info->subresourceRange.baseArrayLayer; view->obj.destroy = img_view_destroy; view->img = img; if (!(img->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7.5)) { state_swizzles = info->components; view->shader_swizzles.r = VK_COMPONENT_SWIZZLE_R; view->shader_swizzles.g = VK_COMPONENT_SWIZZLE_G; view->shader_swizzles.b = VK_COMPONENT_SWIZZLE_B; view->shader_swizzles.a = VK_COMPONENT_SWIZZLE_A; } else { state_swizzles.r = VK_COMPONENT_SWIZZLE_R; state_swizzles.g = VK_COMPONENT_SWIZZLE_G; state_swizzles.b = VK_COMPONENT_SWIZZLE_B; state_swizzles.a = VK_COMPONENT_SWIZZLE_A; view->shader_swizzles = info->components; } /* shader_swizzles is ignored by the compiler */ if (view->shader_swizzles.r != VK_COMPONENT_SWIZZLE_R || view->shader_swizzles.g != VK_COMPONENT_SWIZZLE_G || view->shader_swizzles.b != VK_COMPONENT_SWIZZLE_B || view->shader_swizzles.a != VK_COMPONENT_SWIZZLE_A) { intel_dev_log(dev, VK_DEBUG_REPORT_WARNING_BIT_EXT, (struct intel_base*)view, 0, 0, "image data swizzling is ignored"); } if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) { surface_state_tex_gen7(dev->gpu, img, info->viewType, info->format, info->subresourceRange.baseMipLevel, mip_levels, info->subresourceRange.baseArrayLayer, array_size, state_swizzles, false, view->cmd); view->cmd_len = 8; } else { surface_state_tex_gen6(dev->gpu, img, info->viewType, info->format, 
info->subresourceRange.baseMipLevel, mip_levels, info->subresourceRange.baseArrayLayer, array_size, false, view->cmd); view->cmd_len = 6; } } }
/*
 * Pre-submit validation for a command buffer. An invalid buffer (failed
 * build or a secondary buffer submitted directly) is only logged — the
 * submit still proceeds best-effort after selecting the pipeline.
 */
static VkResult queue_submit_cmd_prepare(struct intel_queue *queue,
                                         struct intel_cmd *cmd)
{
    const int valid = (cmd->result == VK_SUCCESS && cmd->primary);

    if (unlikely(!valid)) {
        intel_dev_log(cmd->dev, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                      &cmd->obj.base, 0, 0,
                      "invalid command buffer submitted");
    }

    return queue_select_pipeline(queue, cmd->pipeline_select);
}
/*
 * Report a GPU hang: decode the offending batch buffer for debugging,
 * then log how many command buffers were lost.
 */
static void queue_submit_hang(struct intel_queue *queue,
                              struct intel_cmd *cmd,
                              uint32_t active_lost,
                              uint32_t pending_lost)
{
    /* Dump the batch that was in flight when the hang was detected. */
    intel_cmd_decode(cmd, true);

    intel_dev_log(queue->dev, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                  &cmd->obj.base, 0, 0,
                  "GPU hanged with %d/%d active/pending command buffers lost",
                  active_lost, pending_lost);
}
VkResult intel_queue_create(struct intel_dev *dev, enum intel_gpu_engine_type engine, struct intel_queue **queue_ret) { struct intel_queue *queue; enum intel_ring_type ring; VkFenceCreateInfo fence_info; VkResult ret; switch (engine) { case INTEL_GPU_ENGINE_3D: ring = INTEL_RING_RENDER; break; default: intel_dev_log(dev, VK_DEBUG_REPORT_ERROR_BIT_EXT, &dev->base, 0, 0, "invalid engine type"); return VK_ERROR_VALIDATION_FAILED_EXT; break; } queue = (struct intel_queue *) intel_base_create(&dev->base.handle, sizeof(*queue), dev->base.dbg, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, NULL, 0); if (!queue) return VK_ERROR_OUT_OF_HOST_MEMORY; queue->dev = dev; queue->ring = ring; if (queue_init_hw_and_atomic_bo(queue) != VK_SUCCESS) { intel_queue_destroy(queue); return VK_ERROR_INITIALIZATION_FAILED; } memset(&fence_info, 0, sizeof(fence_info)); fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; ret = intel_fence_create(dev, &fence_info, &queue->fence); if (ret != VK_SUCCESS) { intel_queue_destroy(queue); return ret; } *queue_ret = queue; return VK_SUCCESS; }