/*
 * Check whether the GPU hung, and whether the last bo submitted from this
 * context is to blame.
 *
 * This is a no-op unless ILO_DEBUG_HANG is set.  It idles the last
 * submitted bo, queries the per-context reset statistics, and compares
 * them against the counts cached in \p cp.  Returns true only when the
 * active (guilty) count changed, i.e. our bo caused the hang.
 */
static bool
ilo_cp_detect_hang(struct ilo_cp *cp)
{
   uint32_t active_lost, pending_lost;
   bool bo_is_guilty;

   if (likely(!(ilo_debug & ILO_DEBUG_HANG)))
      return false;

   /* idle the last batch first so the reset stats cover it */
   if (intel_bo_wait(cp->last_submitted_bo, -1))
      return false;
   if (intel_winsys_get_reset_stats(cp->winsys, cp->render_ctx,
                                    &active_lost, &pending_lost))
      return false;

   /* an increase in the active count means our batch was executing */
   bo_is_guilty = (cp->active_lost != active_lost);
   if (bo_is_guilty) {
      ilo_err("GPU hang caused by bo %p\n", cp->last_submitted_bo);
      cp->active_lost = active_lost;
   }

   if (cp->pending_lost != pending_lost) {
      ilo_err("GPU hang detected\n");
      cp->pending_lost = pending_lost;
   }

   return bo_is_guilty;
}
static boolean ilo_fence_finish(struct pipe_screen *screen, struct pipe_fence_handle *f, uint64_t timeout) { struct ilo_fence *fence = ilo_fence(f); const int64_t wait_timeout = (timeout > INT64_MAX) ? -1 : timeout; /* already signalled */ if (!fence->bo) return true; /* wait and see if it returns error */ if (intel_bo_wait(fence->bo, wait_timeout)) return false; /* mark signalled */ intel_bo_unreference(fence->bo); fence->bo = NULL; return true; }
static VkResult queue_submit_cmd_debug(struct intel_queue *queue, struct intel_cmd *cmd) { uint32_t active[2], pending[2]; struct intel_bo *bo; VkDeviceSize used; VkResult ret; ret = queue_submit_cmd_prepare(queue, cmd); if (ret != VK_SUCCESS) return ret; if (intel_debug & INTEL_DEBUG_HANG) { intel_winsys_get_reset_stats(queue->dev->winsys, &active[0], &pending[0]); } bo = intel_cmd_get_batch(cmd, &used); ret = queue_submit_bo(queue, bo, used); if (ret != VK_SUCCESS) return ret; if (intel_debug & INTEL_DEBUG_HANG) { intel_bo_wait(bo, -1); intel_winsys_get_reset_stats(queue->dev->winsys, &active[1], &pending[1]); if (active[0] != active[1] || pending[0] != pending[1]) { queue_submit_hang(queue, cmd, active[1] - active[0], pending[1] - pending[0]); } } if (intel_debug & INTEL_DEBUG_BATCH) intel_cmd_decode(cmd, false); return VK_SUCCESS; }
/**
 * vkQueueSubmit - submit batches of command buffers to a queue.
 *
 * For each VkSubmitInfo: waits on its wait semaphores, submits its command
 * buffers (through the debug path when any intel_debug flag is set),
 * updates the queue fence seqno from the last submitted batch on success
 * (copying it into \p fence_ if one was provided), and signals the batch's
 * signal semaphores.  Returns the first submission error encountered, or
 * VK_SUCCESS.
 */
VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
    VkQueue                                     queue_,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence_)
{
    struct intel_queue *queue = intel_queue(queue_);
    VkResult ret = VK_SUCCESS;
    struct intel_cmd *last_cmd;
    uint32_t i;

    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];

        /* block until every wait semaphore of this batch is signalled */
        for (i = 0; i < submit->waitSemaphoreCount; i++) {
            struct intel_semaphore *pSemaphore =
                intel_semaphore(submit->pWaitSemaphores[i]);
            intel_wait_queue_semaphore(queue, pSemaphore);
        }

        /* any debug flag routes submission through the instrumented path */
        if (unlikely(intel_debug)) {
            for (i = 0; i < submit->commandBufferCount; i++) {
                struct intel_cmd *cmd =
                    intel_cmd(submit->pCommandBuffers[i]);
                ret = queue_submit_cmd_debug(queue, cmd);
                if (ret != VK_SUCCESS)
                    break;
            }
        } else {
            for (i = 0; i < submit->commandBufferCount; i++) {
                struct intel_cmd *cmd =
                    intel_cmd(submit->pCommandBuffers[i]);
                ret = queue_submit_cmd(queue, cmd);
                if (ret != VK_SUCCESS)
                    break;
            }
        }

        /* no cmd submitted */
        /* NOTE(review): when a batch has zero command buffers (or the very
         * first one fails), this returns immediately — skipping this
         * batch's signal semaphores AND all remaining batches.  Confirm
         * this matches the intended semantics for empty submits. */
        if (i == 0)
            return ret;

        /* i > 0 here, so pCommandBuffers[i - 1] is the last cmd attempted */
        last_cmd = intel_cmd(submit->pCommandBuffers[i - 1]);

        if (ret == VK_SUCCESS) {
            /* advance the queue fence to the last submitted batch bo */
            intel_fence_set_seqno(queue->fence,
                                  intel_cmd_get_batch(last_cmd, NULL));
            if (fence_ != VK_NULL_HANDLE) {
                struct intel_fence *fence = intel_fence(fence_);
                intel_fence_copy(fence, queue->fence);
            }
        } else {
            struct intel_bo *last_bo;
            /* unbusy submitted BOs */
            /* NOTE(review): on error the loop still signals this batch's
             * semaphores and continues with the next submit, with ret
             * carrying over until overwritten — verify this best-effort
             * continuation is intentional. */
            last_bo = intel_cmd_get_batch(last_cmd, NULL);
            intel_bo_wait(last_bo, -1);
        }

        /* signal this batch's semaphores */
        for (i = 0; i < submit->signalSemaphoreCount; i++) {
            struct intel_semaphore *pSemaphore =
                intel_semaphore(submit->pSignalSemaphores[i]);
            intel_signal_queue_semaphore(queue, pSemaphore);
        }
    }

    return ret;
}