/**
 * @brief Trigger a hardware counter dump and block until it completes.
 *
 * Issues the dump request via kbase_instr_hwcnt_dump_irq() and then sleeps
 * on hwcnt.wait until the dump (and the following cache clean) has finished.
 * If a GPU reset interrupts the dump, the wait is repeated once.
 *
 * @param kctx  Context that owns the instrumentation session (must not be NULL)
 *
 * @return MALI_ERROR_NONE on a successful dump,
 *         MALI_ERROR_FUNCTION_FAILED if the dump could not be started or
 *         the hardware ended up in the FAULT state.
 */
mali_error kbase_instr_hwcnt_dump(kbase_context *kctx)
{
	unsigned long irq_flags;
	mali_error status = MALI_ERROR_FUNCTION_FAILED;
	kbase_device *kbdev;

	KBASE_DEBUG_ASSERT(kctx != NULL);
	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Kick off the dump; bail out if the hardware refused it */
	status = kbase_instr_hwcnt_dump_irq(kctx);
	if (status != MALI_ERROR_NONE)
		goto out;

	/* Sleep until the dump & cache clean signal completion */
	wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);

	spin_lock_irqsave(&kbdev->hwcnt.lock, irq_flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* A GPU reset pre-empted the dump: drop the lock and wait
		 * for the post-reset completion, then re-take the lock to
		 * inspect the final state. */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, irq_flags);
		wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
		spin_lock_irqsave(&kbdev->hwcnt.lock, irq_flags);
	}

	if (kbdev->hwcnt.state != KBASE_INSTR_STATE_FAULT) {
		/* Dump completed cleanly */
		KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);
		status = MALI_ERROR_NONE;
	} else {
		/* Hardware faulted; report failure and return to IDLE so
		 * the instrumentation session stays usable. */
		status = MALI_ERROR_FUNCTION_FAILED;
		kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
	}

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, irq_flags);

 out:
	return status;
}
/**
 * @brief Issue Dump command to hardware and wait for completion
 *
 * SEC_HWCNT (Samsung vendor) variant: when the vendor instrumentation is
 * initialised (hwcnt.is_init) the waits are bounded by hwcnt.timeout via
 * wait_event_timeout(); otherwise it falls back to the unbounded
 * wait_event() used by the stock driver.
 *
 * NOTE(review): this appears to be a second definition of
 * kbase_instr_hwcnt_dump in the same file (a non-SEC_HWCNT version precedes
 * it) — looks like a merge/diff artifact; confirm only one is compiled.
 *
 * @param kctx  Context owning the instrumentation session
 * @return MALI_ERROR_NONE on success, MALI_ERROR_FUNCTION_FAILED if the
 *         dump could not be started, timed out, hit a trigger exception,
 *         or the hardware ended in the FAULT state.
 */
mali_error kbase_instr_hwcnt_dump(struct kbase_context *kctx)
{
	unsigned long flags;
	mali_error err = MALI_ERROR_FUNCTION_FAILED;
	struct kbase_device *kbdev;

#ifdef SEC_HWCNT
	/* Vendor build logs and fails gracefully on NULL instead of
	 * relying solely on the debug assert below. */
	if (kctx == NULL) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "kctx is NULL error in %s %d \n", __FUNCTION__, err);
		goto out;
	}
#endif
	KBASE_DEBUG_ASSERT(NULL != kctx);
	kbdev = kctx->kbdev;
	KBASE_DEBUG_ASSERT(NULL != kbdev);

	err = kbase_instr_hwcnt_dump_irq(kctx);
	if (MALI_ERROR_NONE != err) {
		/* Can't dump HW counters */
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "kbase_instr_hwcnt_dump_irq error in %s %d \n", __FUNCTION__, err);
		goto out;
	}

	/* Wait for dump & cacheclean to complete */
#ifdef SEC_HWCNT
	if (kbdev->hwcnt.is_init) {
		/* Bounded wait: a timeout (ret == 0) or a trigger exception
		 * both abort the dump, reset the exception flag, and force
		 * the state machine back to IDLE. */
		int ret = wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);
		if ((kbdev->hwcnt.trig_exception == 1) || (ret == 0)) {
			kbdev->hwcnt.trig_exception = 0;
			kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
			err = MALI_ERROR_FUNCTION_FAILED;
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "wait_event_timeout error in %s %d \n", __FUNCTION__, err);
			goto out;
		}
	} else
#endif
		/* Stock path: unbounded sleep until the IRQ handler sets
		 * hwcnt.triggered. (This statement is the else-branch when
		 * SEC_HWCNT is defined, the only wait otherwise.) */
		wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);

	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
		/* GPU is being reset: drop the lock, wait for the post-reset
		 * completion, then re-take the lock to read the final state. */
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
#ifdef SEC_HWCNT
		/* NOTE(review): the wait_event_timeout() return value is
		 * discarded here (unlike the first wait above), so a timeout
		 * on this path falls through to the state check — confirm
		 * this is intentional. */
		if (kbdev->hwcnt.is_init)
			wait_event_timeout(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0, kbdev->hwcnt.timeout);
		else
#endif
			wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
	}

	if (kbdev->hwcnt.state == KBASE_INSTR_STATE_FAULT) {
		/* Hardware faulted: report failure and return to IDLE so the
		 * session remains usable. */
		err = MALI_ERROR_FUNCTION_FAILED;
		kbdev->hwcnt.state = KBASE_INSTR_STATE_IDLE;
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "hwcnt state is FAULT error in %s %d \n", __FUNCTION__, err);
	} else {
		/* Dump done */
		KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_IDLE);
		err = MALI_ERROR_NONE;
	}

	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
out:
	return err;
}