static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;

	/* block scheduler */
	kthread_park(gpu->sched.thread);
	drm_sched_hw_job_reset(&gpu->sched, sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(gpu);
	etnaviv_gpu_recover_hang(gpu);

	/* restart scheduler after GPU is usable again */
	drm_sched_job_recovery(&gpu->sched);
	kthread_unpark(gpu->sched.thread);
}
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		return;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		return;
	}

	/* block scheduler */
	drm_sched_stop(&gpu->sched);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(gpu);
	etnaviv_gpu_recover_hang(gpu);

	drm_sched_resubmit_jobs(&gpu->sched);

	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
}
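The key addition in the newer handler is the forward-progress test on the front-end DMA address. Below is a minimal, standalone sketch of just that test; the 16-byte window and the signed subtraction mirror the function above, while the sampled addresses and the fe_made_progress() helper name are made up for illustration and are not part of the driver.

/*
 * Standalone sketch (userspace, not driver code) of the hangcheck
 * decision: compare two successive reads of the FE DMA address and
 * decide whether the timeout should be deferred or escalated.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* True when the front-end still looks alive, i.e. the timeout is
 * deferred instead of triggering a full GPU recovery. */
static bool fe_made_progress(uint32_t prev, uint32_t cur)
{
	/*
	 * Signed delta, as in the driver: a command-stream wrap shows up
	 * as a negative value and counts as progress; only a position
	 * still within the 16-byte window means the FE is stuck.
	 */
	int32_t change = (int32_t)(cur - prev);

	return change < 0 || change > 16;
}

int main(void)
{
	/* Hypothetical FE DMA address samples, one pair per timeout check. */
	struct { uint32_t prev, cur; } samples[] = {
		{ 0x1000, 0x1008 },	/* inside the 16-byte window: stuck  */
		{ 0x1000, 0x1020 },	/* advanced past the window: alive   */
		{ 0xfff0, 0x0010 },	/* wrapped: negative delta, alive    */
	};

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("prev=%#x cur=%#x -> %s\n",
		       (unsigned)samples[i].prev, (unsigned)samples[i].cur,
		       fe_made_progress(samples[i].prev, samples[i].cur) ?
		       "defer timeout" : "recover the GPU");

	return 0;
}

The signed comparison is what makes the wrap case work: the addresses are unsigned, but interpreting the difference as a signed value folds "moved backwards because the ring wrapped" into the same branch as "moved far forward", so both defer the timeout.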