/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* Map the DRM pipe index to the matching crtc irq type and
	 * drop one reference on the crtc interrupt source.
	 */
	amdgpu_irq_put(adev, &adev->crtc_irq,
		       amdgpu_crtc_idx_to_irq_type(adev, pipe));
}
/** * amdgpu_fence_driver_fini - tear down the fence driver * for all possible rings. * * @adev: amdgpu device pointer * * Tear down the fence driver for all possible rings (all asics). */ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) { unsigned i, j; int r; for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->fence_drv.initialized) continue; r = amdgpu_fence_wait_empty(ring); if (r) { /* no need to trigger GPU reset as we are unloading */ amdgpu_fence_driver_force_completion(adev); } amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); amd_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) fence_put(ring->fence_drv.fences[j]); kfree(ring->fence_drv.fences); ring->fence_drv.initialized = false; } }
/* gmc_v9_0_hw_fini - tear down the GMC v9 hardware state.
 *
 * Skips all register access under SR-IOV (the host owns the GMC there);
 * otherwise drops the VM fault interrupt reference and disables the GART.
 * Always returns 0.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
/* xgpu_ai_mailbox_add_irq_id - register the SR-IOV mailbox interrupt ids.
 *
 * Registers the receive (135) and ack (138) BIF interrupt sources for the
 * host<->VF mailbox. On failure of the second registration the reference
 * taken by the first is dropped. Returns 0 on success or a negative errno.
 */
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135,
				&adev->virt.rcv_irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138,
				&adev->virt.ack_irq);
	if (ret)
		/* unwind the rcv_irq registration */
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);

	return ret;
}
/* xgpu_vi_mailbox_add_irq_id - register the SR-IOV mailbox interrupt ids.
 *
 * VI variant: registers the receive (135) and ack (138) legacy-client
 * interrupt sources for the host<->VF mailbox, unwinding the first
 * registration if the second fails. Returns 0 on success or a negative
 * errno.
 */
int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135,
				&adev->virt.rcv_irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138,
				&adev->virt.ack_irq);
	if (ret)
		/* unwind the rcv_irq registration */
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);

	return ret;
}
/* xgpu_ai_mailbox_get_irq - enable the SR-IOV mailbox interrupts.
 *
 * Takes a reference on both the receive and ack mailbox interrupt
 * sources, releasing the first if the second cannot be enabled, and
 * initializes the function-level-reset work item. Returns 0 on success
 * or a negative errno.
 */
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (ret)
		return ret;

	ret = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (ret) {
		/* drop the rcv_irq reference taken above */
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return ret;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 * Waits for each initialized ring to drain (forcing completion on
 * failure, deferring any GPU reset to resume) and then disables its
 * fence interrupt.
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int idx, ret;

	for (idx = 0; idx < AMDGPU_MAX_RINGS; idx++) {
		struct amdgpu_ring *ring = adev->rings[idx];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		ret = amdgpu_fence_wait_empty(ring);
		if (ret) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/* xgpu_ai_mailbox_put_irq - disable the SR-IOV mailbox interrupts.
 *
 * Drops the references taken by xgpu_ai_mailbox_get_irq, ack source
 * first, then the receive source.
 */
void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}