/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
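/*
 * Illustrative sketch (not part of the driver): a caller starts the fence
 * driver once per ring during bring-up, after the ring's interrupt source
 * has been registered. The wrapper below and its name are assumptions for
 * illustration; in the real driver this call is made from the ring init
 * path with the ASIC-specific irq_src/irq_type.
 */
#if 0
static int example_ring_bringup(struct amdgpu_ring *ring,
				struct amdgpu_irq_src *irq_src,
				unsigned irq_type)
{
	/* Wire up the fence writeback slot and fence IRQ before any
	 * command submission is fenced on this ring. */
	return amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
}
#endif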
/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}
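/*
 * Illustrative sketch (assumption, not driver code): the intended caller is
 * a GPU reset path that has failed and is giving up. The function name and
 * error-handling shape below are hypothetical; only
 * amdgpu_fence_driver_force_completion() comes from this file.
 */
#if 0
static void example_reset_fallback(struct amdgpu_device *adev, int reset_err)
{
	if (reset_err) {
		/* Reset failed: write each ring's highest emitted sequence
		 * number (sync_seq) as the completed fence value so every
		 * outstanding fence signals and no waiter blocks forever. */
		amdgpu_fence_driver_force_completion(adev);
	}
}
#endif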
/**
 * amdgpu_fence_driver_force_completion_ring - force fence waiters on one
 * ring to complete
 *
 * @ring: ring to force completion on; may be NULL, in which case this is
 * a no-op
 *
 * Write the ring's highest emitted sequence number (sync_seq) as the
 * completed fence value, so every outstanding fence on this ring signals.
 */
void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
{
	if (ring)
		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
}
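/*
 * Illustrative sketch (assumption): the per-ring variant suits a partial
 * recovery where a single ring is being torn down, but since it tolerates
 * a NULL ring, a hypothetical caller could also sweep adev->rings[]
 * directly. example_complete_all_rings() is not real driver code.
 */
#if 0
static void example_complete_all_rings(struct amdgpu_device *adev)
{
	int i;

	/* adev->rings[] entries may be NULL; the helper checks for that. */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++)
		amdgpu_fence_driver_force_completion_ring(adev->rings[i]);
}
#endif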