/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	/* Seed the writeback slot with a sentinel the IB must overwrite. */
	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	/* Single WRITE packet: the DMA engine stores 0xDEADBEEF to the slot. */
	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}

	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}
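/*
 * A minimal sketch (not part of the driver above) of the
 * dma_fence_wait_timeout() return convention that every function in this
 * section follows: 0 means the wait timed out, a negative value is an error
 * from the wait itself, and a positive value is the remaining timeout in
 * jiffies. The helper name example_fence_wait() is hypothetical.
 */
#include <linux/dma-fence.h>
#include <linux/errno.h>

static long example_fence_wait(struct dma_fence *fence, long timeout)
{
	/* intr=false: do not let signals abort the wait. */
	long r = dma_fence_wait_timeout(fence, false, timeout);

	if (r == 0)		/* fence did not signal before the timeout */
		return -ETIMEDOUT;
	if (r < 0)		/* the wait itself failed */
		return r;
	return 0;		/* signaled, with r jiffies to spare */
}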
int tegra_uapi_syncpt_wait(struct drm_device *drm, void *data,
			   struct drm_file *file)
{
	struct drm_tegra_syncpt_wait *args = data;
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_drm_context_v1 *context;
	struct dma_fence *fence = NULL;
	int ret;

	spin_lock(&tegra->context_lock);

	context = idr_find(&fpriv->uapi_v1_contexts, args->thresh);
	if (context)
		fence = drm_syncobj_fence_get(context->syncobj);

	spin_unlock(&tegra->context_lock);

	if (!context)
		return -EINVAL;

	if (fence) {
		ret = dma_fence_wait_timeout(fence, false,
					     msecs_to_jiffies(args->timeout));
		dma_fence_put(fence);

		if (!ret)
			return -ETIMEDOUT;

		if (ret < 0)
			return ret;
	}

	return 0;
}
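/*
 * A hedged userspace sketch of driving this ioctl through libdrm's
 * drmIoctl(). It assumes the upstream struct drm_tegra_syncpt_wait layout
 * (id, thresh, timeout, value) and the DRM_IOCTL_TEGRA_SYNCPT_WAIT request
 * code; the file descriptor and threshold are placeholders, and whether
 * these field semantics match the v1-context lookup above is an assumption.
 */
#include <errno.h>
#include <xf86drm.h>		/* from libdrm */
#include <tegra_drm.h>		/* UAPI header, via libdrm's include path */

static int wait_syncpt(int fd, __u32 thresh, __u32 timeout_ms)
{
	struct drm_tegra_syncpt_wait args = {
		.thresh = thresh,	/* looked up as a v1 context above */
		.timeout = timeout_ms,	/* converted to jiffies in the kernel */
	};

	/* drmIoctl() retries on EINTR/EAGAIN; returns -1 with errno set. */
	if (drmIoctl(fd, DRM_IOCTL_TEGRA_SYNCPT_WAIT, &args))
		return -errno;

	return 0;
}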
/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns 0 on success, error on failure.
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	/* Only the destroy message's fence is waited on; it completes last. */
	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_gem_object_unmap *work;
	struct dma_fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del_init(&vma->head);

	/*
	 * With more than one shared fence there is no single fence to defer
	 * on, so wait for the BO synchronously; otherwise pick the one
	 * shared fence, or fall back to the exclusive fence.
	 */
	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						  reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (!fence || !mapped) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		/*
		 * Deferred work could not be allocated: fall back to a
		 * bounded synchronous wait before deleting the VMA.
		 */
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}