Example #1
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	/* Several shared fences pending: wait for all of them here. */
	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	/* Exactly one shared fence: defer the teardown until it signals. */
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	/* No shared fences: fall back to the exclusive fence, if any. */
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		kfree(vma);
	}
}
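
The deferred branch hands the vma to nouveau_fence_work(), which runs the callback once the fence signals; the callback itself is not part of this listing. A minimal sketch of what it plausibly looks like, mirroring the synchronous else path above (layout is an assumption, not taken from this example):

static void
nouveau_gem_object_delete(void *data)
{
	struct nvkm_vma *vma = data;

	/* Same teardown as the synchronous path, run after the fence fired. */
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
	kfree(vma);
}

Deferring the unmap this way keeps the mapping alive while the one remaining shared fence is still in flight, instead of blocking the caller.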
Example #2
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
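
The preempt_disable()/write_seqcount_begin() pair exists so that lockless readers can snapshot the exclusive fence without taking the reservation lock. A minimal sketch of such a reader, assuming the same pre-dma_fence-rename headers; the function name here is hypothetical, and the in-tree helper serving this role is reservation_object_get_excl_rcu():

struct fence *get_excl_rcu_sketch(struct reservation_object *obj)
{
	struct fence *fence;
	unsigned int seq;

retry:
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	fence = rcu_dereference(obj->fence_excl);
	/* A writer ran between begin and the read: snapshot is stale. */
	if (read_seqcount_retry(&obj->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}
	/* The fence may be dropping its last reference concurrently. */
	if (fence && !fence_get_rcu(fence))
		fence = NULL;
	rcu_read_unlock();
	return fence;
}

If the seqcount check fails, the writer above replaced the fence mid-read and the reader simply retries; fence_get_rcu() guards against taking a reference on a fence whose refcount already hit zero.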
Example #3
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
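
As the comment at the top notes, the shared-slot reservation is hooked in here because it is a convenient can-fail point. For context, a hypothetical caller on the submit path might pair this sync with publishing the job's fence afterwards, similar to how msm_gem_move_to_active() does in the same driver; the wrapper name below is an assumption, and job handling is elided:

static int submit_sync_and_publish(struct drm_gem_object *obj,
				   struct msm_fence_context *fctx,
				   struct fence *fence, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* May fail; also reserves the shared slot when !exclusive. */
	ret = msm_gem_sync_object(obj, fctx, exclusive);
	if (ret)
		return ret;

	/* Job is committed: publish its fence on the reservation. */
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);

	return 0;
}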
Example #4
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_gem_object_unmap *work;
	struct dma_fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del_init(&vma->head);

	/* More than one shared fence: wait for all of them synchronously;
	 * otherwise pick the single fence the teardown should wait on.
	 */
	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	/* No fence to wait on, or the BO was never mapped: delete now. */
	if (!fence || !mapped) {
		nouveau_gem_object_delete(vma);
		return;
	}

	/* Allocation failure: fall back to a bounded synchronous wait. */
	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
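
The container type and the work callback are not shown in this listing. Based on how they are used above (work->work.func, work->vma), they are plausibly shaped like the following sketch, assuming nouveau's struct nouveau_cli_work API; any detail beyond the names referenced above is a guess:

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);

	/* The fence has signalled: safe to tear the mapping down now. */
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

Compared with Example #1, this variant moves the deferred teardown onto a per-client workqueue keyed by the fence, and degrades to a bounded dma_fence_wait_timeout() only when the work item cannot be allocated.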