/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
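/*
 * Minimal caller sketch (not part of the function above): in the msm
 * submit path, each buffer is synchronized against fences from other
 * contexts before being moved to the active list. The submit layout
 * assumed here (submit->nr_bos, submit->bos[i].obj/.flags,
 * submit->gpu->fctx) follows the msm driver of this era, but treat the
 * exact field names as assumptions rather than a verbatim excerpt.
 */
static int example_sync_submit_bos(struct msm_gem_submit *submit)
{
	int i, ret;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* reads reserve a shared slot and wait on the exclusive
		 * fence; writes additionally wait out all shared fences
		 */
		ret = msm_gem_sync_object(&msm_obj->base,
				submit->gpu->fctx, write);
		if (ret)
			return ret;
	}

	return 0;
}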
/*
 * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
 *
 * Create and attach a fence to the vGEM handle. This fence is then exposed
 * via the dma-buf reservation object and visible to consumers of the
 * exported dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence
 * indicates the vGEM buffer is being written to by the client and is
 * exposed as an exclusive fence, otherwise the fence indicates the client
 * is currently reading from the buffer and all future writes should wait
 * for the client to signal its completion. Note that if a conflicting
 * fence is already on the dma-buf (i.e. an exclusive fence when adding a
 * read, or any fence when adding a write), -EBUSY is reported.
 * Serialisation between operations should be handled by waiting upon
 * the dma-buf.
 *
 * This returns the handle for the new fence that must be signaled within 10
 * seconds (or otherwise it will automatically expire). See
 * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
 *
 * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
 */
int vgem_fence_attach_ioctl(struct drm_device *dev,
			    void *data,
			    struct drm_file *file)
{
	struct drm_vgem_fence_attach *arg = data;
	struct vgem_file *vfile = file->driver_priv;
	struct reservation_object *resv;
	struct drm_gem_object *obj;
	struct dma_fence *fence;
	int ret;

	if (arg->flags & ~VGEM_FENCE_WRITE)
		return -EINVAL;

	if (arg->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, arg->handle);
	if (!obj)
		return -ENOENT;

	ret = attach_dmabuf(dev, obj);
	if (ret)
		goto err;

	fence = vgem_fence_create(vfile, arg->flags);
	if (!fence) {
		ret = -ENOMEM;
		goto err;
	}

	/* Check for a conflicting fence */
	resv = obj->dma_buf->resv;
	if (!reservation_object_test_signaled_rcu(resv,
						  arg->flags & VGEM_FENCE_WRITE)) {
		ret = -EBUSY;
		goto err_fence;
	}

	/* Expose the fence via the dma-buf */
	ret = 0;
	reservation_object_lock(resv, NULL);
	if (arg->flags & VGEM_FENCE_WRITE)
		reservation_object_add_excl_fence(resv, fence);
	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
		reservation_object_add_shared_fence(resv, fence);
	reservation_object_unlock(resv);

	/* Record the fence in our idr for later signaling */
	if (ret == 0) {
		mutex_lock(&vfile->fence_mutex);
		ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
		mutex_unlock(&vfile->fence_mutex);
		if (ret > 0) {
			arg->out_fence = ret;
			ret = 0;
		}
	}
err_fence:
	if (ret) {
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
err:
	drm_gem_object_put_unlocked(obj);
	return ret;
}
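/*
 * Hypothetical userspace sketch showing the ioctl from the other side:
 * attach a write (exclusive) fence to a vGEM buffer, then signal it once
 * the client is done writing. Assumes an open vGEM fd and a valid GEM
 * handle; the include path for vgem_drm.h varies by setup, and error
 * handling is trimmed to the essentials.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/vgem_drm.h>

static int example_attach_write_fence(int fd, uint32_t handle,
				      uint32_t *out_fence)
{
	struct drm_vgem_fence_attach attach;

	memset(&attach, 0, sizeof(attach));
	attach.handle = handle;
	attach.flags = VGEM_FENCE_WRITE;	/* exposed as an exclusive fence */

	if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
		return -errno;	/* e.g. -EBUSY if a conflicting fence exists */

	/* must be signaled within 10 seconds or it expires automatically */
	*out_fence = attach.out_fence;
	return 0;
}

static int example_signal_fence(int fd, uint32_t fence)
{
	struct drm_vgem_fence_signal signal;

	memset(&signal, 0, sizeof(signal));
	signal.fence = fence;

	return ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal) ? -errno : 0;
}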