int ioctl_request_fd(void)
{
	/*
	 * The buffer exporter announces its wish to export a buffer. In doing
	 * so, it connects its own private buffer data, provides an
	 * implementation of the operations that can be performed on the
	 * exported dma_buf, and supplies flags for the file associated with
	 * this buffer.
	 *
	 * Interface:
	 *   struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
	 *                                  size_t size, int flags)
	 */
	/*
	 * A userspace entity requests a file descriptor (fd), which is a
	 * handle to the anonymous file associated with the buffer. It can
	 * then share the fd with other drivers and/or processes.
	 *
	 * Interface:
	 *   int dma_buf_fd(struct dma_buf *dmabuf)
	 */
	struct dma_buf *curr_dma_buf;
	int fd;

	curr_dma_buf = dma_buf_export(NULL, &dma_ops, dma_size, 0);
	fd = dma_buf_fd(curr_dma_buf);

	return fd;
}
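For context on what happens on the other end of the shared fd, the following is a minimal importer-side sketch built from the standard dma-buf consumer calls (dma_buf_get(), dma_buf_attach(), dma_buf_map_attachment()); it is not taken from any of the drivers shown here, and the importer_dev device and helper name are assumptions. It presumes the usual <linux/dma-buf.h> and <linux/err.h> headers.

/*
 * Illustrative importer-side sketch (hypothetical helper): turn a shared fd
 * back into a dma_buf, attach our device, and map the buffer for DMA.
 */
static struct sg_table *import_shared_buffer(int fd, struct device *importer_dev,
					     struct dma_buf **dmabuf_out,
					     struct dma_buf_attachment **attach_out)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* Take a reference on the dma_buf behind the fd. */
	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/* Announce that importer_dev wants access to this buffer. */
	attach = dma_buf_attach(dmabuf, importer_dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(attach);
	}

	/* Get a scatterlist suitable for DMA by importer_dev. */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return sgt;
	}

	*dmabuf_out = dmabuf;
	*attach_out = attach;
	return sgt;
}

Teardown mirrors the setup: dma_buf_unmap_attachment(), dma_buf_detach(), then dma_buf_put().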
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns	user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}
static int dmabuf_ioctl_export(struct dmabuf_file *priv, unsigned long flags)
{
	/* The ioctl argument is a userspace pointer to a struct dmabuf_create. */
	struct dmabuf_create *buf = (struct dmabuf_create *)flags;
	int err;

	get_dma_buf(priv->buf);

	err = dma_buf_fd(priv->buf, flags);
	if (err < 0)
		dma_buf_put(priv->buf);

	priv->fd = err;

	/* Hand the freshly installed fd back to userspace. */
	if (copy_to_user(&buf->fd, &err, sizeof(err)))
		return -EFAULT;

	return 0;
}
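A userspace caller of an export ioctl like the one above would look roughly like the sketch below. The ioctl number, the "/dev/dmabuf-demo" node, and the layout of struct dmabuf_create beyond its fd member are assumptions for illustration only; the driver snippet shows only that the new fd is written into the struct's fd field.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct dmabuf_create {
	int fd;		/* filled in by the driver; other fields unknown here */
};

#define DMABUF_IOCTL_EXPORT	_IOWR('D', 0, struct dmabuf_create)	/* assumed */

int main(void)
{
	struct dmabuf_create create = { .fd = -1 };
	int dev_fd;

	dev_fd = open("/dev/dmabuf-demo", O_RDWR);	/* hypothetical node */
	if (dev_fd < 0)
		return 1;

	if (ioctl(dev_fd, DMABUF_IOCTL_EXPORT, &create) < 0) {
		close(dev_fd);
		return 1;
	}

	printf("got dma-buf fd %d\n", create.fd);

	/* create.fd can now be mmap()ed or passed to another driver/process. */
	close(create.fd);
	close(dev_fd);
	return 0;
}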
int exynos_dmabuf_prime_handle_to_fd(struct drm_device *drm_dev,
				     struct drm_file *file,
				     unsigned int handle, int *prime_fd)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
	if (ret < 0)
		return ret;

	obj = drm_gem_object_lookup(drm_dev, file, handle);
	if (!obj) {
		DRM_DEBUG_KMS("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto err1;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (obj->prime_fd != -1) {
		/* we already have a prime fd referencing the object. */
		goto have_fd;
	}

	/*
	 * Get the dmabuf object for a gem object after registering
	 * the gem object with the allocated dmabuf.
	 *
	 * P.S. dma_buf_export() does the following:
	 * - creates a new dmabuf object.
	 * - dmabuf->priv = gem object.
	 * - file->private_data = dmabuf.
	 */
	obj->export_dma_buf = dma_buf_export(obj, &exynos_dmabuf_ops,
					     obj->size, 0600);
	if (IS_ERR(obj->export_dma_buf)) {
		ret = PTR_ERR(obj->export_dma_buf);
		goto err2;
	}

	/* get a file descriptor for the given dmabuf object. */
	obj->prime_fd = dma_buf_fd(obj->export_dma_buf);
	if (obj->prime_fd < 0) {
		DRM_DEBUG_KMS("failed to get fd from dmabuf.\n");
		dma_buf_put(obj->export_dma_buf);
		ret = obj->prime_fd;
		goto err2;
	}

	/*
	 * This gem object is now referenced by the fd, so its refcount
	 * must be increased here. It is decreased again when
	 * dmabuf_ops->release() is called.
	 */
	drm_gem_object_reference(obj);

have_fd:
	*prime_fd = obj->prime_fd;
err2:
	drm_gem_object_unreference(obj);
err1:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}