static int exynos_drm_gem_one_info(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; struct exynos_drm_gem_info_data *gem_info_data = data; struct drm_exynos_file_private *file_priv = gem_info_data->filp->driver_priv; struct exynos_drm_gem_obj *exynos_gem = to_exynos_gem_obj(obj); struct exynos_drm_gem_buf *buf = exynos_gem->buffer; seq_printf(gem_info_data->m, "%3d \t%3d \t%3d \t%2d \t\t%2d \t0x%08lx"\ " \t0x%x \t0x%08lx \t%2d \t\t%2d \t\t%2d\n", gem_info_data->filp->pid, file_priv->tgid, id, atomic_read(&obj->refcount.refcount), atomic_read(&obj->handle_count), exynos_gem->size, exynos_gem->flags, buf->page_size, buf->pfnmap, obj->export_dma_buf ? 1 : 0, obj->import_attach ? 1 : 0); return 0; }
struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { struct exynos_drm_fb *exynos_fb; int ret; exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); if (!exynos_fb) { DRM_ERROR("failed to allocate exynos drm framebuffer\n"); return ERR_PTR(-ENOMEM); } ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret) { DRM_ERROR("failed to initialize framebuffer\n"); return ERR_PTR(ret); } drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); return &exynos_fb->fb; }
struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { struct exynos_drm_fb *exynos_fb; struct exynos_drm_gem_obj *exynos_gem_obj; int ret; exynos_gem_obj = to_exynos_gem_obj(obj); ret = check_fb_gem_memory_type(dev, exynos_gem_obj); if (ret < 0) { DRM_ERROR("cannot use this gem memory type for fb.\n"); return ERR_PTR(-EINVAL); } exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); if (!exynos_fb) return ERR_PTR(-ENOMEM); drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret) { DRM_ERROR("failed to initialize framebuffer\n"); return ERR_PTR(ret); } return &exynos_fb->fb; }
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, struct drm_gem_object *obj, int flags) { struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); return dma_buf_export(obj, &exynos_dmabuf_ops, exynos_gem_obj->base.size, flags); }
static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct drm_gem_object *obj = attach->dmabuf->priv; struct exynos_drm_gem_obj *exynos_gem_obj; struct exynos_drm_gem_buf *buffer; struct sg_table *sgt; int ret; DRM_DEBUG_KMS("%s\n", __FILE__); exynos_gem_obj = to_exynos_gem_obj(obj); buffer = exynos_gem_obj->buffer; /* TODO. consider physically non-continuous memory with IOMMU. */ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) { DRM_DEBUG_KMS("failed to allocate sg table.\n"); return ERR_PTR(-ENOMEM); } ret = sg_alloc_table(sgt, 1, GFP_KERNEL); if (ret < 0) { DRM_DEBUG_KMS("failed to allocate scatter list.\n"); kfree(sgt); sgt = NULL; return ERR_PTR(-ENOMEM); } sg_init_table(sgt->sgl, 1); sg_dma_len(sgt->sgl) = buffer->size; sg_set_page(sgt->sgl, pfn_to_page(PFN_DOWN(buffer->dma_addr)), buffer->size, 0); sg_dma_address(sgt->sgl) = buffer->dma_addr; /* * increase reference count of this buffer. * * Note: * allocated physical memory region is being shared with others * so this region shouldn't be released until all references of * this region will be dropped by exynos_unmap_dmabuf(). */ atomic_inc(&buffer->shared_refcount); return sgt; }
/*
 * mode_config .fb_create hook: resolve the userspace GEM handles named in
 * @mode_cmd into GEM objects and build an exynos framebuffer around them.
 * Handle [0] is passed to exynos_drm_framebuffer_init(); any additional
 * per-plane handles (as reported by exynos_drm_format_num_buffers() for the
 * fb's pixel format) are looked up and stored directly.
 *
 * Returns the new framebuffer or an ERR_PTR (-ENOENT if any handle lookup
 * fails; otherwise whatever exynos_drm_framebuffer_init() returned).
 */
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;
	struct exynos_drm_fb *exynos_fb;
	int nr;
	int i;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return ERR_PTR(-ENOENT);
	}

	/*
	 * NOTE(review): the lookup reference is dropped here while the fb
	 * keeps a raw pointer to the object below. This is only safe if
	 * another reference (e.g. the userspace handle, or one taken by fb
	 * init/destroy paths not visible in this chunk) keeps the object
	 * alive for the fb's lifetime — TODO confirm against
	 * exynos_drm_fb_destroy().
	 */
	drm_gem_object_unreference_unlocked(obj);

	fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
	if (IS_ERR(fb))
		return fb;

	exynos_fb = to_exynos_fb(fb);
	/* number of memory planes implied by the pixel format (1..3) */
	nr = exynos_drm_format_num_buffers(fb->pixel_format);

	for (i = 1; i < nr; i++) {
		obj = drm_gem_object_lookup(dev, file_priv,
				mode_cmd->handles[i]);
		if (!obj) {
			DRM_ERROR("failed to lookup gem object\n");
			/* tear down the partially built fb on failure */
			exynos_drm_fb_destroy(fb);
			return ERR_PTR(-ENOENT);
		}

		/* same early-unreference pattern as for handle [0] above */
		drm_gem_object_unreference_unlocked(obj);

		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
	}

	return fb;
}
static void exynos_unmap_dmabuf(struct dma_buf_attachment *attach, struct sg_table *sgt) { struct drm_gem_object *obj = attach->dmabuf->priv; struct exynos_drm_gem_obj *exynos_gem_obj; struct exynos_drm_gem_buf *buffer; DRM_DEBUG_KMS("%s\n", __FILE__); exynos_gem_obj = to_exynos_gem_obj(obj); buffer = exynos_gem_obj->buffer; sg_free_table(sgt); kfree(sgt); sgt = NULL; if (atomic_read(&buffer->shared_refcount) <= 0) BUG(); atomic_dec(&buffer->shared_refcount); }
static int exynos_drm_list_gem_info(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; struct drm_file *filp = data; struct exynos_drm_gem_obj *gem = to_exynos_gem_obj(obj); struct exynos_drm_gem_buf *buf = gem->buffer; DRM_INFO("%3d \t%3d \t%2d \t\t%2d \t0x%lx \t0x%x \t0x%lx "\ "\t%2d \t\t%2d \t\t%2d\n", filp->pid, id, atomic_read(&obj->refcount.refcount), atomic_read(&obj->handle_count), gem->size, gem->flags, buf->page_size, buf->pfnmap, obj->export_dma_buf ? 1 : 0, obj->import_attach ? 1 : 0); return 0; }
/* Recover the exynos GEM object stashed in a dma-buf's priv pointer. */
static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
{
	struct drm_gem_object *obj = buf->priv;

	return to_exynos_gem_obj(obj);
}
static struct drm_framebuffer * exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; struct exynos_drm_gem_obj *exynos_gem_obj; struct exynos_drm_fb *exynos_fb; int i, ret; exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); if (!exynos_fb) return ERR_PTR(-ENOMEM); obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); if (!obj) { DRM_ERROR("failed to lookup gem object\n"); ret = -ENOENT; goto err_free; } drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd); DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); for (i = 1; i < exynos_fb->buf_cnt; i++) { obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]); if (!obj) { DRM_ERROR("failed to lookup gem object\n"); ret = -ENOENT; exynos_fb->buf_cnt = i; goto err_unreference; } exynos_gem_obj = to_exynos_gem_obj(obj); exynos_fb->exynos_gem_obj[i] = exynos_gem_obj; ret = check_fb_gem_memory_type(dev, exynos_gem_obj); if (ret < 0) { DRM_ERROR("cannot use this gem memory type for fb.\n"); goto err_unreference; } } ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret) { DRM_ERROR("failed to init framebuffer.\n"); goto err_unreference; } return &exynos_fb->fb; err_unreference: for (i = 0; i < exynos_fb->buf_cnt; i++) { struct drm_gem_object *obj; obj = &exynos_fb->exynos_gem_obj[i]->base; if (obj) drm_gem_object_unreference_unlocked(obj); } err_free: kfree(exynos_fb); return ERR_PTR(ret); }
int exynos_dmabuf_prime_handle_to_fd(struct drm_device *drm_dev, struct drm_file *file, unsigned int handle, int *prime_fd) { struct drm_gem_object *obj; struct exynos_drm_gem_obj *exynos_gem_obj; int ret = 0; DRM_DEBUG_KMS("%s\n", __FILE__); ret = mutex_lock_interruptible(&drm_dev->struct_mutex); if (ret < 0) return ret; obj = drm_gem_object_lookup(drm_dev, file, handle); if (!obj) { DRM_DEBUG_KMS("failed to lookup gem object.\n"); ret = -EINVAL; goto err1; } exynos_gem_obj = to_exynos_gem_obj(obj); if (obj->prime_fd != -1) { /* we have a prime fd already referencing the object. */ goto have_fd; } /* * get the dmabuf object for a gem object after registering * the gem object to allocated dmabuf. * * P.S. dma_buf_export function performs the followings: * - create a new dmabuf object. * - dmabuf->priv = gem object. * - file->private_data = dmabuf. */ obj->export_dma_buf = dma_buf_export(obj, &exynos_dmabuf_ops, obj->size, 0600); if (!obj->export_dma_buf) { ret = PTR_ERR(obj->export_dma_buf); goto err2; } /* get file descriptor for a given dmabuf object. */ obj->prime_fd = dma_buf_fd(obj->export_dma_buf); if (obj->prime_fd < 0) { DRM_DEBUG_KMS("failed to get fd from dmabuf.\n"); dma_buf_put(obj->export_dma_buf); ret = obj->prime_fd; goto err2; } /* * this gem object is referenced by the fd so * the object refcount should be increased. * after that when dmabuf_ops->release() is called, * it will be decreased again. */ drm_gem_object_reference(obj); have_fd: *prime_fd = obj->prime_fd; err2: drm_gem_object_unreference(obj); err1: mutex_unlock(&drm_dev->struct_mutex); return ret; }