static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, buf->size);
        if (IS_ERR(bo))
                return bo;

        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
                goto free;
        }

        get_dma_buf(buf);

        bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (!bo->sgt) {
                err = -ENOMEM;
                goto detach;
        }

        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
        }

        if (tegra->domain) {
                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0)
                        goto detach;
        } else {
                if (bo->sgt->nents > 1) {
                        err = -EINVAL;
                        goto detach;
                }

                bo->paddr = sg_dma_address(bo->sgt->sgl);
        }

        bo->gem.import_attach = attach;

        return bo;

detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
                dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
free:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct udl_gem_object *uobj;
        int ret;

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
        if (ret)
                goto fail_unmap;

        uobj->base.import_attach = attach;

        return &uobj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}
/**
 * rppc_alloc_dmabuf - import a buffer and store in a rppc buffer descriptor
 * @rpc: rppc instance handle
 * @fd: dma_buf file descriptor
 * @autoreg: flag indicating the mode of creation
 *
 * This function primarily imports a buffer into the driver and holds
 * a reference to the buffer on behalf of the remote processor. The
 * buffer to be imported is represented by a dma-buf file descriptor,
 * and as such is agnostic of the buffer allocator and/or exporter.
 * The buffer is imported using the dma-buf API, and a driver-specific
 * buffer descriptor is used to store the imported buffer properties.
 * The imported buffers are all stored in a rppc instance-specific
 * idr, to be used for looking up and cleaning up the driver buffer
 * descriptors.
 *
 * The @autoreg field dictates the manner in which the buffer is
 * imported. The user-side can pre-register buffers with the driver
 * (which will import the buffers) if the application is going to use
 * them repeatedly in consecutive function invocations. Buffers are
 * auto-imported if the user-side has not registered them previously,
 * and are un-imported once the remote function call returns.
 *
 * This function is to be called only after checking that the buffer
 * has not been imported already (see rppc_find_dmabuf).
 *
 * Return: allocated rppc_dma_buf on success, or an error pointer otherwise
 */
struct rppc_dma_buf *rppc_alloc_dmabuf(struct rppc_instance *rpc, int fd,
                                       bool autoreg)
{
        struct rppc_dma_buf *dma;
        void *ret;
        int id;

        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return ERR_PTR(-ENOMEM);

        dma->fd = fd;
        dma->autoreg = !!autoreg;
        dma->buf = dma_buf_get(dma->fd);
        if (IS_ERR(dma->buf)) {
                ret = dma->buf;
                goto free_dma;
        }

        dma->attach = dma_buf_attach(dma->buf, rpc->dev);
        if (IS_ERR(dma->attach)) {
                ret = dma->attach;
                goto put_buf;
        }

        dma->sgt = dma_buf_map_attachment(dma->attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(dma->sgt)) {
                ret = dma->sgt;
                goto detach_buf;
        }

        dma->pa = sg_dma_address(dma->sgt->sgl);

        mutex_lock(&rpc->lock);
        id = idr_alloc(&rpc->dma_idr, dma, 0, 0, GFP_KERNEL);
        dma->id = id;
        mutex_unlock(&rpc->lock);
        if (id < 0) {
                ret = ERR_PTR(id);
                goto unmap_buf;
        }

        return dma;

unmap_buf:
        dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
detach_buf:
        dma_buf_detach(dma->buf, dma->attach);
put_buf:
        dma_buf_put(dma->buf);
free_dma:
        kfree(dma);
        return ret;
}
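/*
 * A minimal usage sketch for the import path above, not part of the
 * driver: it assumes the rppc_find_dmabuf() helper referenced in the
 * kernel-doc takes (rpc, fd) and returns the existing descriptor or
 * NULL. Only the look-up-before-import ordering is taken from the
 * documentation; the helper's exact signature is an assumption.
 */
static struct rppc_dma_buf *rppc_import_once(struct rppc_instance *rpc, int fd)
{
        struct rppc_dma_buf *dma;

        /* reuse a previously imported buffer when one exists */
        dma = rppc_find_dmabuf(rpc, fd);        /* hypothetical signature */
        if (dma)
                return dma;

        /* auto-import: un-imported again once the remote call returns */
        return rppc_alloc_dmabuf(rpc, fd, false);
}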
void m2m1shot_unmap_dma_buf(struct device *dev,
                            struct m2m1shot_buffer_plane_dma *plane,
                            enum dma_data_direction dir)
{
        if (plane->dmabuf) {
                exynos_ion_sync_dmabuf_for_device(dev, plane->dmabuf,
                                                  plane->bytes_used, dir);
                dma_buf_unmap_attachment(plane->attachment, plane->sgt, dir);
        } else {
                exynos_ion_sync_sg_for_cpu(dev, plane->bytes_used,
                                           plane->sgt, dir);
        }
}
static void fimg2d_unmap_dma_buf(struct fimg2d_control *info,
                                 struct fimg2d_dma *dma)
{
        if (!dma->dma_addr)
                return;

        iovmm_unmap(info->dev, dma->dma_addr);
        dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
                                 dma->direction);
        dma_buf_detach(dma->dma_buf, dma->attachment);
        dma_buf_put(dma->dma_buf);
        memset(dma, 0, sizeof(struct fimg2d_dma));
}
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_gem_object *obj;
        struct sg_table *sgt;
        int ret;

        if (dma_buf->ops == &omap_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
dma_addr_t decon_map_sec_dma_buf(struct dma_buf *dbuf, int plane)
{
        struct decon_device *decon = get_decon_drvdata(0); /* 0: decon Int ID */

        if (!dbuf || (plane >= MAX_BUF_PLANE_CNT) || (plane < 0))
                return -EINVAL;

        dma.ion_handle = NULL;
        dma.fence = NULL;
        dma.dma_buf = dbuf;

        dma.attachment = dma_buf_attach(dbuf, decon->dev);
        if (IS_ERR(dma.attachment)) {
                decon_err("dma_buf_attach() failed: %ld\n",
                          PTR_ERR(dma.attachment));
                goto err_buf_map_attach;
        }

        dma.sg_table = dma_buf_map_attachment(dma.attachment, DMA_TO_DEVICE);
        if (IS_ERR(dma.sg_table)) {
                decon_err("dma_buf_map_attachment() failed: %ld\n",
                          PTR_ERR(dma.sg_table));
                goto err_buf_map_attachment;
        }

        dma.dma_addr = ion_iovmm_map(dma.attachment, 0, dma.dma_buf->size,
                                     DMA_TO_DEVICE, plane);
        if (IS_ERR_VALUE(dma.dma_addr)) {
                decon_err("iovmm_map() failed: %pa\n", &dma.dma_addr);
                goto err_iovmm_map;
        }

        exynos_ion_sync_dmabuf_for_device(decon->dev, dma.dma_buf,
                                          dma.dma_buf->size, DMA_TO_DEVICE);

        return dma.dma_addr;

err_iovmm_map:
        dma_buf_unmap_attachment(dma.attachment, dma.sg_table, DMA_TO_DEVICE);
err_buf_map_attachment:
        dma_buf_detach(dma.dma_buf, dma.attachment);
err_buf_map_attach:
        return 0;
}
static unsigned int fimg2d_map_dma_buf(struct fimg2d_control *info,
                                       struct fimg2d_dma *dma, int fd,
                                       enum dma_data_direction direction)
{
        dma_addr_t dma_addr;

        dma->direction = direction;
        dma->dma_buf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dma->dma_buf)) {
                dev_err(info->dev, "dma_buf_get() failed: %ld\n",
                        PTR_ERR(dma->dma_buf));
                goto err_buf_get;
        }

        dma->attachment = dma_buf_attach(dma->dma_buf, info->dev);
        if (IS_ERR_OR_NULL(dma->attachment)) {
                dev_err(info->dev, "dma_buf_attach() failed: %ld\n",
                        PTR_ERR(dma->attachment));
                goto err_buf_attach;
        }

        dma->sg_table = dma_buf_map_attachment(dma->attachment, direction);
        if (IS_ERR_OR_NULL(dma->sg_table)) {
                dev_err(info->dev, "dma_buf_map_attachment() failed: %ld\n",
                        PTR_ERR(dma->sg_table));
                goto err_buf_map_attachment;
        }

        dma_addr = iovmm_map(info->dev, dma->sg_table->sgl, 0,
                             dma->dma_buf->size);
        if (!dma_addr || IS_ERR_VALUE(dma_addr)) {
                /* report the value iovmm_map() returned, not the stale field */
                dev_err(info->dev, "iovmm_map() failed: %pad\n", &dma_addr);
                goto err_iovmm_map;
        }

        dma->dma_addr = dma_addr;

        return dma->dma_buf->size;

err_iovmm_map:
        dma_buf_unmap_attachment(dma->attachment, dma->sg_table, direction);
err_buf_map_attachment:
        dma_buf_detach(dma->dma_buf, dma->attachment);
err_buf_attach:
        dma_buf_put(dma->dma_buf);
err_buf_get:
        return 0;
}
int ioctl_end_of_operation(int fd)
{
        /*
         * When finished, the buffer-user notifies end-of-DMA to the exporter.
         * Once the DMA for the current buffer-user is over, it signals
         * 'end-of-DMA' to the exporter using the dma_buf_unmap_attachment()
         * API.
         *
         * Interface:
         *      void dma_buf_unmap_attachment(struct dma_buf_attachment *,
         *                                    struct sg_table *);
         */

        /* TODO: use the fd from userland to look up the saved contexts */
        dma_buf_unmap_attachment(curr_dma_buf_attachment, curr_sg_table);

        return 0;
}
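/*
 * A minimal sketch of the complete buffer-user lifecycle described in the
 * comment above, written against the three-argument form of
 * dma_buf_unmap_attachment() used by the other snippets in this section
 * (mainline later added the enum dma_data_direction parameter to both the
 * map and unmap calls). The device pointer and the DMA direction here are
 * placeholders, not taken from the snippet above.
 */
static int dma_buf_user_cycle(struct dma_buf *buf, struct device *dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        /* 1. attach the device to the buffer */
        attach = dma_buf_attach(buf, dev);
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        /* 2. map the buffer into the device's address space */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dma_buf_detach(buf, attach);
                return PTR_ERR(sgt);
        }

        /* ... program the device with sgt and run the DMA ... */

        /* 3. signal end-of-DMA to the exporter */
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);

        /* 4. detach once the device is done with the buffer */
        dma_buf_detach(buf, attach);

        return 0;
}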
void tegra_bo_free_object(struct drm_gem_object *gem)
{
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_destroy(gem->dev, bo);
        }

        drm_gem_free_mmap_offset(gem);
        drm_gem_object_release(gem);
        kfree(bo);
}
/**
 * rppc_free_dmabuf - release an imported buffer
 * @id: idr index of the imported buffer descriptor
 * @p: imported buffer descriptor allocated during rppc_alloc_dmabuf
 * @data: rppc instance handle
 *
 * This function is used to release a buffer that has been previously
 * imported through a rppc_alloc_dmabuf call. It can be used either
 * individually, for releasing a specific buffer, or in a loop iterator,
 * for releasing all the buffers associated with a remote function call
 * or during cleanup of the rppc instance.
 *
 * Return: 0 on success, and -ENOENT if invalid pointers are passed in
 */
int rppc_free_dmabuf(int id, void *p, void *data)
{
        struct rppc_dma_buf *dma = p;
        struct rppc_instance *rpc = data;

        if (!dma || !rpc)
                return -ENOENT;

        dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dma->buf, dma->attach);
        dma_buf_put(dma->buf);

        WARN_ON(id != dma->id);
        idr_remove(&rpc->dma_idr, id);
        kfree(dma);

        return 0;
}
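/*
 * Because rppc_free_dmabuf() above matches the idr_for_each() callback
 * signature (id, pointer, data), instance-wide cleanup can hand it
 * straight to the idr core. A minimal sketch, assuming it runs only
 * when no new imports can race with the teardown:
 */
static void rppc_free_all_dmabufs(struct rppc_instance *rpc)
{
        /* invokes rppc_free_dmabuf(id, dma, rpc) for every imported buffer */
        idr_for_each(&rpc->dma_idr, rppc_free_dmabuf, rpc);
        idr_destroy(&rpc->dma_idr);
}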
void decon_free_sec_dma_buf(int plane)
{
        struct decon_device *decon = get_decon_drvdata(0); /* 0: decon Int ID */

        if (IS_ERR_VALUE(dma.dma_addr) || !dma.dma_buf)
                return;

        ion_iovmm_unmap(dma.attachment, dma.dma_addr);
        dma_buf_unmap_attachment(dma.attachment, dma.sg_table, DMA_TO_DEVICE);

        exynos_ion_sync_dmabuf_for_cpu(decon->dev, dma.dma_buf,
                                       dma.dma_buf->size, DMA_FROM_DEVICE);
        dma_buf_detach(dma.dma_buf, dma.attachment);
        memset(&dma, 0, sizeof(dma));
}
void tegra_bo_free_object(struct drm_gem_object *gem)
{
        struct tegra_drm *tegra = gem->dev->dev_private;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (tegra->domain)
                tegra_bo_iommu_unmap(tegra, bo);

        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);
        }

        drm_gem_object_release(gem);
        kfree(bo);
}
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
                                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct nouveau_bo *nvbo;
        int ret;

        if (dma_buf->ops == &nouveau_dmabuf_ops) {
                nvbo = dma_buf->priv;
                if (nvbo->gem && nvbo->gem->dev == dev) {
                        drm_gem_object_reference(nvbo->gem);
                        dma_buf_put(dma_buf);
                        return nvbo->gem;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
        if (ret)
                goto fail_unmap;

        nvbo->gem->import_attach = attach;

        return nvbo->gem;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
                                               struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct radeon_bo *bo;
        int ret;

        if (dma_buf->ops == &radeon_dmabuf_ops) {
                bo = dma_buf->priv;
                if (bo->gem_base.dev == dev) {
                        drm_gem_object_reference(&bo->gem_base);
                        dma_buf_put(dma_buf);
                        return &bo->gem_base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
        if (ret)
                goto fail_unmap;

        bo->gem_base.import_attach = attach;

        return &bo->gem_base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}
static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!buf->sg_table.sgl)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        dma_buf_unmap_attachment(buf->db_attach, &buf->sg_table,
                                 buf->write ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        buf->size = 0;
        buf->num_pages = 0;
        buf->sg_table.sgl = NULL;
}
static void vb2_dma_contig_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct dma_buf *dmabuf;
        struct sg_table *sg;

        if (!buf || !buf->db_attach)
                return;

        WARN_ON(!buf->dma_addr);

        dmabuf = buf->db_attach->dmabuf;
        sg = dmabuf->priv;

        /* put the sg table for this buffer */
        dma_buf_unmap_attachment(buf->db_attach, sg, DMA_FROM_DEVICE);

        buf->dma_addr = 0;
        buf->size = 0;
}
static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }

        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_sgt = NULL;
}
static void vb2_ion_unmap_dmabuf(void *buf_priv)
{
        struct vb2_ion_buf *buf = buf_priv;

        if (WARN_ON(!buf)) {
                pr_err("%s: NO BUF!!!\n", __func__);
                return;
        }

        if (WARN_ON(!buf->attachment)) {
                pr_err("%s error: trying to unmap a non attached buffer\n",
                       __func__);
                return;
        }

        if (WARN_ON(!buf->cookie.sgt)) {
                pr_err("%s error: dmabuf buffer is already unmapped\n",
                       __func__);
                return;
        }

        dma_buf_unmap_attachment(buf->attachment, buf->cookie.sgt,
                                 buf->direction);

        buf->cookie.sgt = NULL;
}
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
                                  struct tegra_dc_ext_cursor_image *args)
{
        struct tegra_dc_ext *ext = user->ext;
        struct tegra_dc *dc = ext->dc;
        struct tegra_dc_dmabuf *handle, *old_handle;
        dma_addr_t phys_addr;
        int ret;
        u32 extformat = TEGRA_DC_EXT_CURSOR_FORMAT_FLAGS(args->flags);
        u32 fg = CURSOR_COLOR(args->foreground.r,
                              args->foreground.g,
                              args->foreground.b);
        u32 bg = CURSOR_COLOR(args->background.r,
                              args->background.g,
                              args->background.b);
        unsigned extsize = TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE(args->flags);
        enum tegra_dc_cursor_size size;
        enum tegra_dc_cursor_format format;

        switch (extsize) {
        case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32:
                size = TEGRA_DC_CURSOR_SIZE_32X32;
                break;
        case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64:
                size = TEGRA_DC_CURSOR_SIZE_64X64;
                break;
        case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_128x128:
                size = TEGRA_DC_CURSOR_SIZE_128X128;
                break;
        case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_256x256:
                size = TEGRA_DC_CURSOR_SIZE_256X256;
                break;
        default:
                return -EINVAL;
        }

        switch (extformat) {
        case TEGRA_DC_EXT_CURSOR_FORMAT_2BIT_LEGACY:
                format = TEGRA_DC_CURSOR_FORMAT_2BIT_LEGACY;
                break;
        case TEGRA_DC_EXT_CURSOR_FORMAT_RGBA_NON_PREMULT_ALPHA:
                format = TEGRA_DC_CURSOR_FORMAT_RGBA_NON_PREMULT_ALPHA;
                break;
        case TEGRA_DC_EXT_CURSOR_FORMAT_RGBA_PREMULT_ALPHA:
                format = TEGRA_DC_CURSOR_FORMAT_RGBA_PREMULT_ALPHA;
                break;
        default:
                return -EINVAL;
        }

        mutex_lock(&ext->cursor.lock);

        if (ext->cursor.user != user) {
                ret = -EACCES;
                goto unlock;
        }

        if (!ext->enabled) {
                ret = -ENXIO;
                goto unlock;
        }

        old_handle = ext->cursor.cur_handle;

        ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle,
                                      &phys_addr);
        if (ret)
                goto unlock;

        ext->cursor.cur_handle = handle;

        ret = tegra_dc_cursor_image(dc, format, size, fg, bg, phys_addr);

        mutex_unlock(&ext->cursor.lock);

        if (old_handle) {
                dma_buf_unmap_attachment(old_handle->attach, old_handle->sgt,
                                         DMA_TO_DEVICE);
                dma_buf_detach(old_handle->buf, old_handle->attach);
                dma_buf_put(old_handle->buf);
                kfree(old_handle);
        }

        return ret;

unlock:
        mutex_unlock(&ext->cursor.lock);
        return ret;
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
                                                  struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *sgl;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
        struct page *page;
        int ret;

        DRM_DEBUG_PRIME("%s\n", __FILE__);

        /* is this one of our own objects? */
        if (dma_buf->ops == &exynos_dmabuf_ops) {
                struct drm_gem_object *obj;

                exynos_gem_obj = dma_buf->priv;
                obj = &exynos_gem_obj->base;

                /* is it from our device? */
                if (obj->dev == drm_dev) {
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, drm_dev->dev);
        if (IS_ERR(attach))
                return ERR_PTR(-EINVAL);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto err_buf_detach;
        }

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
                ret = -ENOMEM;
                goto err_unmap_attach;
        }

        buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
        if (!buffer->pages) {
                DRM_ERROR("failed to allocate pages.\n");
                ret = -ENOMEM;
                goto err_free_buffer;
        }

        exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_free_pages;
        }

        sgl = sgt->sgl;

        if (sgt->nents == 1) {
                buffer->dma_addr = sg_dma_address(sgt->sgl);
                buffer->size = sg_dma_len(sgt->sgl);
        } else {
                unsigned int i = 0;

                buffer->dma_addr = sg_dma_address(sgl);
                while (i < sgt->nents) {
                        buffer->pages[i] = sg_page(sgl);
                        buffer->size += sg_dma_len(sgl);
                        sgl = sg_next(sgl);
                        i++;
                }
        }

        exynos_gem_obj->buffer = buffer;
        buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;

        /* register buffer information to the private buffer manager. */
        ret = register_buf_to_priv_mgr(exynos_gem_obj,
                                       &exynos_gem_obj->priv_handle,
                                       &exynos_gem_obj->priv_id);
        if (ret < 0)
                goto err_release_gem;

        DRM_DEBUG_PRIME("ump id = %d, dma_addr = 0x%x, size = 0x%lx\n",
                        exynos_gem_obj->priv_id, buffer->dma_addr,
                        buffer->size);

        return &exynos_gem_obj->base;

err_release_gem:
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
err_free_pages:
        kfree(buffer->pages);
        buffer->pages = NULL;
err_free_buffer:
        kfree(buffer);
        buffer = NULL;
err_unmap_attach:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
        dma_buf_detach(dma_buf, attach);

        return ERR_PTR(ret);
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
                                                  struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *sgl;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
        struct page *page;
        int ret;

        DRM_DEBUG_PRIME("%s\n", __FILE__);

        /* is this one of our own objects? */
        if (dma_buf->ops == &exynos_dmabuf_ops) {
                struct drm_gem_object *obj;

                exynos_gem_obj = dma_buf->priv;
                obj = &exynos_gem_obj->base;

                /* is it from our device? */
                if (obj->dev == drm_dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        dma_buf_put(dma_buf);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, drm_dev->dev);
        if (IS_ERR(attach))
                return ERR_PTR(-EINVAL);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
                /* don't turn a NULL sgt into ERR_PTR(0), i.e. NULL */
                ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
                goto err_buf_detach;
        }

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
                ret = -ENOMEM;
                goto err_unmap_attach;
        }

        buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
        if (!buffer->pages) {
                DRM_ERROR("failed to allocate pages.\n");
                ret = -ENOMEM;
                goto err_free_buffer;
        }

        exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_free_pages;
        }

        sgl = sgt->sgl;

        if (sgt->nents == 1) {
                buffer->dma_addr = sg_dma_address(sgt->sgl);
                buffer->size = sg_dma_len(sgt->sgl);

                /* always physically contiguous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
        } else {
                unsigned int i = 0;

                buffer->dma_addr = sg_dma_address(sgl);
                while (i < sgt->nents) {
                        buffer->pages[i] = sg_page(sgl);
                        buffer->size += sg_dma_len(sgl);
                        sgl = sg_next(sgl);
                        i++;
                }

                exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
        }

        exynos_gem_obj->buffer = buffer;
        buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;

        DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n",
                        buffer->dma_addr, buffer->size);

        return &exynos_gem_obj->base;

err_free_pages:
        kfree(buffer->pages);
        buffer->pages = NULL;
err_free_buffer:
        kfree(buffer);
        buffer = NULL;
err_unmap_attach:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
        dma_buf_detach(dma_buf, attach);

        return ERR_PTR(ret);
}
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
{
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
        ssize_t size;
        int err;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        host1x_bo_init(&bo->base, &tegra_bo_ops);

        size = round_up(buf->size, PAGE_SIZE);

        err = drm_gem_object_init(drm, &bo->gem, size);
        if (err < 0)
                goto free;

        err = drm_gem_create_mmap_offset(&bo->gem);
        if (err < 0)
                goto release;

        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
                goto free_mmap;
        }

        get_dma_buf(buf);

        bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (!bo->sgt) {
                err = -ENOMEM;
                goto detach;
        }

        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
        }

        if (bo->sgt->nents > 1) {
                err = -EINVAL;
                goto detach;
        }

        bo->paddr = sg_dma_address(bo->sgt->sgl);
        bo->gem.import_attach = attach;

        return bo;

detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
                dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
free_mmap:
        drm_gem_free_mmap_offset(&bo->gem);
release:
        drm_gem_object_release(&bo->gem);
free:
        kfree(bo);
        return ERR_PTR(err);
}
int ump_dmabuf_import_wrapper(u32 __user *argument,
                              struct ump_session_data *session_data)
{
        ump_session_memory_list_element *session = NULL;
        struct ump_uk_dmabuf ump_dmabuf;
        ump_dd_handle *ump_handle;
        ump_dd_physical_block *blocks;
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;
        struct sg_table *sgt;
        struct scatterlist *sgl;
        unsigned long block_size;
        /* FIXME */
        struct device dev;
        unsigned int i = 0, npages;
        int ret;

        /* sanity check input parameters */
        if (!argument || !session_data) {
                MSG_ERR(("NULL parameter.\n"));
                return -EINVAL;
        }

        if (copy_from_user(&ump_dmabuf, argument,
                           sizeof(struct ump_uk_dmabuf))) {
                MSG_ERR(("copy_from_user() failed.\n"));
                return -EFAULT;
        }

        dma_buf = dma_buf_get(ump_dmabuf.fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        /*
         * Check whether this dma_buf has been imported already.
         *
         * TODO: if already imported, dma_buf_put() should be called
         * and the previously imported dma_buf returned instead.
         */

        attach = dma_buf_attach(dma_buf, &dev);
        if (IS_ERR(attach)) {
                ret = PTR_ERR(attach);
                goto err_dma_buf_put;
        }

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto err_dma_buf_detach;
        }

        npages = sgt->nents;    /* really needed? */
        ump_dmabuf.ctx = (void *)session_data;

        block_size = sizeof(ump_dd_physical_block) * npages;

        blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);
        if (NULL == blocks) {
                MSG_ERR(("Failed to allocate blocks\n"));
                ret = -ENOMEM;
                goto err_dma_buf_unmap;
        }

        sgl = sgt->sgl;

        while (i < npages) {
                blocks[i].addr = sg_phys(sgl);
                blocks[i].size = sg_dma_len(sgl);
                sgl = sg_next(sgl);
                i++;
        }

        /*
         * Initialize the session memory list element, and add it
         * to the session object.
         */
        session = _mali_osk_calloc(1, sizeof(*session));
        if (!session) {
                DBG_MSG(1, ("Failed to allocate session.\n"));
                ret = -EFAULT;
                goto err_free_block;
        }

        ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
        if (UMP_DD_HANDLE_INVALID == ump_handle) {
                DBG_MSG(1, ("Failed to create ump handle.\n"));
                ret = -EFAULT;
                goto err_free_session;
        }

        session->mem = (ump_dd_mem *)ump_handle;

        _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
        _mali_osk_list_add(&(session->list),
                           &(session_data->list_head_session_memory_list));
        _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

        ump_dmabuf.ump_handle = (uint32_t)ump_handle;
        ump_dmabuf.size = ump_dd_size_get(ump_handle);

        if (copy_to_user(argument, &ump_dmabuf,
                         sizeof(struct ump_uk_dmabuf))) {
                MSG_ERR(("copy_to_user() failed.\n"));
                ret = -EFAULT;
                goto err_release_ump_handle;
        }

        /*
         * Free blocks only once the handle has been copied out;
         * freeing it earlier double-freed it on the error path below.
         */
        _mali_osk_free(blocks);

        return 0;

err_release_ump_handle:
        ump_dd_reference_release(ump_handle);
err_free_session:
        /* FIXME: session may still be linked on the session memory list here */
        _mali_osk_free(session);
err_free_block:
        _mali_osk_free(blocks);
err_dma_buf_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_dma_buf_detach:
        dma_buf_detach(dma_buf, attach);
err_dma_buf_put:
        dma_buf_put(dma_buf);
        return ret;
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
                                                  struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *sgl;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &exynos_dmabuf_ops) {
                struct drm_gem_object *obj;

                obj = dma_buf->priv;

                /* is it from our device? */
                if (obj->dev == drm_dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, drm_dev->dev);
        if (IS_ERR(attach))
                return ERR_PTR(-EINVAL);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto err_buf_detach;
        }

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                goto err_unmap_attach;
        }

        exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_free_buffer;
        }

        sgl = sgt->sgl;

        buffer->size = dma_buf->size;
        buffer->dma_addr = sg_dma_address(sgl);

        if (sgt->nents == 1) {
                /* always physically contiguous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
        } else {
                /*
                 * this case could be CONTIG or NONCONTIG type but for now
                 * sets NONCONTIG.
                 * TODO: we have to find a way that the exporter can notify
                 * the type of its own buffer to the importer.
                 */
                exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
        }

        exynos_gem_obj->buffer = buffer;
        buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;

        DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n",
                        buffer->dma_addr, buffer->size);

        return &exynos_gem_obj->base;

err_free_buffer:
        kfree(buffer);
        buffer = NULL;
err_unmap_attach:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
void nvhost_dmabuf_unpin(struct mem_handle *handle, struct sg_table *sgt)
{
        dma_buf_unmap_attachment(to_dmabuf_att(handle), sgt,
                                 DMA_BIDIRECTIONAL);
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        dma_buf_unmap_attachment(obj->base.import_attach, obj->pages,
                                 DMA_BIDIRECTIONAL);
}
int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
                                     struct drm_file *file,
                                     int prime_fd, unsigned int *handle)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct dma_buf_attachment *attach;
        struct dma_buf *dmabuf;
        struct sg_table *sgt;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
        if (ret < 0)
                return ret;

        dmabuf = dma_buf_get(prime_fd);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto out;
        }

        /*
         * If the dmabuf behind prime_fd is already in the
         * file_priv->prime list, then return its handle.
         *
         * Note:
         * if the prime_fd from user space belongs to another process,
         * the dmabuf cannot be in file_priv->prime list, because
         * file_priv is unique to each process.
         */
        ret = drm_prime_lookup_fd_handle_mapping(&file_priv->prime,
                                                 dmabuf, handle);
        if (!ret) {
                /* drop the reference we got above. */
                dma_buf_put(dmabuf);
                goto out;
        }

        attach = dma_buf_attach(dmabuf, drm_dev->dev);
        if (IS_ERR(attach)) {
                ret = PTR_ERR(attach);
                goto fail_put;
        }

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
                ret = -ENOMEM;
                goto fail_unmap;
        }

        exynos_gem_obj = exynos_drm_gem_init(drm_dev, dmabuf->size);
        if (!exynos_gem_obj) {
                /* don't leak the buffer descriptor allocated above */
                kfree(buffer);
                ret = -ENOMEM;
                goto fail_unmap;
        }

        ret = drm_gem_handle_create(file, &exynos_gem_obj->base, handle);
        if (ret < 0)
                goto fail_handle;

        /* consider physically non-contiguous memory with IOMMU. */
        buffer->dma_addr = sg_dma_address(sgt->sgl);
        buffer->size = sg_dma_len(sgt->sgl);
        buffer->sgt = sgt;

        /*
         * import (fd to handle) means that the physical memory region
         * from the sgt is being shared with others, so shared_refcount
         * should be 1.
         */
        atomic_set(&buffer->shared_refcount, 1);

        exynos_gem_obj->base.import_attach = attach;

        ret = drm_prime_insert_fd_handle_mapping(&file_priv->prime,
                                                 dmabuf, *handle);
        if (ret < 0)
                goto fail_handle;

        /* register buffer information to the private buffer manager. */
        ret = register_buf_to_priv_mgr(exynos_gem_obj,
                                       &exynos_gem_obj->priv_handle,
                                       &exynos_gem_obj->priv_id);
        if (ret < 0) {
                drm_prime_remove_fd_handle_mapping(&file_priv->prime, dmabuf);
                goto fail_handle;
        }

        DRM_DEBUG_KMS("fd = %d, handle = %d, dma_addr = 0x%x, size = 0x%lx\n",
                      prime_fd, *handle, buffer->dma_addr, buffer->size);

        drm_gem_object_unreference(&exynos_gem_obj->base);
        mutex_unlock(&drm_dev->struct_mutex);

        return 0;

fail_handle:
        drm_gem_object_unreference(&exynos_gem_obj->base);
        kfree(buffer);
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
fail_unmap:
        /* the unmap must pass the same direction the mapping was made with */
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dmabuf, attach);
fail_put:
        dma_buf_put(dmabuf);
out:
        mutex_unlock(&drm_dev->struct_mutex);
        return ret;
}