static struct sg_table *exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct sg_table *sgt = NULL;
	unsigned int npages;
	int nents;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	buf = gem_obj->buffer;

	/* there should always be pages allocated. */
	if (!buf->pages) {
		DRM_ERROR("pages is null.\n");
		goto err_unlock;
	}

	npages = buf->size / buf->page_size;

	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
	if (!sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		goto err_unlock;
	}

	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sg table for dma.\n");
		sg_free_table(sgt);
		kfree(sgt);
		sgt = NULL;
		goto err_unlock;
	}

	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
			npages, buf->size, buf->page_size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	long ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Wait for all shared fences to complete before we switch to future
	 * use of exclusive fence on this prime shared bo.
	 */
	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						  MAX_SCHEDULE_TIMEOUT);
	if (unlikely(ret < 0)) {
		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
		amdgpu_bo_unreserve(bo);
		return ret;
	}

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		bo->prime_shared_count++;

	amdgpu_bo_unreserve(bo);
	return ret;
}
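The pin above is balanced by an unpin on the export teardown path, which drops the GTT pin and the shared-use count taken here. A minimal sketch of that counterpart, assuming the same reserve/unreserve API as the pin path (the driver's actual body may differ in detail):

/*
 * Sketch of the counterpart to amdgpu_gem_prime_pin(): unpin the buffer
 * and drop prime_shared_count. Not necessarily the driver's exact code.
 */
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		return;

	amdgpu_bo_unpin(bo);
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);
}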
static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
			      struct sg_table *sgt,
			      enum dma_data_direction dir)
{
	/*
	 * Nothing to do: the mapping is cached in the attachment and
	 * released in udl_detach_dma_buf().
	 */
	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n",
			dev_name(attach->dev), attach->dmabuf->size, dir);
}
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct device *target_dev,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, target_dev, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * Wait for all shared fences to complete before we switch to
		 * future use of exclusive fence on this prime shared bo.
		 */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
							false,
							MAX_SCHEDULE_TIMEOUT);
		if (unlikely(r < 0)) {
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			goto error_unreserve;
		}
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
			      struct device *dev,
			      struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n",
			dev_name(attach->dev), attach->dmabuf->size);

	udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
	if (!udl_attach)
		return -ENOMEM;

	udl_attach->dir = DMA_NONE;
	attach->priv = udl_attach;

	return 0;
}
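The per-attachment state that these udl callbacks pass around through attach->priv can be reconstructed from how udl_map_dma_buf() and udl_detach_dma_buf() use it. A sketch, derived from that usage rather than from the driver's header:

/*
 * Per-attachment state implied by the udl callbacks in this section:
 * the embedded sg_table is filled in udl_map_dma_buf() and freed in
 * udl_detach_dma_buf(). Reconstructed from usage; not necessarily the
 * driver's exact definition.
 */
struct udl_drm_dmabuf_attachment {
	struct sg_table sgt;		/* cached mapping (&udl_attach->sgt) */
	enum dma_data_direction dir;	/* DMA_NONE until the first map */
	bool is_mapped;			/* set once dma_map_sg() succeeded */
};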
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	if (sgt->nents != 1)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);

	return &cma_obj->base;
}
/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their DRM driver's
 * ->gem_prime_import_sg_table() callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	if (sgt->nents != 1)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n",
			&cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
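As the kerneldoc above says, CMA-based drivers hook this helper into their drm_driver. A minimal sketch of that wiring; "foo_driver" and the elided fields are placeholders, while the drm_gem_prime_* helpers named are the generic DRM PRIME entry points:

/*
 * Sketch: wiring drm_gem_cma_prime_import_sg_table() into a CMA-based
 * driver, per the kerneldoc above. "foo_driver" is hypothetical.
 */
static struct drm_driver foo_driver = {
	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
	.gem_prime_import		= drm_gem_prime_import,
	.gem_prime_import_sg_table	= drm_gem_cma_prime_import_sg_table,
	/* ... fops, gem callbacks, name/desc/date, etc. ... */
};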
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/*
	 * exynos_dmabuf_release() being called means the file object's
	 * f_count has dropped to 0, so drop the gem reference that
	 * drm_prime_handle_to_fd() took when the buffer was exported.
	 */
	if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
		exynos_gem_obj->base.export_dma_buf = NULL;

		/*
		 * drop this gem object refcount to release the allocated
		 * buffer and resources.
		 */
		drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
	}
}
static void udl_detach_dma_buf(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct sg_table *sgt;

	if (!udl_attach)
		return;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n",
			dev_name(attach->dev), attach->dmabuf->size);

	sgt = &udl_attach->sgt;

	if (udl_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			     udl_attach->dir);

	sg_free_table(sgt);
	kfree(udl_attach);
	attach->priv = NULL;
}
static struct sg_table *exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	/* just return current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n",
			&buffer->dma_addr, buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
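For orientation, a driver hook like the one above is reached from the generic PRIME fd-to-handle path. A simplified sketch of that core flow follows; it deliberately omits the import/export handle cache, locking, and some error handling of the real drm_gem_prime_fd_to_handle(), and the function name is hypothetical:

/*
 * Simplified sketch of how the DRM core reaches a driver's
 * ->gem_prime_import() hook (such as exynos_dmabuf_prime_import())
 * from the PRIME_FD_TO_HANDLE ioctl. Not the real core implementation.
 */
static int prime_fd_to_handle_sketch(struct drm_device *dev,
				     struct drm_file *file_priv,
				     int prime_fd, uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);	/* fd -> dma_buf, takes a ref */
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		dma_buf_put(dma_buf);
		return PTR_ERR(obj);
	}

	/* publish a handle for userspace, then drop our local references */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	dma_buf_put(dma_buf);	/* the importer holds its own reference now */
	return ret;
}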
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = sgt ? PTR_ERR(sgt) : -EINVAL;
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kcalloc(sgt->nents, sizeof(*buffer->pages),
				GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);

		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}

		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n",
			&buffer->dma_addr, buffer->size);

	return &exynos_gem_obj->base;

err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);

	return ERR_PTR(ret);
}
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
	struct drm_device *dev = obj->base.dev;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int page_count;
	int nents, ret;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n",
			dev_name(attach->dev), attach->dmabuf->size, dir);

	/* just return current sgt if already requested. */
	if (udl_attach->dir == dir && udl_attach->is_mapped)
		return &udl_attach->sgt;

	if (!obj->pages) {
		ret = udl_gem_get_pages(obj);
		if (ret) {
			DRM_ERROR("failed to map pages.\n");
			return ERR_PTR(ret);
		}
	}

	page_count = obj->base.size / PAGE_SIZE;
	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
	if (IS_ERR(obj->sg)) {
		DRM_ERROR("failed to allocate sgt.\n");
		return ERR_CAST(obj->sg);
	}

	sgt = &udl_attach->sgt;

	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = obj->sg->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	udl_attach->is_mapped = true;
	udl_attach->dir = dir;
	attach->priv = udl_attach;

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
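Taken together, the four udl callbacks in this section (attach, detach, map, unmap) form the exporter side of a dma_buf_ops table. A sketch of that wiring, filling in only the entries whose implementations appear above:

/*
 * Sketch: the udl callbacks shown in this section wired into a
 * dma_buf_ops table on the export path. The remaining mandatory hooks
 * (mmap, kmap, release, ...) are elided because they are not shown here.
 */
static struct dma_buf_ops udl_dmabuf_ops = {
	.attach		= udl_attach_dma_buf,
	.detach		= udl_detach_dma_buf,
	.map_dma_buf	= udl_map_dma_buf,
	.unmap_dma_buf	= udl_unmap_dma_buf,
	/* .mmap, .kmap, .release, ... not shown in this section */
};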
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kcalloc(sgt->nents, sizeof(*buffer->pages),
				GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
				       &exynos_gem_obj->priv_handle,
				       &exynos_gem_obj->priv_id);
	if (ret < 0)
		goto err_release_gem;

	DRM_DEBUG_PRIME("ump id = %d, dma_addr = %pad, size = 0x%lx\n",
			exynos_gem_obj->priv_id, &buffer->dma_addr,
			buffer->size);

	return &exynos_gem_obj->base;

err_release_gem:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);

	return ERR_PTR(ret);
}