Example #1
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > dev_priv->gtt.stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}
Example #2
/* Make a sg_table based on sg[] of crypto request. */
static int ss_sg_table_init(struct sg_table *sgt, struct scatterlist *sg,
							int len, char *vbase, dma_addr_t pbase)
{
	int i;
	int npages = 0;
	int offset = 0;
	struct scatterlist *src_sg = sg;
	struct scatterlist *dst_sg = NULL;

	npages = ss_sg_cnt(sg, len);
	WARN_ON(npages == 0);

	if (sg_alloc_table(sgt, npages, GFP_KERNEL)) {
		SS_ERR("sg_alloc_table(%d) failed!\n", npages);
		WARN_ON(1);
		return -ENOMEM;
	}

	dst_sg = sgt->sgl;
	for (i = 0; i < npages; i++) {
		sg_set_buf(dst_sg, vbase + offset, sg_dma_len(src_sg));
		offset += sg_dma_len(src_sg);
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}
	return 0;
}
Example #3
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}
Example #4
unsigned int disp_allocate_mva(unsigned int pa, unsigned int size, M4U_PORT_ID port)
{
	int ret = 0;
	unsigned int mva = 0;
	m4u_client_t *client = NULL;
	struct sg_table *sg_table = kzalloc(sizeof(struct sg_table), GFP_ATOMIC);

	if (!sg_table)
		return 0;

	if (sg_alloc_table(sg_table, 1, GFP_KERNEL)) {
		kfree(sg_table);
		return 0;
	}

	sg_dma_address(sg_table->sgl) = pa;
	sg_dma_len(sg_table->sgl) = size;
	client = m4u_create_client();
	if (IS_ERR_OR_NULL(client))
		DISPMSG("create client fail!\n");

	mva = pa;
	ret = m4u_alloc_mva(client, port, 0, sg_table, size, M4U_PROT_READ | M4U_PROT_WRITE,
			    M4U_FLAGS_FIX_MVA, &mva);
	/* m4u_alloc_mva(M4U_PORT_DISP_OVL0, pa_start, (pa_end - pa_start + 1), 0, 0, mva); */
	if (ret)
		DISPMSG("m4u_alloc_mva returns fail: %d\n", ret);

	DISPMSG("[DISPHAL] FB MVA is 0x%08X PA is 0x%08X\n", mva, pa);

	return mva;
}
Example #5
static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
		unsigned int page_size)
{
	struct sg_table *sgt = NULL;
	struct scatterlist *sgl;
	int i, ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		goto out;

	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
	if (ret)
		goto err_free_sgt;

	if (page_size < PAGE_SIZE)
		page_size = PAGE_SIZE;

	for_each_sg(sgt->sgl, sgl, nr_pages, i)
		sg_set_page(sgl, pages[i], page_size, 0);

	return sgt;

err_free_sgt:
	kfree(sgt);
out:
	return NULL;
}
Example #6
static int ion_mm_heap_allocate(struct ion_heap *heap,
                                struct ion_buffer *buffer,
                                unsigned long size, unsigned long align,
                                unsigned long flags)
{
    ion_mm_buffer_info* pBufferInfo = NULL;
    int ret;
    unsigned int addr;
    struct sg_table *table;
    struct scatterlist *sg;
    void* pVA;
    ION_FUNC_ENTER;
    pVA = vmalloc_user(size);
    buffer->priv_virt = NULL;
    if (IS_ERR_OR_NULL(pVA))
    {
        printk("[ion_mm_heap_allocate]: Error. Allocate buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    pBufferInfo = (ion_mm_buffer_info*) kzalloc(sizeof(ion_mm_buffer_info), GFP_KERNEL);
    if (IS_ERR_OR_NULL(pBufferInfo))
    {
        vfree(pVA);
        printk("[ion_mm_heap_allocate]: Error. Allocate ion_buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    if (!table)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
    if (ret)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        kfree(table);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    sg = table->sgl;
    for (addr=(unsigned int)pVA; addr < (unsigned int) pVA + size; addr += PAGE_SIZE)
    {
        struct page *page = vmalloc_to_page((void*)addr);
        sg_set_page(sg, page, PAGE_SIZE, 0);
        sg = sg_next(sg);
    }
    buffer->sg_table = table;

    pBufferInfo->pVA = pVA;
    pBufferInfo->eModuleID = -1;
    buffer->priv_virt = pBufferInfo;
    ION_FUNC_LEAVE;
    return 0;
}
Example #7
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
					 enum dma_data_direction dir)
{
	struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, err;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
	if (err)
		goto err_free;

	sg = st->sgl;
	for (i = 0; i < mock->npages; i++) {
		sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		err = -ENOMEM;
		goto err_st;
	}

	return st;

err_st:
	sg_free_table(st);
err_free:
	kfree(st);
	return ERR_PTR(err);
}
Example #8
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
Example #9
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}
Example #10
static struct sg_table *_tee_shm_dma_buf_map_dma_buf(
		struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
	struct tee_shm_attach *tee_shm_attach = attach->priv;
	struct tee_shm *tee_shm = attach->dmabuf->priv;
	struct sg_table *sgt = NULL;
	struct scatterlist *rd, *wr;
	unsigned int i;
	int nents, ret;
	struct tee *tee;

	tee = tee_shm->tee;

	INMSG();

	/* just return current sgt if already requested. */
	if (tee_shm_attach->dir == dir && tee_shm_attach->is_mapped) {
		OUTMSGX(&tee_shm_attach->sgt);
		return &tee_shm_attach->sgt;
	}

	sgt = &tee_shm_attach->sgt;

	ret = sg_alloc_table(sgt, tee_shm->sgt.orig_nents, GFP_KERNEL);
	if (ret) {
		dev_err(_DEV(tee), "failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	rd = tee_shm->sgt.sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			dev_err(_DEV(tee), "failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	tee_shm_attach->is_mapped = true;
	tee_shm_attach->dir = dir;
	attach->priv = tee_shm_attach;

err_unlock:
	OUTMSGX(sgt);
	return sgt;
}
Example #11
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);

out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}
Example #12
File: gem.c Project: JaneDu/ath
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
Example #13
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
Example #14
static struct sg_table *
huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return ERR_PTR(-ENOMEM);
	}

	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	return pages;

err:
	huge_free_pages(obj, pages);
	return ERR_PTR(-ENOMEM);
#undef GFP
}
Example #15
static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct sg_table *sgt;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);

	buffer = exynos_gem_obj->buffer;

	/* TODO. consider physically non-continuous memory with IOMMU. */

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		DRM_DEBUG_KMS("failed to allocate sg table.\n");
		return ERR_PTR(-ENOMEM);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to allocate scatter list.\n");
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	sg_init_table(sgt->sgl, 1);
	sg_dma_len(sgt->sgl) = buffer->size;
	sg_set_page(sgt->sgl, pfn_to_page(PFN_DOWN(buffer->dma_addr)),
			buffer->size, 0);
	sg_dma_address(sgt->sgl) = buffer->dma_addr;

	/*
	 * increase reference count of this buffer.
	 *
	 * Note:
	 * allocated physical memory region is being shared with others
	 * so this region shouldn't be released until all references of
	 * this region will be dropped by exynos_unmap_dmabuf().
	 */
	atomic_inc(&buffer->shared_refcount);

	return sgt;
}
Example #16
/*
 * Create scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	struct scatterlist *sg;
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg = sgt->sgl;
	sg_set_page(sg, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sg) = sg_phys(sg);

	return 0;
}
Example #17
static struct sg_table *dmabuf_map_dma_buf(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dmabuf_file *priv = attach->dmabuf->priv;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	sg_dma_address(sgt->sgl) = priv->phys;
	sg_dma_len(sgt->sgl) = priv->size;

	return sgt;
}
Example #18
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err0;

	table->sgl->length = buffer->size;
	table->sgl->offset = 0;
	table->sgl->dma_address = buffer->priv_phys;

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}
Example #19
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	sg_dma_address(sgt->sgl) = bo->paddr;
	sg_dma_len(sgt->sgl) = gem->size;

	return sgt;
}
Example #20
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t dma_addr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
	sg_dma_address(sg->sgl) = dma_addr;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
Example #21
static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}
Example #22
dma_addr_t ispmmu_vmap(const struct scatterlist *sglist,
		       int sglen)
{
	int err;
	void *da;
	struct sg_table *sgt;
	unsigned int i;
	struct scatterlist *sg, *src = (struct scatterlist *)sglist;

	/*
	 * convert isp sglist to iommu sgt
	 * FIXME: should be fixed in the upper layer?
	 */
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return -ENOMEM;
	err = sg_alloc_table(sgt, sglen, GFP_KERNEL);
	if (err)
		goto err_sg_alloc;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg_set_buf(sg, phys_to_virt(sg_dma_address(src + i)),
			   sg_dma_len(src + i));

	da = (void *)iommu_vmap(isp_iommu, 0, sgt, IOMMU_FLAG);
	if (IS_ERR(da))
		goto err_vmap;

	return (dma_addr_t)da;

err_vmap:
	sg_free_table(sgt);
err_sg_alloc:
	kfree(sgt);
	return -ENOMEM;
}
Example #23
static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
        enum dma_data_direction direction)
{
    struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
    unsigned long pfn = PFN_DOWN(pdata->base);
    struct page *page = pfn_to_page(pfn);
    struct sg_table *table;
    int ret;

    table = kzalloc(sizeof(*table), GFP_KERNEL);
    if (!table)
        return ERR_PTR(-ENOMEM);

    ret = sg_alloc_table(table, 1, GFP_KERNEL);
    if (ret < 0)
        goto err;

    sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
    return table;

err:
    kfree(table);
    return ERR_PTR(ret);
}
Example #24
static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	/* just return current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
Example #25
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	unsigned int npages;
	int max_order;
	gfp_t gfp;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		unsigned int max_segment;

		max_segment = swiotlb_max_segment();
		if (max_segment) {
			max_segment = max_t(unsigned int, max_segment,
					    PAGE_SIZE) >> PAGE_SHIFT;
			max_order = min(max_order, ilog2(max_segment));
		}
	}
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		sg_page_sizes |= PAGE_SIZE << order;
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map try again with single page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}
Example #26
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
	struct drm_device *dev = obj->base.dev;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int page_count;
	int nents, ret;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);

	/* just return current sgt if already requested. */
	if (udl_attach->dir == dir && udl_attach->is_mapped)
		return &udl_attach->sgt;

	if (!obj->pages) {
		ret = udl_gem_get_pages(obj);
		if (ret) {
			DRM_ERROR("failed to map pages.\n");
			return ERR_PTR(ret);
		}
	}

	page_count = obj->base.size / PAGE_SIZE;
	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
	if (IS_ERR(obj->sg)) {
		DRM_ERROR("failed to allocate sgt.\n");
		return ERR_CAST(obj->sg);
	}

	sgt = &udl_attach->sgt;

	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = obj->sg->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	udl_attach->is_mapped = true;
	udl_attach->dir = dir;
	attach->priv = udl_attach;

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
Example #27
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
Example #28
static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int npages = obj->base.size / PAGE_SIZE;
	struct sg_table *st;
	struct scatterlist *sg;
	int max_order;
	gfp_t gfp;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : 0), order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;
	return st;

err:
	sg_mark_end(sg);
	internal_free_pages(st);
	return ERR_PTR(-ENOMEM);
}
Example #29
static int ion_system_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct sg_table table_sync;
	struct scatterlist *sg;
	struct scatterlist *sg_sync;
	int ret;
	struct list_head pages;
	struct list_head pages_from_pool;
	struct page_info *info, *tmp_info;
	int i = 0;
	unsigned int nents_sync = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	size_t total_pages = 0;
	unsigned int max_order = orders[0];
	struct pages_mem data;
	unsigned int sz;
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	memset(&table_sync, 0, sizeof(table_sync));
	data.size = 0;
	INIT_LIST_HEAD(&pages);
	INIT_LIST_HEAD(&pages_from_pool);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
		if (!info)
			goto err;

		sz = (1 << info->order) * PAGE_SIZE;

		if (info->from_pool) {
			list_add_tail(&info->list, &pages_from_pool);
		} else {
			list_add_tail(&info->list, &pages);
			data.size += sz;
			++nents_sync;
		}
		size_remaining -= sz;
		total_pages += 1 << info->order;
		max_order = info->order;
		i++;
	}

	ret = ion_heap_alloc_pages_mem(&data);

	if (ret)
		goto err;

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err_free_data_pages;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	if (nents_sync) {
		ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
		if (ret)
			goto err_free_sg;
	}

	i = 0;
	sg = table->sgl;
	sg_sync = table_sync.sgl;

	/*
	 * We now have two separate lists. One list contains pages from the
	 * pool and the other pages from buddy. We want to merge these
	 * together while preserving the ordering of the pages (higher order
	 * first).
	 */
	do {
		if (!list_empty(&pages))
			info = list_first_entry(&pages, struct page_info, list);
		else
			info = NULL;
		if (!list_empty(&pages_from_pool))
			tmp_info = list_first_entry(&pages_from_pool,
							struct page_info, list);
		else
Example #30
/*---------------------------------------------------------------------------*/
static int xio_server_main(void *data)
{
	char **argv = (char **)data;
	struct xio_server	*server;	/* server portal */
	struct server_data	*server_data;
	char			url[256];
	struct xio_context_params ctx_params;
	struct xio_context	*ctx;
	int			i;
	struct xio_vmsg 	*omsg;
	void 			*buf = NULL;
	unsigned long           data_len = 0;

	atomic_add(2, &module_state);

	server_data = vzalloc(sizeof(*server_data));
	if (!server_data) {
		/*pr_err("server_data alloc failed\n");*/
		return 0;
	}

	/* create thread context for the server */
	memset(&ctx_params, 0, sizeof(ctx_params));
	ctx_params.flags = XIO_LOOP_GIVEN_THREAD;
	ctx_params.worker = current;

	ctx = xio_context_create(&ctx_params, 0, -1);
	if (!ctx) {
		vfree(server_data);
		pr_err("context open filed\n");
		return 0;
	}
	server_data->ctx = ctx;

	/* create "hello world" message */
	if (argv[3] != NULL && kstrtoul(argv[3], 0, &data_len)) { /* check, convert and assign data_len */
		data_len = 0;
	}
	for (i = 0; i < QUEUE_DEPTH; i++) {
		xio_reinit_msg(&server_data->rsp[i]);
		omsg = &server_data->rsp[i].out;

		/* header */
		server_data->rsp[i].out.header.iov_base = kstrdup("hello world header rsp 1", GFP_KERNEL);
		server_data->rsp[i].out.header.iov_len = strlen((const char *) server_data->rsp[i].out.header.iov_base) + 1;
		/* iovec[0]*/
		sg_alloc_table(&omsg->data_tbl, 64, GFP_KERNEL);
		/* currently only one entry */
		xio_init_vmsg(omsg, 1);     /* one entry (max_nents) */
		if (data_len < max_data_len) { /* small msgs */
			buf = kstrdup("hello world iovec rsp", GFP_KERNEL);
		} else { /* big msgs */
			if (!buf) {
				pr_info("allocating xio memory...\n");
				buf = kmalloc(data_len, GFP_KERNEL);
				memcpy(buf, "hello world iovec rsp", 22);
			}
		}
		sg_init_one(omsg->data_tbl.sgl, buf, strlen(buf) + 1);
		/* orig_nents is 64 */
		vmsg_sglist_set_nents(omsg, 1);
	}

	/* create url to connect to */
	sprintf(url, "%s://%s:%s", argv[4], argv[1], argv[2]);
	/* bind a listener server to a portal/url */
	server = xio_bind(ctx, &server_ops, url, NULL, 0, server_data);
	if (server) {
		pr_info("listen to %s\n", url);

		g_server_data = server_data;
		if (atomic_add_unless(&module_state, 4, 0x83))
			xio_context_run_loop(ctx);
		atomic_sub(4, &module_state);

		/* normal exit phase */
		pr_info("exit signaled\n");

		/* free the server */
		xio_unbind(server);
	}

	/* free the message */
	for (i = 0; i < QUEUE_DEPTH; i++) {
		kfree(server_data->rsp[i].out.header.iov_base);
		if (data_len < max_data_len) {
			/* Currently need to release only one entry */
			kfree(sg_virt(server_data->rsp[i].out.data_tbl.sgl));
		}
		xio_fini_vmsg(&server_data->rsp[i].out);
	}

	if (buf) {
		pr_info("freeing xio memory...\n");
		kfree(buf);
		buf = NULL;
	}

	/* free the context */
	xio_context_destroy(ctx);

	vfree(server_data);

	complete_and_exit(&cleanup_complete, 0);
	return 0;
}