static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
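In the videobuf2 dma-sg allocator this prepare hook is paired with a finish hook that hands ownership of the buffer back to the CPU. A minimal sketch of that counterpart, assuming the same struct vb2_dma_sg_buf layout as above, simply mirrors the call with dma_sync_sg_for_cpu():

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	/* Give the buffer back to the CPU once the device is done with it. */
	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}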
Example #2
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}
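On the decrypt path the driver treats any non-zero byte in rctx->auth_tag as an authentication failure (the hardware has presumably already folded the expected tag into the computed one). For reference, a hedged sketch of the same check done in constant time with crypto_memneq(); the helper name below is made up, only the tag/authsize semantics come from the snippet:

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/errno.h>

/* Illustrative constant-time variant of the all-zero tag test above. */
static int example_gcm_check_tag(const u8 *tag, int authsize)
{
	static const u8 zero[AES_BLOCK_SIZE];

	/* crypto_memneq() returns non-zero when the buffers differ. */
	return crypto_memneq(tag, zero, authsize) ? -EBADMSG : 0;
}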
Example #3
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}
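The matching teardown has to release both the scatter/gather table and the pages. A minimal sketch, assuming (as in this kernel version) that drm_prime_pages_to_sg() returned a kmalloc'ed sg_table:

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	/* Not marked dirty or accessed, matching the error path above. */
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
}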
Example #4
long fops_dma_l2l(struct pci_dev *dev, struct si_dma *dma)
{
	struct vm_area_struct *src_vma;
	struct vm_area_struct *dst_vma;
	struct vma_private_data *src_vma_data;
	struct vma_private_data *dst_vma_data;
	u64 src_aperture_gpu_addr;
	u64 dst_aperture_gpu_addr;
	u64 src_gpu_addr;
	u64 dst_gpu_addr;
	struct si_dma_l2l *l2l;
	struct dmas_timeouts_info t_info;
	long r;

	r = 0;

	if (dma->type == SI_DMA_TO_DEVICE || dma->type == SI_DMA_FROM_DEVICE
						|| dma->type == SI_DMA_ON_HOST)
		down_read(&current->mm->mmap_sem);

	l2l = &dma->params.l2l;

	/* if the src addr is a cpu aperture addr... */
	if (dma->type == SI_DMA_TO_DEVICE || dma->type == SI_DMA_ON_HOST) {
		r = cpu_addr_to_aperture_gpu_addr(dev, &src_aperture_gpu_addr,
				(void __iomem *)l2l->src_addr, &src_vma);
		if (r != 0) {
			if (r == NO_VMA_FOUND)
				r = -EINVAL;
			/*
			 * XXX: a vma can be around without a gpu aperture,
			 * for instance after a suspend.
			 */
			else if (r == NO_BA_MAP_FOUND)
				r = 0; /* blank dma operation */
			goto unlock_mmap_sem;
		}
	}

	/* if the dst addr is a cpu aperture addr... */
	if (dma->type == SI_DMA_FROM_DEVICE || dma->type == SI_DMA_ON_HOST) {
		r = cpu_addr_to_aperture_gpu_addr(dev, &dst_aperture_gpu_addr,
				(void __iomem *)l2l->dst_addr, &dst_vma);
		if (r != 0) {
			if (r == NO_VMA_FOUND)
				r = -EINVAL;
			/*
			 * XXX: a vma can be around without a gpu aperture,
			 * for instance after a suspend.
			 */
			else if (r == NO_BA_MAP_FOUND)
				r = 0; /* blank dma operation */
			goto unlock_mmap_sem;
		}
	}

	switch (dma->dir) {
	case SI_DMA_FROM_DEVICE:
		dst_gpu_addr = dst_aperture_gpu_addr;
		src_gpu_addr = l2l->src_addr;
		break;
	case SI_DMA_TO_DEVICE:
		dst_gpu_addr = l2l->dst_addr;
		src_gpu_addr = src_aperture_gpu_addr;
		break;
	case SI_DMA_ON_DEVICE:
		dst_gpu_addr = l2l->dst_addr;
		src_gpu_addr = l2l->src_addr;
		break;
	case SI_DMA_ON_HOST:
		dst_gpu_addr = dst_aperture_gpu_addr;
		src_gpu_addr = src_aperture_gpu_addr;
		break;
	default:
		r = -EINVAL;
		goto unlock_mmap_sem;
	}

	if (dma->type == SI_DMA_TO_DEVICE || dma->type == SI_DMA_ON_HOST) {
		src_vma_data = src_vma->vm_private_data;
		dma_sync_sg_for_device(&dev->dev, src_vma_data->sg_tbl.sgl,
				src_vma_data->sg_tbl.nents, DMA_BIDIRECTIONAL);
	}

	memcpy(&t_info, &dma->t_info, sizeof(t_info));
	r = dmas_cpy(dev, dst_gpu_addr, src_gpu_addr, l2l->sz, t_info);
	if (r < 0) {
		if (r == -DMAS_RING_TIMEOUT)
			r = SI_RING_TIMEOUT;
		else if (r == -DMAS_FENCE_TIMEOUT)
			r = SI_FENCE_TIMEOUT;
		goto unlock_mmap_sem;
	}

	if (dma->type == SI_DMA_FROM_DEVICE || dma->type == SI_DMA_ON_HOST) {
		dst_vma_data = dst_vma->vm_private_data;
		dma_sync_sg_for_cpu(&dev->dev, dst_vma_data->sg_tbl.sgl,
				dst_vma_data->sg_tbl.nents, DMA_BIDIRECTIONAL);
	}

unlock_mmap_sem:
	if (dma->type == SI_DMA_TO_DEVICE || dma->type == SI_DMA_FROM_DEVICE
						|| dma->type == SI_DMA_ON_HOST)
		up_read(&current->mm->mmap_sem);
	return r;
}
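Both sync calls above assume a long-lived, bidirectional mapping of the aperture pages stashed in vma->vm_private_data. A hedged sketch of what that mmap-time setup plausibly looks like; the helper, its pages/npages arguments and the error handling are hypothetical, only vma_private_data and its sg_tbl field come from the snippet:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

/* Hypothetical mmap-time setup for the per-VMA sg_tbl used above. */
static int example_map_aperture_pages(struct pci_dev *dev,
				      struct vma_private_data *data,
				      struct page **pages, unsigned int npages)
{
	int ret;

	ret = sg_alloc_table_from_pages(&data->sg_tbl, pages, npages, 0,
					(unsigned long)npages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* One bidirectional mapping serves both the to- and from-device syncs. */
	if (!dma_map_sg(&dev->dev, data->sg_tbl.sgl, data->sg_tbl.orig_nents,
			DMA_BIDIRECTIONAL)) {
		sg_free_table(&data->sg_tbl);
		return -EIO;
	}

	return 0;
}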
Example #5
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sglist, 1, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sglist, 1, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sglist, 1, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sglist, 1, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		void *vend;
		void *vtemp;
		unsigned long ln = 0;
		vend = buffer->priv_virt + buffer->size;
		vtemp = buffer->priv_virt + offset;

		if ((vtemp+length) > vend) {
			pr_err("Trying to flush outside of mapped range.\n");
			pr_err("End of mapped range: %p, trying to flush to "
				"address %p\n", vend, vtemp+length);
			WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
				"vaddr 0x%p, offset 0x%x, length: 0x%x\n",
				__func__, heap->name, buffer->size, vaddr,
				offset, length);
			return -EINVAL;
		}

		for (; ln < length && vtemp < vend;
		      vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
			struct page *page = vmalloc_to_page(vtemp);
			if (!page) {
				WARN(1, "Could not find page for virt. address %p\n",
					vtemp);
				return -EINVAL;
			}
			pstart = page_to_phys(page);
			/*
			 * If page -> phys is returning NULL, something
			 * has really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate %p to physical address\n",
					vtemp);
				return -EINVAL;
			}

			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}
Example #6
static void membuf_sync(struct membuf *buf)
{
	dma_sync_sg_for_device(NULL, buf->sg, buf->sg_nr, DMA_TO_DEVICE);
}
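This last helper, like the ion example, passes NULL as the device, which current kernels no longer accept. As a closing reference, a hedged sketch of the full streaming-DMA lifecycle around these sync calls, written against the current API (the helper and its arguments are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical round trip: map a scatterlist, hand ownership to the
 * device, hand it back to the CPU, then unmap. The sync calls are what
 * the examples on this page issue while their mappings stay alive.
 */
static int example_dma_roundtrip(struct device *dev, struct sg_table *sgt)
{
	if (!dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL))
		return -EIO;

	/* CPU filled the buffer: flush it out before the device reads it. */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents,
			       DMA_BIDIRECTIONAL);

	/* ... start the transfer and wait for completion here ... */

	/* Device wrote the buffer: sync back before the CPU reads it. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);

	/* Unmap with the nents passed to dma_map_sg(), not its return value. */
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	return 0;
}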