Example #1
static void ak98_sdio_stop_data(struct ak98_mci_host *host)
{
	u32 masks;

	PK1("%s\n", __func__);

	writel(0, host->base + AK98MCIDMACTRL);
	writel(0, host->base + AK98MCIDATACTRL);
	masks = readl(host->base + AK98MCIMASK);
	masks &= ~(MCI_DATAIRQMASKS|MCI_FIFOFULLMASK|MCI_FIFOEMPTYMASK);
	writel(masks, host->base + AK98MCIMASK);
	PK("DISABLE DATA IRQ\n"); 
     
#ifdef MCI_USE_L2FIFO_DMA
	if (host->data->flags & MMC_DATA_WRITE) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), host->data->sg, host->data->sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->data->sg_len, DMA_TO_DEVICE);
	} else {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), host->data->sg, host->data->sg_len, DMA_FROM_DEVICE);
		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->data->sg_len, DMA_FROM_DEVICE);
	}
#endif

	host->data = NULL;
}
Example #2
void s5p_mfc_cache_inv(void *alloc_ctx)
{
	struct vb2_ion_buf *buf = (struct vb2_ion_buf *)alloc_ctx;

	dma_sync_sg_for_cpu(buf->conf->dev, buf->sg, buf->nents,
		DMA_FROM_DEVICE);
}
Example #3
int ion_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		outer_cache_op(info->handle, info->handle + length);
	}

	return 0;
}
Example #4
int videobuf_dma_sync(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
	BUG_ON(!dma->sglen);

	dma_sync_sg_for_cpu(q->dev, dma->sglist, dma->nr_pages, dma->direction);
	return 0;
}
Example #5
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	if (IS_COHERENT_BUF(stat))
		return;

	dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
			    buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}
Example #6
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	if (ISP_STAT_USES_DMAENGINE(stat))
		return;

	dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
			    buf->sgt.nents, DMA_FROM_DEVICE);
}
Example #7
static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
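For reference, the "prepare" half of the same videobuf2 allocator hands the buffer back to the device with dma_sync_sg_for_device(). A minimal sketch, assuming the same vb2_dma_sg_buf layout as in the example above (not the driver's verbatim code):

static void vb2_dma_sg_prepare_sketch(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* as above, a DMABUF exporter does the cache maintenance itself */
	if (buf->db_attach)
		return;

	/* return ownership to the device before it writes the buffer again */
	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}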
Example #8
/**
 * usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s)
 * @dev: device to which the scatterlist will be mapped
 * @is_in: mapping transfer direction
 * @sg: the scatterlist to synchronize
 * @n_hw_ents: the positive return value from usb_buffer_map_sg
 *
 * Use this when you are re-using a scatterlist's data buffers for
 * another USB request.
 */
void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
			   struct scatterlist *sg, int n_hw_ents)
{
	struct usb_bus		*bus;
	struct device		*controller;

	if (!dev
			|| !(bus = dev->bus)
			|| !(controller = bus->controller)
			|| !controller->dma_mask)
		return;

	dma_sync_sg_for_cpu(controller, sg, n_hw_ents,
			    is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
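The kernel-doc above says this helper is for re-using an already-mapped scatterlist between requests. An illustrative sketch of that pattern, assuming the companion usb_buffer_map_sg()/usb_buffer_unmap_sg() helpers from the same (older) USB core API; usb_sg_reuse_demo() and its arguments are made up for the example:

static int usb_sg_reuse_demo(struct usb_device *udev,
			     struct scatterlist *sg, int nents)
{
	int n_hw_ents;

	/* map once for IN transfers */
	n_hw_ents = usb_buffer_map_sg(udev, 1, sg, nents);
	if (n_hw_ents <= 0)
		return -ENOMEM;

	/* ... submit and complete the first IN request ... */

	/* make the completed data visible to the CPU before the buffers
	 * are inspected and re-used for the next request */
	usb_buffer_dmasync_sg(udev, 1, sg, n_hw_ents);

	/* ... submit and complete the second IN request ... */

	usb_buffer_unmap_sg(udev, 1, sg, n_hw_ents);
	return 0;
}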
Example #9
void kbase_sync_to_cpu(phys_addr_t paddr, void *vaddr, size_t sz)
{
#ifdef CONFIG_ARM
	__cpuc_flush_dcache_area(vaddr, sz);
	outer_flush_range(paddr, paddr + sz);
#elif defined(CONFIG_ARM64)
	/* FIXME (MID64-46): There's no other suitable cache flush function for ARM64 */
	flush_cache_all();
#elif defined(CONFIG_X86)
	struct scatterlist scl = { 0, };
	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
	dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE);
#else
#error Implement cache maintenance for your architecture here
#endif
}
Example #10
long fops_dma_l2l(struct pci_dev *dev, struct si_dma *dma)
{
	struct vm_area_struct *src_vma;
	struct vm_area_struct *dst_vma;
	struct vma_private_data *src_vma_data;
	struct vma_private_data *dst_vma_data;
	u64 src_aperture_gpu_addr;
	u64 dst_aperture_gpu_addr;
	u64 src_gpu_addr;
	u64 dst_gpu_addr;
	struct si_dma_l2l *l2l;
	struct dmas_timeouts_info t_info;
	long r;

	r = 0;

	if (dma->type == SI_DMA_TO_DEVICE || dma->type == SI_DMA_FROM_DEVICE
						|| dma->type == SI_DMA_ON_HOST)
		down_read(&current->mm->mmap_sem);

	l2l = &dma->params.l2l;

	/* if the src addr is a cpu aperture addr... */
	if (dma->type == SI_DMA_TO_DEVICE || dma->type == SI_DMA_ON_HOST) {
		r = cpu_addr_to_aperture_gpu_addr(dev, &src_aperture_gpu_addr,
				(void __iomem *)l2l->src_addr, &src_vma);
		if (r != 0) {
			if (r == NO_VMA_FOUND)
				r = -EINVAL;
			/*
			 * XXX:a vma can be around without a gpu aperture,
			 * for instance after a suspend.
			 */
			else if (r == NO_BA_MAP_FOUND)
				r = 0; /* blank dma operation */
			goto unlock_mmap_sem;
		}
	}

	/* if the dst addr is a cpu aperture addr... */
	if (dma->type == SI_DMA_FROM_DEVICE || dma->type == SI_DMA_ON_HOST) {
		r = cpu_addr_to_aperture_gpu_addr(dev, &dst_aperture_gpu_addr,
				(void __iomem *)l2l->dst_addr, &dst_vma);
		if (r != 0) {
			if (r == NO_VMA_FOUND)
				r = -EINVAL;
			/*
			 * XXX:a vma can be around without a gpu aperture,
			 * for instance after a suspend.
			 */
			else if (r == NO_BA_MAP_FOUND)
				r = 0; /* blank dma operation */
			goto unlock_mmap_sem;
		}
	}

	switch (dma->dir) {
	case SI_DMA_FROM_DEVICE:
		dst_gpu_addr = dst_aperture_gpu_addr;
		src_gpu_addr = l2l->src_addr;
		break;
	case SI_DMA_TO_DEVICE:
		dst_gpu_addr = l2l->dst_addr;
		src_gpu_addr = src_aperture_gpu_addr;
		break;
	case SI_DMA_ON_DEVICE:
		dst_gpu_addr = l2l->dst_addr;
		src_gpu_addr = l2l->src_addr;
		break;
	case SI_DMA_ON_HOST:
		dst_gpu_addr = dst_aperture_gpu_addr;
		src_gpu_addr = src_aperture_gpu_addr;
		break;
	default:
		r = -EINVAL;
		goto unlock_mmap_sem;
	}

	if (dma->type == SI_DMA_TO_DEVICE || dma->type == SI_DMA_ON_HOST) {
		src_vma_data = src_vma->vm_private_data;
		dma_sync_sg_for_device(&dev->dev, src_vma_data->sg_tbl.sgl,
				src_vma_data->sg_tbl.nents, DMA_BIDIRECTIONAL);
	}

	memcpy(&t_info, &dma->t_info, sizeof(t_info));
	r = dmas_cpy(dev, dst_gpu_addr, src_gpu_addr, l2l->sz, t_info);
	if (r < 0) {
		if (r == -DMAS_RING_TIMEOUT)
			r = SI_RING_TIMEOUT;
		else if (r == -DMAS_FENCE_TIMEOUT)
			r = SI_FENCE_TIMEOUT;
		goto unlock_mmap_sem;
	}

	if (dma->type == SI_DMA_FROM_DEVICE || dma->type == SI_DMA_ON_HOST) {
		dst_vma_data = dst_vma->vm_private_data;
		dma_sync_sg_for_cpu(&dev->dev, dst_vma_data->sg_tbl.sgl,
				dst_vma_data->sg_tbl.nents, DMA_BIDIRECTIONAL);
	}

unlock_mmap_sem:
	if (dma->type == SI_DMA_TO_DEVICE || dma->type == SI_DMA_FROM_DEVICE
						|| dma->type == SI_DMA_ON_HOST)
		up_read(&current->mm->mmap_sem);
	return r;
}
Example #11
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	int ret;
	int left = 0;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

	if (tx) {
		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
					tx->sgl, tx->nents, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx)
			goto tx_nodma;

		desc_tx->callback = spi_imx_dma_tx_callback;
		desc_tx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_tx);
	}

	if (rx) {
		struct scatterlist *sgl_last = &rx->sgl[rx->nents - 1];
		unsigned int	orig_length = sgl_last->length;
		int	wml_mask = ~(spi_imx->rx_wml - 1);
		/*
		 * Adjust the transfer length of the last scatterlist if there
		 * is tail data; read the tail by PIO, since DMA sometimes
		 * misses the final tail interrupt.
		 */
		left = transfer->len % spi_imx->rx_wml;
		if (left)
			sgl_last->length = orig_length & wml_mask;

		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
					rx->sgl, rx->nents, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			goto rx_nodma;

		desc_rx->callback = spi_imx_dma_rx_callback;
		desc_rx->callback_param = (void *)spi_imx;
		dmaengine_submit(desc_rx);
	}

	reinit_completion(&spi_imx->dma_rx_completion);
	reinit_completion(&spi_imx->dma_tx_completion);

	/* Trigger the cspi module. */
	spi_imx->dma_finished = 0;
	spi_imx->devtype_data->trigger(spi_imx);

	dma_async_issue_pending(master->dma_tx);
	dma_async_issue_pending(master->dma_rx);
	/* Wait for SDMA to finish the data transfer. */
	ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
					  IMX_DMA_TIMEOUT(transfer->len));
	if (!ret) {
		pr_warn("%s %s: I/O Error in DMA TX:%x\n",
			dev_driver_string(&master->dev),
			dev_name(&master->dev), transfer->len);
		dmaengine_terminate_all(master->dma_tx);
	} else {
		ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
				IMX_DMA_TIMEOUT(transfer->len));
		if (!ret) {
			pr_warn("%s %s: I/O Error in DMA RX:%x\n",
				dev_driver_string(&master->dev),
				dev_name(&master->dev), transfer->len);
			spi_imx->devtype_data->reset(spi_imx);
			dmaengine_terminate_all(master->dma_rx);
		} else if (left) {
			/* read the tail data by PIO */
			dma_sync_sg_for_cpu(master->dma_rx->device->dev,
					    &rx->sgl[rx->nents - 1], 1,
					    DMA_FROM_DEVICE);
			spi_imx->rx_buf = transfer->rx_buf
						+ (transfer->len - left);
			spi_imx_tail_pio_set(spi_imx, left);
			reinit_completion(&spi_imx->xfer_done);

			spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TCEN);

			ret = wait_for_completion_timeout(&spi_imx->xfer_done,
						IMX_DMA_TIMEOUT(transfer->len));
			if (!ret) {
				pr_warn("%s %s: I/O Error in RX tail\n",
					dev_driver_string(&master->dev),
					dev_name(&master->dev));
			}
		}
	}

	spi_imx->dma_finished = 1;
	if (spi_imx->devtype_data->devtype == IMX6UL_ECSPI)
		spi_imx->devtype_data->trigger(spi_imx);

	if (!ret)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = transfer->len;

	return ret;

rx_nodma:
	dmaengine_terminate_all(master->dma_tx);
tx_nodma:
	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
		     dev_driver_string(&master->dev),
		     dev_name(&master->dev));
	return -EAGAIN;
}
Example #12
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sglist, 1, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sglist, 1, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sglist, 1, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sglist, 1, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		void *vend;
		void *vtemp;
		unsigned long ln = 0;
		vend = buffer->priv_virt + buffer->size;
		vtemp = buffer->priv_virt + offset;

		if ((vtemp+length) > vend) {
			pr_err("Trying to flush outside of mapped range.\n");
			pr_err("End of mapped range: %p, trying to flush to "
				"address %p\n", vend, vtemp+length);
			WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
				"vaddr 0x%p, offset 0x%x, length: 0x%x\n",
				__func__, heap->name, buffer->size, vaddr,
				offset, length);
			return -EINVAL;
		}

		for (; ln < length && vtemp < vend;
		      vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
			struct page *page = vmalloc_to_page(vtemp);
			if (!page) {
				WARN(1, "Could not find page for virt. address %p\n",
					vtemp);
				return -EINVAL;
			}
			pstart = page_to_phys(page);
			/*
			 * If page -> phys is returning NULL, something
			 * has really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate %p to physical address\n",
					vtemp);
				return -EINVAL;
			}

			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}
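All of the examples above sit at the same point in the streaming scatter-gather DMA lifecycle: the moment ownership of a mapped buffer is handed back to the CPU. A self-contained sketch of that lifecycle for context; dev, pages and the DMA_FROM_DEVICE direction are placeholders, and error handling is trimmed to the essentials:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int sg_dma_roundtrip(struct device *dev, struct page **pages, int npages)
{
	struct sg_table sgt;
	struct scatterlist *sg;
	int mapped, i;

	if (sg_alloc_table(&sgt, npages, GFP_KERNEL))
		return -ENOMEM;
	for_each_sg(sgt.sgl, sg, npages, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* hand the buffer to the device */
	mapped = dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_FROM_DEVICE);
	if (!mapped) {
		sg_free_table(&sgt);
		return -EIO;
	}

	/* ... program the device with the 'mapped' entries and run the transfer ... */

	/* give the buffer back to the CPU before reading it (the step the
	 * examples above implement) */
	dma_sync_sg_for_cpu(dev, sgt.sgl, sgt.nents, DMA_FROM_DEVICE);
	/* ... CPU reads or modifies the data ... */

	/* if another transfer follows, return ownership to the device */
	dma_sync_sg_for_device(dev, sgt.sgl, sgt.nents, DMA_FROM_DEVICE);

	/* ... second transfer ... */

	dma_unmap_sg(dev, sgt.sgl, sgt.nents, DMA_FROM_DEVICE);
	sg_free_table(&sgt);
	return 0;
}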