Example #1
0
struct sg_table *nvhost_nvmap_pin(struct mem_mgr *mgr,
		struct mem_handle *handle)
{
	int err = 0;
	dma_addr_t ret = 0;
	struct sg_table *sgt = kmalloc(sizeof(*sgt) + sizeof(*sgt->sgl),
			GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = __sg_alloc_table(sgt, 1, 1, (gfp_t)(sgt+1), sg_kmalloc);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

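	/* pin the handle through nvmap and record its DMA address in the single-entry table */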
	ret = nvmap_pin((struct nvmap_client *)mgr,
			(struct nvmap_handle_ref *)handle);
	if (IS_ERR_VALUE(ret)) {
		kfree(sgt);
		return ERR_PTR(ret);
	}
	sg_dma_address(sgt->sgl) = ret;

	return sgt;
}
Example #2
0
static void omap2_mcspi_tx_dma(struct spi_device *spi,
				struct spi_transfer *xfer,
				struct dma_slave_config cfg)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma  *mcspi_dma;
	unsigned int		count;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;

	if (mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

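		/* describe the already-mapped TX buffer with a one-entry scatterlist for the slave-DMA API */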
		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->tx_dma;
		sg_dma_len(&sg) = xfer->len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}
	dma_async_issue_pending(mcspi_dma->dma_tx);
	omap2_mcspi_set_dma_req(spi, 0, 1);

}
Example #3
0
dma_addr_t ispmmu_vmap(const struct scatterlist *sglist,
		       int sglen)
{
	int err;
	void *da;
	struct sg_table *sgt;
	unsigned int i;
	struct scatterlist *sg, *src = (struct scatterlist *)sglist;

	/*
	 * convert isp sglist to iommu sgt
	 * FIXME: should be fixed in the upper layer?
	 */
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return -ENOMEM;
	err = sg_alloc_table(sgt, sglen, GFP_KERNEL);
	if (err)
		goto err_sg_alloc;

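	/* copy each mapped ISP entry into the new table, converting its DMA address back to a kernel virtual buffer */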
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg_set_buf(sg, phys_to_virt(sg_dma_address(src + i)),
			   sg_dma_len(src + i));

	da = (void *)iommu_vmap(isp_iommu, 0, sgt, IOMMU_FLAG);
	if (IS_ERR(da))
		goto err_vmap;

	return (dma_addr_t)da;

err_vmap:
	sg_free_table(sgt);
err_sg_alloc:
	kfree(sgt);
	return -ENOMEM;
}
Example #4
0
/*
 * Map all data buffers for a command into PCI space and
 * setup the scatter/gather list if needed.
 */
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			       struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			       struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);
		if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		e->dataAddr = ctx->dataPA;
	}
}
/**
 * rppc_buffer_lookup - convert a buffer pointer to a remote processor pointer
 * @rpc: rpc instance
 * @uva: buffer pointer that needs to be translated
 * @buva: base pointer of the allocated buffer
 * @fd: dma-buf file descriptor of the allocated buffer
 *
 * This function is used for converting a pointer value in the function
 * arguments to its appropriate remote processor device address value.
 * The @uva and @buva are used for identifying the offset of the function
 * argument pointer in an original allocation. This supports the cases where
 * an offset pointer (eg: alignment, packed buffers etc) needs to be passed
 * as the argument rather than the actual allocated pointer.
 *
 * The remote processor device address translation is done by retrieving the base physical
 * address of the buffer by importing the buffer and converting it to the
 * remote processor device address using a remoteproc api, with adjustments
 * to the offset.
 *
 * The offset is specifically adjusted for OMAP TILER to account for the stride
 * and mapping onto the remote processor.
 *
 * Return: remote processor device address, 0 on failure (implies invalid
 *	   arguments)
 */
dev_addr_t rppc_buffer_lookup(struct rppc_instance *rpc, virt_addr_t uva,
			      virt_addr_t buva, int fd)
{
	phys_addr_t lpa = 0;
	dev_addr_t rda = 0;
	long uoff = uva - buva;
	struct device *dev = rpc->dev;
	struct rppc_dma_buf *buf;

	dev_dbg(dev, "buva = %p uva = %p offset = %ld [0x%016lx] fd = %d\n",
		(void *)buva, (void *)uva, uoff, (ulong)uoff, fd);

	if (uoff < 0) {
		dev_err(dev, "invalid pointer values for uva = %p from buva = %p\n",
			(void *)uva, (void *)buva);
		return rda;
	}

	buf = rppc_find_dmabuf(rpc, fd);
	if (IS_ERR_OR_NULL(buf)) {
		buf = rppc_alloc_dmabuf(rpc, fd, true);
		if (IS_ERR(buf))
			goto out;
	}

	lpa = buf->pa;
	WARN_ON(lpa != sg_dma_address(buf->sgt->sgl));
	uoff = rppc_recalc_off(lpa, uoff);
	lpa += uoff;
	rda = rppc_local_to_remote_da(rpc, lpa);

out:
	dev_dbg(dev, "host uva %p == host pa %pa => remote da %p (fd %d)\n",
		(void *)uva, &lpa, (void *)rda, fd);
	return rda;
}
Example #6
0
static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
                             struct srp_indirect_buf *id,
                             enum dma_data_direction dir, srp_rdma_t rdma_io,
                             int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct srp_direct_buf *md = NULL;
    struct scatterlist dummy, *sg = NULL;
    dma_addr_t token = 0;
    int err = 0;
    int nmd, nsg = 0, len;

    if (dma_map || ext_desc) {
        iue = (struct iu_entry *) sc->SCp.ptr;
        sg = scsi_sglist(sc);

        dprintk("%p %u %u %d %d\n",
                iue, scsi_bufflen(sc), id->len,
                cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
    }

    nmd = id->table_desc.len / sizeof(struct srp_direct_buf);

    if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
            (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
        md = &id->desc_list[0];
        goto rdma;
    }

    if (ext_desc && dma_map) {
        md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
                                &token, GFP_KERNEL);
        if (!md) {
            eprintk("Can't get dma memory %u\n", id->table_desc.len);
            return -ENOMEM;
        }

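        /* wrap the coherent table buffer in a one-entry scatterlist so rdma_io can transfer the indirect descriptor table */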
        sg_init_one(&dummy, md, id->table_desc.len);
        sg_dma_address(&dummy) = token;
        sg_dma_len(&dummy) = id->table_desc.len;
        err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
                      id->table_desc.len);
        if (err) {
            eprintk("Error copying indirect table %d\n", err);
            goto free_mem;
        }
    } else {
        eprintk("This command uses external indirect buffer\n");
        return -EINVAL;
    }

rdma:
    if (dma_map) {
        nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
                         DMA_BIDIRECTIONAL);
        if (!nsg) {
            eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
            err = -EIO;
            goto free_mem;
        }
        len = min(scsi_bufflen(sc), id->len);
    } else
        len = id->len;

    err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
    if (token && dma_map)
        dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);

    return err;
}
static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *buf)
{
	struct pci_dev *pci = dev->pci;
	struct scatterlist *list = buf->vb.dma.sglist;
	int length = buf->vb.dma.sglen;
	struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);

	DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length));

	if( 0 != IS_PLANAR(sfmt->trans)) {
		struct saa7146_pgtable *pt1 = &buf->pt[0];
		struct saa7146_pgtable *pt2 = &buf->pt[1];
		struct saa7146_pgtable *pt3 = &buf->pt[2];
		u32  *ptr1, *ptr2, *ptr3;
		u32 fill;

		int size = buf->fmt->width*buf->fmt->height;
		int i,p,m1,m2,m3,o1,o2;

		switch( sfmt->depth ) {
			case 12: {
				/* create some offsets inside the page table */
				m1 = ((size+PAGE_SIZE)/PAGE_SIZE)-1;
				m2 = ((size+(size/4)+PAGE_SIZE)/PAGE_SIZE)-1;
				m3 = ((size+(size/2)+PAGE_SIZE)/PAGE_SIZE)-1;
				o1 = size%PAGE_SIZE;
				o2 = (size+(size/4))%PAGE_SIZE;
				DEB_CAP(("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",size,m1,m2,m3,o1,o2));
				break;
			}
			case 16: {
				/* create some offsets inside the page table */
				m1 = ((size+PAGE_SIZE)/PAGE_SIZE)-1;
				m2 = ((size+(size/2)+PAGE_SIZE)/PAGE_SIZE)-1;
				m3 = ((2*size+PAGE_SIZE)/PAGE_SIZE)-1;
				o1 = size%PAGE_SIZE;
				o2 = (size+(size/2))%PAGE_SIZE;
				DEB_CAP(("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n",size,m1,m2,m3,o1,o2));
				break;
			}
			default: {
				return -1;
			}
		}

		ptr1 = pt1->cpu;
		ptr2 = pt2->cpu;
		ptr3 = pt3->cpu;

		/* walk all pages, copy all page addresses to ptr1 */
		for (i = 0; i < length; i++, list++) {
			for (p = 0; p * 4096 < list->length; p++, ptr1++) {
				*ptr1 = cpu_to_le32(sg_dma_address(list) - list->offset);
			}
		}
/*
		ptr1 = pt1->cpu;
		for(j=0;j<40;j++) {
			printk("ptr1 %d: 0x%08x\n",j,ptr1[j]);
		}
*/

		/* if we have a user buffer, the first page may not be
		   aligned to a page boundary. */
		pt1->offset = buf->vb.dma.sglist->offset;
		pt2->offset = pt1->offset+o1;
		pt3->offset = pt1->offset+o2;

		/* create video-dma2 page table */
		ptr1 = pt1->cpu;
		for(i = m1; i <= m2 ; i++, ptr2++) {
			*ptr2 = ptr1[i];
		}
		fill = *(ptr2-1);
		for(;i<1024;i++,ptr2++) {
			*ptr2 = fill;
		}
		/* create video-dma3 page table */
		ptr1 = pt1->cpu;
		for(i = m2; i <= m3; i++,ptr3++) {
			*ptr3 = ptr1[i];
		}
		fill = *(ptr3-1);
		for(;i<1024;i++,ptr3++) {
			*ptr3 = fill;
		}
		/* finally: finish up video-dma1 page table */
		ptr1 = pt1->cpu+m1;
		fill = pt1->cpu[m1];
		for(i=m1;i<1024;i++,ptr1++) {
			*ptr1 = fill;
		}
/*
		ptr1 = pt1->cpu;
		ptr2 = pt2->cpu;
		ptr3 = pt3->cpu;
		for(j=0;j<40;j++) {
			printk("ptr1 %d: 0x%08x\n",j,ptr1[j]);
		}
		for(j=0;j<40;j++) {
			printk("ptr2 %d: 0x%08x\n",j,ptr2[j]);
		}
		for(j=0;j<40;j++) {
			printk("ptr3 %d: 0x%08x\n",j,ptr3[j]);
		}
*/
	} else {
		struct saa7146_pgtable *pt = &buf->pt[0];
		return saa7146_pgtable_build_single(pci, pt, list, length);
	}

	return 0;
}
static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
	struct msmsdcc_nc_dmadata *nc;
	dmov_box *box;
	uint32_t rows;
	uint32_t crci;
	unsigned int n;
	int i, rc;
	struct scatterlist *sg = data->sg;

	rc = validate_dma(host, data);
	if (rc)
		return rc;

	host->dma.sg = data->sg;
	host->dma.num_ents = data->sg_len;

	nc = host->dma.nc;

	if (host->pdev_id == 1)
		crci = MSMSDCC_CRCI_SDC1;
	else if (host->pdev_id == 2)
		crci = MSMSDCC_CRCI_SDC2;
	else if (host->pdev_id == 3)
		crci = MSMSDCC_CRCI_SDC3;
	else if (host->pdev_id == 4)
		crci = MSMSDCC_CRCI_SDC4;
	else {
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOENT;
	}

	if (data->flags & MMC_DATA_READ)
		host->dma.dir = DMA_FROM_DEVICE;
	else
		host->dma.dir = DMA_TO_DEVICE;

	/* host->curr.user_pages = (data->flags & MMC_DATA_USERPAGE); */
	host->curr.user_pages = 0;

	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
	               host->dma.num_ents, host->dma.dir);

	if (n != host->dma.num_ents) {
		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
		       mmc_hostname(host->mmc));
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOMEM;
	}

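	/* build one data-mover box command per mapped scatterlist segment */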
	box = &nc->cmd[0];
	for (i = 0; i < host->dma.num_ents; i++) {
		box->cmd = CMD_MODE_BOX;

		if (i == (host->dma.num_ents - 1))
			box->cmd |= CMD_LC;
		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
		       (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
		       (sg_dma_len(sg) / MCI_FIFOSIZE) ;

		if (data->flags & MMC_DATA_READ) {
			box->src_row_addr = msmsdcc_fifo_addr(host);
			box->dst_row_addr = sg_dma_address(sg);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
			                   (MCI_FIFOSIZE);
			box->row_offset = MCI_FIFOSIZE;

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_SRC_CRCI(crci);
		} else {
			box->src_row_addr = sg_dma_address(sg);
			box->dst_row_addr = msmsdcc_fifo_addr(host);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
			                   (MCI_FIFOSIZE);
			box->row_offset = (MCI_FIFOSIZE << 16);

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_DST_CRCI(crci);
		}
		box++;
		sg++;
	}

	/* location of command block must be 64 bit aligned */
	BUG_ON(host->dma.cmd_busaddr & 0x07);

	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
	                       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;

	return 0;
}
/**
 * usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request
 * @io: request block being initialized.  until usb_sg_wait() returns,
 *	treat this as a pointer to an opaque block of memory,
 * @dev: the usb device that will send or receive the data
 * @pipe: endpoint "pipe" used to transfer the data
 * @period: polling rate for interrupt endpoints, in frames or
 * 	(for high speed endpoints) microframes; ignored for bulk
 * @sg: scatterlist entries
 * @nents: how many entries in the scatterlist
 * @length: how many bytes to send from the scatterlist, or zero to
 * 	send every byte identified in the list.
 * @mem_flags: SLAB_* flags affecting memory allocations in this call
 *
 * Returns zero for success, else a negative errno value.  This initializes a
 * scatter/gather request, allocating resources such as I/O mappings and urb
 * memory (except maybe memory used by USB controller drivers).
 *
 * The request must be issued using usb_sg_wait(), which waits for the I/O to
 * complete (or to be canceled) and then cleans up all resources allocated by
 * usb_sg_init().
 *
 * The request may be canceled with usb_sg_cancel(), either before or after
 * usb_sg_wait() is called.
 */
int usb_sg_init (
	struct usb_sg_request	*io,
	struct usb_device	*dev,
	unsigned		pipe, 
	unsigned		period,
	struct scatterlist	*sg,
	int			nents,
	size_t			length,
	int			mem_flags
)
{
	int			i;
	int			urb_flags;
	int			dma;

	if (!io || !dev || !sg
			|| usb_pipecontrol (pipe)
			|| usb_pipeisoc (pipe)
			|| nents <= 0)
		return -EINVAL;

	spin_lock_init (&io->lock);
	io->dev = dev;
	io->pipe = pipe;
	io->sg = sg;
	io->nents = nents;

	/* not all host controllers use DMA (like the mainstream pci ones);
	 * they can use PIO (sl811) or be software over another transport.
	 */
	dma = (dev->dev.dma_mask != 0);
	if (dma)
		io->entries = usb_buffer_map_sg (dev, pipe, sg, nents);
	else
		io->entries = nents;

	/* initialize all the urbs we'll use */
	if (io->entries <= 0)
		return io->entries;

	io->count = 0;
	io->urbs = kmalloc (io->entries * sizeof *io->urbs, mem_flags);
	if (!io->urbs)
		goto nomem;

	urb_flags = URB_ASYNC_UNLINK | URB_NO_TRANSFER_DMA_MAP
			| URB_NO_INTERRUPT;
	if (usb_pipein (pipe))
		urb_flags |= URB_SHORT_NOT_OK;

	for (i = 0; i < io->entries; i++, io->count = i) {
		unsigned		len;

		io->urbs [i] = usb_alloc_urb (0, mem_flags);
		if (!io->urbs [i]) {
			io->entries = i;
			goto nomem;
		}

		io->urbs [i]->dev = NULL;
		io->urbs [i]->pipe = pipe;
		io->urbs [i]->interval = period;
		io->urbs [i]->transfer_flags = urb_flags;

		io->urbs [i]->complete = sg_complete;
		io->urbs [i]->context = io;
		io->urbs [i]->status = -EINPROGRESS;
		io->urbs [i]->actual_length = 0;

		if (dma) {
			/* hc may use _only_ transfer_dma */
			io->urbs [i]->transfer_dma = sg_dma_address (sg + i);
			len = sg_dma_len (sg + i);
		} else {
			/* hc may use _only_ transfer_buffer */
			io->urbs [i]->transfer_buffer =
				page_address (sg [i].page) + sg [i].offset;
			len = sg [i].length;
		}

		if (length) {
			len = min_t (unsigned, len, length);
			length -= len;
			if (length == 0)
				io->entries = i + 1;
		}
		io->urbs [i]->transfer_buffer_length = len;
	}
	io->urbs [--i]->transfer_flags &= ~URB_NO_INTERRUPT;

	/* transaction state */
	io->status = 0;
	io->bytes = 0;
	init_completion (&io->complete);
	return 0;

nomem:
	sg_clean (io);
	return -ENOMEM;
}
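A minimal usage sketch for the routine above, based only on its kerneldoc: the request is initialized, issued with usb_sg_wait(), and its status is read back. The helper name, device, endpoint, and scatterlist set-up are illustrative assumptions.

/* hedged sketch: issue a bulk-IN scatter/gather read with the API documented above */
static int example_bulk_sg_read(struct usb_device *udev, unsigned int ep,
				struct scatterlist *sg, int nents)
{
	struct usb_sg_request req;
	int ret;

	ret = usb_sg_init(&req, udev, usb_rcvbulkpipe(udev, ep), 0,
			  sg, nents, 0, GFP_KERNEL);
	if (ret)
		return ret;

	usb_sg_wait(&req);	/* blocks until the transfer completes or is cancelled */
	return req.status;
}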
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);


	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);

		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}

		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #11
0
/*********************************************************************
* Method	 :  
* Description:  
* Parameters :  
*	            
* Returns    :  
* Note       :
*********************************************************************/
static void  sdxc_init_idma_des(struct awsmc_host* smc_host, struct mmc_data* data)
{
    struct smc_idma_des* pdes = smc_host->pdes;
    u32 des_idx = 0;
    u32 buff_frag_num = 0;
    u32 remain;
    u32 i, j;
    
    /* initialize the IDMA descriptors */
    #if SDXC_DES_MODE == 0      //chain mode
    for (i=0; i<data->sg_len; i++)
    {
        buff_frag_num = data->sg[i].length >> SDXC_DES_NUM_SHIFT;   //SDXC_DES_NUM_SHIFT == 13, num = len/8192 = len>>13
        remain = data->sg[i].length & (SDXC_DES_BUFFER_MAX_LEN-1);
        if (remain)
        {
            buff_frag_num ++;
        }
        else
        {
            remain = SDXC_DES_BUFFER_MAX_LEN;
        }
        
        eLIBs_CleanFlushDCacheRegion(sg_virt(&data->sg[i]), data->sg[i].length);
        for (j=0; j < buff_frag_num; j++, des_idx++)
        {
			memset((void*)&pdes[des_idx], 0, sizeof(struct smc_idma_des));
            pdes[des_idx].des_chain = 1;
            pdes[des_idx].own = 1;
            pdes[des_idx].dic = 1;
            if (buff_frag_num > 1 && j != buff_frag_num-1)
            {
                pdes[des_idx].data_buf1_sz = 0x1fff & SDXC_DES_BUFFER_MAX_LEN;
            }
            else
            {
                pdes[des_idx].data_buf1_sz = remain;
            }
            
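            /* each descriptor covers one SDXC_DES_BUFFER_MAX_LEN chunk of the mapped segment */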
            pdes[des_idx].buf_addr_ptr1 = sg_dma_address(&data->sg[i]) + j * SDXC_DES_BUFFER_MAX_LEN;
            if (i==0 && j==0)
            {
                pdes[des_idx].first_des = 1;
            }
            
            if ((i == data->sg_len-1) && (j == buff_frag_num-1))
            {
                pdes[des_idx].dic = 0;
                pdes[des_idx].last_des = 1;
                pdes[des_idx].end_of_ring = 1;
                pdes[des_idx].buf_addr_ptr2 = 0;
            }
            else
            {
                pdes[des_idx].buf_addr_ptr2 = __pa(&pdes[des_idx+1]);
            }
			
//            awsmc_info("sg %d, frag %d, remain %d, des[%d](%08x): [0] = %08x, [1] = %08x, [2] = %08x, [3] = %08x\n", i, j, remain, 
//                                                                             des_idx, (u32)&pdes[des_idx], 
//                                                                             (u32)((u32*)&pdes[des_idx])[0], (u32)((u32*)&pdes[des_idx])[1], 
//                                                                             (u32)((u32*)&pdes[des_idx])[2], (u32)((u32*)&pdes[des_idx])[3]);
																			 
        }
    }
    #else      //fix length skip mode
    
    #endif
    
    eLIBs_CleanFlushDCacheRegion(pdes, sizeof(struct smc_idma_des) * (des_idx+1));
    
    return;
}
static int buffer_mgr_build_mlli(struct device *dev,
				 struct sg_data_array *sg_data,
				 struct mlli_params *mlli_params)
{
	struct scatterlist *cur_sg_entry;
	uint32_t *cur_mlli_entry;
	uint32_t data_len;
	uint32_t curr_nents = 0;
	uint32_t i;

	DX_LOG_DEBUG("sg_params: sg_data->num_of_sg = 0x%08X \n",
		   (uint32_t)sg_data->num_of_sg);
	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, GFP_KERNEL,
			       &mlli_params->mlli_dma_addr);
	if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
		DX_LOG_ERR("mlli table dma_pool_alloc() failed\n");
		return -ENOMEM;
	}
	cur_mlli_entry = (unsigned int *)mlli_params->mlli_virt_addr;

	/* go over all SG and link it to one MLLI table */
	for(i = 0; i < sg_data->num_of_sg;i++) {

		data_len = sg_data->sg_data_len_array[i];

		for (cur_sg_entry = sg_data->sg_array[i];
		     (cur_sg_entry != NULL) && (data_len !=0);
		     cur_sg_entry = sg_next(cur_sg_entry) , cur_mlli_entry+=2) {
			uint32_t entry_len =
				((data_len > sg_dma_len(cur_sg_entry))) ?
				sg_dma_len(cur_sg_entry) : data_len;
			data_len -= entry_len;
			cur_mlli_entry[SEP_LLI_ADDR_WORD_OFFSET] =
					sg_dma_address(cur_sg_entry);
			cur_mlli_entry[SEP_LLI_SIZE_WORD_OFFSET] =
					entry_len;
			DX_LOG_DEBUG("entry[%d] : mlli addr = 0x%08lX "
				     "mlli_len =0x%08lX\n",
				   curr_nents,
				   (unsigned long)cur_mlli_entry[SEP_LLI_ADDR_WORD_OFFSET],
				   (unsigned long)cur_mlli_entry[SEP_LLI_SIZE_WORD_OFFSET]);
			curr_nents++;
		} /* for */
		/* set last bit in the current table*/
		SEP_LLI_SET(&mlli_params->mlli_virt_addr[
			SEP_LLI_ENTRY_BYTE_SIZE*(curr_nents-1)],
			LAST, sg_data->is_last[i]);
	} /* for */
	mlli_params->mlli_len =
		curr_nents * SEP_LLI_ENTRY_BYTE_SIZE;

	SEP_LLI_SET(&mlli_params->mlli_virt_addr[
		SEP_LLI_ENTRY_BYTE_SIZE*(curr_nents-1)],
		LAST, 1);

	DX_LOG_DEBUG("MLLI params: virt addr = 0x%08lX "
		     "dma_address=0x%08lX, mlli_len =0x%08X\n",
		   (unsigned long)mlli_params->mlli_virt_addr,
		   (unsigned long)mlli_params->mlli_dma_addr,
		   (unsigned)mlli_params->mlli_len);
	return 0;
}
Example #13
0
static inline int asd_map_scatterlist(struct sas_task *task,
				      struct sg_el *sg_arr,
				      gfp_t gfp_flags)
{
	struct asd_ascb *ascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct scatterlist *sc;
	int num_sg, res;

	if (task->data_dir == PCI_DMA_NONE)
		return 0;

	if (task->num_scatter == 0) {
		void *p = task->scatter;
		dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
						task->total_xfer_len,
						task->data_dir);
		sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
		sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
		sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
		return 0;
	}

	num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
			    task->data_dir);
	if (num_sg == 0)
		return -ENOMEM;

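	/* more than three mapped segments: build an external SG list in coherent memory and chain the inline entries to it */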
	if (num_sg > 3) {
		int i;

		ascb->sg_arr = asd_alloc_coherent(asd_ha,
						  num_sg*sizeof(struct sg_el),
						  gfp_flags);
		if (!ascb->sg_arr) {
			res = -ENOMEM;
			goto err_unmap;
		}
		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
			struct sg_el *sg =
				&((struct sg_el *)ascb->sg_arr->vaddr)[i];
			sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
			sg->size = cpu_to_le32((u32)sg_dma_len(sc));
			if (i == num_sg-1)
				sg->flags |= ASD_SG_EL_LIST_EOL;
		}

		for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sc));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
		}
		sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
		sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;

		memset(&sg_arr[2], 0, sizeof(*sg_arr));
		sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
	} else {
		int i;
		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sc));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
		}
		sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
	}

	return 0;
err_unmap:
	pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
		     task->data_dir);
	return res;
}
Example #14
0
/**
 * Generic LRW-AES en/decryption
 * @param encrypt non-zero to encrypt, zero to decrypt
 * @param in Source of data
 * @param out Location to place en/decrypted data
 * @param nents Number of entries in scatter list, in and out must have the same
 *              number of entries
 * @param iv 8 byte array containing the I-Value
 * @return error code or 0 for success
 */
static int ox800_aeslrw_gencrypt(  u8  encrypt,
                            struct scatterlist* in,
                            struct scatterlist* out,
                            unsigned int nents,
                            u8  iv[])
{
    oxnas_dma_channel_t* dma_in;
    oxnas_dma_channel_t* dma_out;
    struct scatterlist* out_;
    char same_buffer;
    int status = 0;

    /* get dma resources (non blocking) */
    dma_in = oxnas_dma_request(0);
    dma_out = oxnas_dma_request(0);
    
    VPRINTK("dma in %d out %d \n", 
        dma_in->channel_number_,  
        dma_out->channel_number_);  

    if ((dma_in) && (dma_out)) {
        u32 reg;
        
        // shouldn't be busy or full
        reg = readl( OX800DPE_STATUS );
        if (! (reg & OX800DPE_STAT_IDLE) )
            printk("not idle after abort toggle");
        if (reg & OX800DPE_STAT_TX_NOTEMPTY)
            printk("tx fifo not empty after abort toggle");
        if (! (reg & OX800DPE_STAT_RX_SPACE) )
            printk("rx not empty after abort toggle");
        
        /* check to see if the destination buffer is the same as the source */
        same_buffer = (sg_phys(in) == sg_phys(out));
        
        /* map transfers */
        if (same_buffer) {
            dma_map_sg(NULL, in, nents, DMA_BIDIRECTIONAL);
            out_ = in;
        } else {
            /* map transfers */
            dma_map_sg(NULL, in, nents, DMA_TO_DEVICE);
            dma_map_sg(NULL, out, nents, DMA_FROM_DEVICE);
            out_ = out;
        }
#ifdef CIPHER_USE_SG_DMA        
        /* setup DMA transfers */ 
        oxnas_dma_device_set_sg(
            dma_in,
            OXNAS_DMA_TO_DEVICE,
            in,
            nents,
            &oxnas_dpe_rx_dma_settings,
            OXNAS_DMA_MODE_INC);
            
        oxnas_dma_device_set_sg(
            dma_out,
            OXNAS_DMA_FROM_DEVICE,
            out_,
            nents,
            &oxnas_dpe_tx_dma_settings,
            OXNAS_DMA_MODE_INC);

#else
        oxnas_dma_device_set(
            dma_in,
            OXNAS_DMA_TO_DEVICE,
            (unsigned char* )sg_dma_address(in),
            sg_dma_len(in),
            &oxnas_dpe_rx_dma_settings,
            OXNAS_DMA_MODE_INC,
            1 /*paused */ );
            
        oxnas_dma_device_set(
            dma_out,
            OXNAS_DMA_FROM_DEVICE,
            (unsigned char* )sg_dma_address(out_),
            sg_dma_len(out_),
            &oxnas_dpe_tx_dma_settings,
            OXNAS_DMA_MODE_INC,
            1 /*paused */ );
#endif

        /* set dma callbacks */
        oxnas_dma_set_callback(
            dma_in,
            OXNAS_DMA_CALLBACK_ARG_NUL,
            OXNAS_DMA_CALLBACK_ARG_NUL);
        
        oxnas_dma_set_callback(
            dma_out,
            OXNAS_DMA_CALLBACK_ARG_NUL,
            OXNAS_DMA_CALLBACK_ARG_NUL);
        
        
        /* set for AES LRW encryption or decryption */
        writel( (encrypt ? OX800DPE_CTL_DIRECTION_ENC : 0 ) |
               OX800DPE_CTL_MODE_LRW_AES, 
               OX800DPE_CONTROL);
        wmb();
        
        /* write in I-value */
        writel(*((u32* )&(iv[0])), OX800DPE_DATA_LRW0 );
        writel(*((u32* )&(iv[4])), OX800DPE_DATA_LRW1 );
        
        wmb();

        /* wait until done */
        while(  !(OX800DPE_STAT_IDLE & readl( OX800DPE_STATUS )) );
        
        /* start dma */
        oxnas_dma_start(dma_out);
        oxnas_dma_start(dma_in);
    
        /* wait (once for each channel) */
        while ( oxnas_dma_is_active( dma_out ) ||
                oxnas_dma_is_active( dma_in  ) )
        {
            schedule();
        }
        
        /* free any allocated dma channels */
        oxnas_dma_free( dma_in );
        oxnas_dma_free( dma_out );

        /* unmap transfers */
        if (same_buffer) {
            dma_unmap_sg(NULL, in, nents, DMA_BIDIRECTIONAL);
        } else {
            dma_unmap_sg(NULL, in, nents, DMA_TO_DEVICE);
            dma_unmap_sg(NULL, out, nents, DMA_FROM_DEVICE);
        }
        
        status = ox800_aeslrw_driver.result;
    } else {        
        /* free any allocated dma channels */
        if (dma_in) 
            oxnas_dma_free( dma_in );
        if (dma_out)
            oxnas_dma_free( dma_out );
        status = -EBUSY;        
    }
    /* return an indication of success */
    return status;
}
Example #15
0
static int hi_mci_setup_data(struct himci_host *host, struct mmc_data *data)
{
	unsigned int sg_phyaddr, sg_length;
	unsigned int i, ret = 0;
	unsigned int data_size;
	unsigned int max_des, des_cnt;
	struct himci_des *des;

	himci_trace(2, "begin");
	himci_assert(host);
	himci_assert(data);

	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dma_dir = DMA_FROM_DEVICE;
	else
		host->dma_dir = DMA_TO_DEVICE;

	host->dma_sg = data->sg;
	host->dma_sg_num = dma_map_sg(mmc_dev(host->mmc),
			data->sg, data->sg_len, host->dma_dir);
	himci_assert(host->dma_sg_num);
	himci_trace(2, "host->dma_sg_num is %d\n", host->dma_sg_num);

	data_size = data->blksz * data->blocks;
	if (data_size > (DMA_BUFFER * MAX_DMA_DES)) {
		himci_error("mci request data_size is too big!\n");
		ret = -1;
		goto out;
	}

	himci_trace(2, "host->dma_paddr is 0x%08X,host->dma_vaddr is 0x%08X\n",
			(unsigned int)host->dma_paddr,
			(unsigned int)host->dma_vaddr);

	max_des = (PAGE_SIZE/sizeof(struct himci_des));
	des = (struct himci_des *)host->dma_vaddr;
	des_cnt = 0;

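	/* turn each mapped segment into IDMAC descriptors of at most 0x1F00 bytes */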
	for (i = 0; i < host->dma_sg_num; i++) {
		sg_length = sg_dma_len(&data->sg[i]);
		sg_phyaddr = sg_dma_address(&data->sg[i]);
		himci_trace(2, "sg[%d] sg_length is 0x%08X, "
				"sg_phyaddr is 0x%08X\n",
				i, (unsigned int)sg_length,
				(unsigned int)sg_phyaddr);
		while (sg_length) {
			des[des_cnt].idmac_des_ctrl = DMA_DES_OWN
				| DMA_DES_NEXT_DES;
			des[des_cnt].idmac_des_buf_addr = sg_phyaddr;
			/* idmac_des_next_addr is paddr for dma */
			des[des_cnt].idmac_des_next_addr = host->dma_paddr
				+ (des_cnt + 1) * sizeof(struct himci_des);

			if (sg_length >= 0x1F00) {
				des[des_cnt].idmac_des_buf_size = 0x1F00;
				sg_length -= 0x1F00;
				sg_phyaddr += 0x1F00;
			} else {
				/* FIXME:data alignment */
				des[des_cnt].idmac_des_buf_size = sg_length;
				sg_length = 0;
			}

			himci_trace(2, "des[%d] vaddr  is 0x%08X", i,
					(unsigned int)&des[i]);
			himci_trace(2, "des[%d].idmac_des_ctrl is 0x%08X",
			       i, (unsigned int)des[i].idmac_des_ctrl);
			himci_trace(2, "des[%d].idmac_des_buf_size is 0x%08X",
				i, (unsigned int)des[i].idmac_des_buf_size);
			himci_trace(2, "des[%d].idmac_des_buf_addr 0x%08X",
				i, (unsigned int)des[i].idmac_des_buf_addr);
			himci_trace(2, "des[%d].idmac_des_next_addr is 0x%08X",
				i, (unsigned int)des[i].idmac_des_next_addr);
			des_cnt++;
		}

		himci_assert(des_cnt < max_des);
	}
	des[0].idmac_des_ctrl |= DMA_DES_FIRST_DES;
	des[des_cnt - 1].idmac_des_ctrl |= DMA_DES_LAST_DES;
	des[des_cnt - 1].idmac_des_next_addr = 0;
out:
	return ret;
}
Example #16
0
static void ak98_sdio_start_data(struct ak98_mci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout;
	unsigned long long clks;
	void __iomem *base;
    
	PK("%s: blksz %04x blks %04x flags %08x\n",
	       __func__, data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	host->data_xfered = 0;

	ak98_mci_init_sg(host, data);  
	
	clks = (unsigned long long)data->timeout_ns * host->bus_clkrate;
	do_div(clks, 1000000000UL);
	timeout = data->timeout_clks + (unsigned int)clks;
	
	PK("timeout: %uns / %uclks, clks=%d\n", data->timeout_ns, data->timeout_clks,clks);

	base = host->base;
	writel(timeout, base + AK98MCIDATATIMER);
	writel(host->size, base + AK98MCIDATALENGTH);

	/* set l2 fifo info */
	writel (MCI_DMA_BUFEN | MCI_DMA_SIZE(MCI_L2FIFO_SIZE/4),
		base + AK98MCIDMACTRL);

#ifdef AKMCI_L2FIFO_DMA
    u32    regval;	

	/* get l2 fifo */
	regval = readl(host->l2base + L2FIFO_ASSIGN1);
	regval = (regval & (~(3<<12))) | (MCI_L2FIFO_NUM << 12);
	writel(regval, host->l2base + L2FIFO_ASSIGN1);

	regval = readl(host->l2base + L2FIFO_CONF1);
	regval |= (1 << (0 + MCI_L2FIFO_NUM))
		| (1 << (16 + MCI_L2FIFO_NUM))
		| (1 << (24 + MCI_L2FIFO_NUM));
	if (data->flags & MMC_DATA_WRITE)
		regval |= (1 << (8 + MCI_L2FIFO_NUM));
	else
		regval &= ~(1 << (8 + MCI_L2FIFO_NUM));
	writel(regval, host->l2base + L2FIFO_CONF1);

	/* set dma addr */
	if (data->flags & MMC_DATA_WRITE)
		dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, DMA_TO_DEVICE);
	else
		dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, DMA_FROM_DEVICE);
	writel(sg_dma_address(data->sg), host->l2base + MCI_L2FIFO_NUM);

	/* set dma size */
	if (host->size > L2DMA_MAX_SIZE)
		dma_size = L2DMA_MAX_SIZE;
	dma_times = dma_size/64;
	writel(dma_times, host->l2base + 0x40 + MCI_L2FIFO_NUM);

	if (host->size > L2DMA_MAX_SIZE) {
		/* need to handle dma int */
		regval = readl(host->l2base + L2FIFO_INTEN);
		regval |= (1 << (9 + MCI_L2FIFO_NUM));
		writel(regval, host->l2base + L2FIFO_INTEN);

		request_irq(AK88_L2MEM_IRQ(x)(9+MCI_L2FIFO_NUM), ak98_mcil2_irq,
			    IRQF_DISABLED, DRIVER_NAME "(dma)", host);
	}

	/* when to start dma? */
	regval = readl(host->l2base + L2FIFO_DMACONF);
	regval |= (1 | (1 << (24 + MCI_L2FIFO_NUM)));
	writel(regval, host->l2base + L2FIFO_DMACONF);

	if (dma_size % 64) {
		/* fraction DMA */
		(8 * MCI_L2FIFO_NUM)
	}

	/* set l2 fifo info */
	writel (MCI_DMA_BUFEN | MCI_DMA_EN | MCI_DMA_SIZE(MCI_L2FIFO_SIZE/4),
		base + AK98MCIDMACTRL);
#endif

	datactrl = MCI_DPSM_ENABLE;

	switch (host->bus_width) {
	case MMC_BUS_WIDTH_8:
		datactrl |= MCI_DPSM_BUSMODE(2);
		break;
	case MMC_BUS_WIDTH_4:
		datactrl |= MCI_DPSM_BUSMODE(1);
		break;
	case MMC_BUS_WIDTH_1:
	default:
		datactrl |= MCI_DPSM_BUSMODE(0);
		break;
	}

	if (data->flags & MMC_DATA_STREAM) {
		DBG(host, "%s", "STREAM Data\n");
		datactrl |= MCI_DPSM_STREAM;
	} else {
		DBG(host, "BLOCK Data: %u x %u\n", data->blksz, data->blocks);
		datactrl |= MCI_DPSM_BLOCKSIZE(data->blksz);
	}

	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
	}

	writel(readl(base + AK98MCIMASK) | MCI_DATAIRQMASKS, base + AK98MCIMASK);
	writel(datactrl, base + AK98MCIDATACTRL);

	PK("ENABLE DATA IRQ, datactrl: 0x%08x, timeout: 0x%08x, len: %u\n",
	       datactrl, readl(base+AK98MCIDATATIMER), host->size);

#ifdef AKMCI_L2FIFO_PIO
	if (data->flags & MMC_DATA_WRITE)
		mci_xfer(host);
#endif

#ifdef AKMCI_INNERFIFO_PIO
    unsigned int irqmask;
    
	irqmask = readl(base + AK98MCIMASK);
	if (data->flags & MMC_DATA_READ) {
		if (host->size > MCI_FIFOSIZE)
			irqmask |= MCI_FIFOFULLMASK;
		else
			;	/* wait for DATAEND int */
	} else {
		irqmask |= MCI_FIFOEMPTYMASK;
	}

	writel(irqmask, base + AK98MCIMASK);
#endif
}
Example #17
0
/**
 * Send I/O request to firmware.
 */
static          bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s      *sge;
	u32        pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
			*(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
int map_ablkcipher_request(struct device *dev, struct ablkcipher_request *req)
{
	struct ablkcipher_req_ctx *areq_ctx = ablkcipher_request_ctx(req);
	unsigned int iv_size = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct sg_data_array sg_data;
	struct buff_mgr_handle *buff_mgr = crypto_drvdata->buff_mgr_handle;
	int dummy = 0;
	int rc = 0;

	areq_ctx->sec_dir = 0;
	areq_ctx->dma_buf_type = DX_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_sg = 0;

	/* Map IV buffer */
	if (likely(iv_size != 0) ) {
		dump_byte_array("iv", (uint8_t *)req->info, iv_size);
		areq_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)req->info,
				       iv_size, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					areq_ctx->gen_ctx.iv_dma_addr))) {
			DX_LOG_ERR("Mapping iv %u B at va=0x%08lX "
				   "for DMA failed\n",iv_size,
				    (unsigned long)req->info);
			return -ENOMEM;
		}
		DX_LOG_DEBUG("Mapped iv %u B at va=0x%08lX to dma=0x%08lX\n",
				iv_size, (unsigned long)req->info,
			     (unsigned long)areq_ctx->gen_ctx.iv_dma_addr);
	} else {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src sg */
	if ( sg_is_last(req->src) &&
	     (sg_page(req->src) == NULL) &&
	     sg_dma_address(req->src)) {
		/* The source is secure no mapping is needed */
		areq_ctx->sec_dir = DX_SRC_DMA_IS_SECURE;
		areq_ctx->in_nents = 1;
	} else {
		if ( unlikely( dx_map_sg( dev,req->src, req->nbytes,
					  DMA_BIDIRECTIONAL,
					  &areq_ctx->in_nents,
					  LLI_MAX_NUM_OF_DATA_ENTRIES,
					  &dummy))){
			rc = -ENOMEM;
			goto fail_unmap_iv;
		}

		if ( areq_ctx->in_nents > 1 ) {
			areq_ctx->dma_buf_type = DX_DMA_BUF_MLLI;
		}
	}

	if ( unlikely(req->src == req->dst)) {
		if ( areq_ctx->sec_dir == DX_SRC_DMA_IS_SECURE ) {
			DX_LOG_ERR("Secure key inplace operation "
				   "is not supported \n");
			/* both sides are secure */
			rc = -ENOMEM;
			goto fail_unmap_din;
		}
		/* Handle inplace operation */
		if ( unlikely(areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI) ) {
			areq_ctx->out_nents = 0;
			buffer_mgr_set_sg_entry(&sg_data,
						areq_ctx->in_nents,
						req->src,
						req->nbytes,
						true);
		}
	} else {
		if ( sg_is_last(req->dst) &&
		     (sg_page(req->dst) == NULL) &&
		     sg_dma_address(req->dst)) {
			if ( areq_ctx->sec_dir == DX_SRC_DMA_IS_SECURE ) {
				DX_LOG_ERR("Secure key in both sides is"
					   "not supported \n");
				/* both sides are secure */
				rc = -ENOMEM;
				goto fail_unmap_din;
			}
			/* The dest is secure no mapping is needed */
			areq_ctx->sec_dir = DX_DST_DMA_IS_SECURE;
			areq_ctx->out_nents = 1;
		} else {
			/* Map the dst sg */
			if ( unlikely( dx_map_sg(dev,req->dst, req->nbytes,
						 DMA_BIDIRECTIONAL,
						 &areq_ctx->out_nents,
						 LLI_MAX_NUM_OF_DATA_ENTRIES,
						 &dummy))){
				rc = -ENOMEM;
				goto fail_unmap_din;
			}

			if ( areq_ctx->out_nents > 1 ) {
				areq_ctx->dma_buf_type = DX_DMA_BUF_MLLI;
			}
		}
		if ( unlikely( (areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI) ) ) {
			if (areq_ctx->sec_dir != DX_SRC_DMA_IS_SECURE) {
				buffer_mgr_set_sg_entry(&sg_data,
							areq_ctx->in_nents,
							req->src,
							req->nbytes,
							true);
			}
			if (areq_ctx->sec_dir != DX_DST_DMA_IS_SECURE) {
				buffer_mgr_set_sg_entry(&sg_data,
							areq_ctx->out_nents,
							req->dst,
							req->nbytes,
							true);
			}
		} /*few entries */
	} /* !inplace */

	if (unlikely(areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI)) {
#if (DX_DEV_SIGNATURE == DX_CC441P_SIG)
		if (areq_ctx->sec_dir) {
			/* one of the sides is secure, can't use MLLI*/
			rc = -EINVAL;
			goto fail_unmap_dout;
		}
#endif
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		if (unlikely(buffer_mgr_build_mlli(dev, &sg_data, mlli_params))) {
			rc = -ENOMEM;
			goto fail_unmap_dout;
		}
	} /*MLLI case*/

	DX_LOG_DEBUG(" buf type = %s \n",
		     dx_get_buff_type(areq_ctx->dma_buf_type));
	return 0;
fail_unmap_dout:
	if (areq_ctx->sec_dir != DX_DST_DMA_IS_SECURE) {
		dma_unmap_sg(dev, req->dst,
			     areq_ctx->out_nents, DMA_BIDIRECTIONAL);
	}
fail_unmap_din:
	if (areq_ctx->sec_dir != DX_SRC_DMA_IS_SECURE) {
		dma_unmap_sg(dev, req->src,
			     areq_ctx->in_nents, DMA_BIDIRECTIONAL);
	}
fail_unmap_iv:
	if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 iv_size, DMA_TO_DEVICE);
	}
	return rc;
}
static void sdhci_bcm2708_dma_complete_irq(struct sdhci_host *host,
					   u32 dma_cs)
{
	struct sdhci_bcm2708_priv *host_priv = SDHCI_HOST_PRIV(host);
	struct mmc_data *data;
	struct scatterlist *sg;
	int sg_len;
	int sg_ix;
	int sg_todo;
//	unsigned long flags;

	BUG_ON(NULL == host);

//	spin_lock_irqsave(&host->lock, flags);
	data = host->data;

#ifdef CHECK_DMA_USE
	if (host_priv->dmas_pending <= 0)
		DBG("on completion no DMA in progress - "
		    "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
		    hptime(), since_ns(host_priv->when_started),
		    since_ns(host_priv->when_reset),
		    since_ns(host_priv->when_stopped));
	else if (host_priv->dmas_pending > 1)
		DBG("still %d DMA in progress after completion - "
		    "now %"FMT_HPT" started %lu reset %lu stopped %lu\n",
		    host_priv->dmas_pending - 1,
		    hptime(), since_ns(host_priv->when_started),
		    since_ns(host_priv->when_reset),
		    since_ns(host_priv->when_stopped));
	BUG_ON(host_priv->dmas_pending <= 0);
	host_priv->dmas_pending -= 1;
	host_priv->when_stopped = hptime();
#endif
	host_priv->dma_wanted = 0;

	if (NULL == data) {
		DBG("PDMA unused completion - status 0x%X\n", dma_cs);
//		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	sg = data->sg;
	sg_len = data->sg_len;
	sg_todo = sg_dma_len(&sg[host_priv->sg_ix]);

	DBG("PDMA complete %d/%d [%d]/[%d]..\n",
	    host_priv->sg_done, sg_todo,
	    host_priv->sg_ix+1, sg_len);

	BUG_ON(host_priv->sg_done > sg_todo);

	if (host_priv->sg_done >= sg_todo) {
		host_priv->sg_ix++;
		host_priv->sg_done = 0;
	}

	sg_ix = host_priv->sg_ix;
	if (sg_ix < sg_len) {
		u32 irq_mask;
		/* Set off next DMA if we've got the capacity */

		if (data->flags & MMC_DATA_READ)
			irq_mask = SDHCI_INT_DATA_AVAIL;
		else
			irq_mask = SDHCI_INT_SPACE_AVAIL;

		/* We have to use the interrupt status register on the BCM2708
		   rather than the SDHCI_PRESENT_STATE register because latency
		   in the glue logic means that the information retrieved from
		   the latter is not always up-to-date w.r.t the DMA engine -
		   it may not indicate that a read or a write is ready yet */
		if (sdhci_bcm2708_raw_readl(host, SDHCI_INT_STATUS) &
		    irq_mask) {
			size_t bytes = sg_dma_len(&sg[sg_ix]) -
				       host_priv->sg_done;
			dma_addr_t addr = sg_dma_address(&data->sg[sg_ix]) +
					  host_priv->sg_done;

			/* acknowledge interrupt */
			sdhci_bcm2708_raw_writel(host, irq_mask,
						 SDHCI_INT_STATUS);

			BUG_ON(0 == bytes);

			if (data->flags & MMC_DATA_READ)
				sdhci_platdma_read(host, addr, bytes);
			else
				sdhci_platdma_write(host, addr, bytes);
		} else {
			DBG("PDMA - wait avail\n");
			/* may generate an IRQ if already present */
			sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
						SDHCI_INT_SPACE_AVAIL);
		}
	} else {
		if (sync_after_dma) {
			/* On the Arasan controller the stop command (which will be
			   scheduled after this completes) does not seem to work
			   properly if we allow it to be issued when we are
			   transferring data to/from the SD card.
			   We get CRC and DEND errors unless we wait for
			   the SD controller to finish reading/writing to the card. */
			u32 state_mask;
			int timeout=3*1000*1000;

			DBG("PDMA over - sync card\n");
			if (data->flags & MMC_DATA_READ)
				state_mask = SDHCI_DOING_READ;
			else
				state_mask = SDHCI_DOING_WRITE;

			while (0 != (sdhci_bcm2708_raw_readl(host, SDHCI_PRESENT_STATE) 
				& state_mask) && --timeout > 0)
			{
				udelay(1);
				continue;
			}
			if (timeout <= 0)
				printk(KERN_ERR"%s: final %s to SD card still "
				       "running\n",
				       mmc_hostname(host->mmc),
				       data->flags & MMC_DATA_READ? "read": "write");
		}
		if (host_priv->complete) {
			(*host_priv->complete)(host);
			DBG("PDMA %s complete\n",
			    data->flags & MMC_DATA_READ?"read":"write");
			sdhci_signal_irqs(host, SDHCI_INT_DATA_AVAIL |
						SDHCI_INT_SPACE_AVAIL);
		}
	}
//	spin_unlock_irqrestore(&host->lock, flags);
}
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
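		/* aligned case: DMA directly to/from the caller's scatterlist */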
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src)  error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst)  error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp)  error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp)  error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}
static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}
Example #22
0
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

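	/* point the PDB input/output at either the SEC4 SG table or the single mapped segment */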
	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
Example #23
0
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);


	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
Example #24
0
static int safexcel_aes_send(struct crypto_async_request *base, int ring,
			     struct safexcel_request *request,
			     struct safexcel_cipher_req *sreq,
			     struct scatterlist *src, struct scatterlist *dst,
			     unsigned int cryptlen, unsigned int assoclen,
			     unsigned int digestsize, u8 *iv, int *commands,
			     int *results)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	unsigned int totlen = cryptlen + assoclen;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
	int i, ret = 0;

	if (src == dst) {
		nr_src = dma_map_sg(priv->dev, src,
				    sg_nents_for_len(src, totlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, src,
				    sg_nents_for_len(src, totlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, dst,
				    sg_nents_for_len(dst, totlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, src,
				     sg_nents_for_len(src, totlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	if (ctx->aead) {
		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
		       ctx->ipad, ctx->state_sz);
		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
		       ctx->opad, ctx->state_sz);
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, totlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, base, sreq, cdesc);
			if (ctx->aead)
				safexcel_aead_token(ctx, iv, cdesc,
						    sreq->direction, cryptlen,
						    assoclen, digestsize);
			else
				safexcel_skcipher_token(ctx, iv, cdesc,
							cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}
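
Only the length clamping matters for correctness in the descriptor loop above: each command descriptor covers at most what is still queued, and the walk stops as soon as totlen bytes are covered, even if the scatterlist maps more data. A minimal standalone sketch of just that bookkeeping, with hypothetical segment sizes in place of the mapped scatterlist:

#include <stdio.h>

int main(void)
{
	int seg_len[] = { 4096, 4096, 4096 };	/* hypothetical mapped segment sizes */
	int nsegs = 3;
	int totlen = 10000;			/* cryptlen + assoclen */
	int queued = totlen;
	int n_cdesc = 0;

	for (int i = 0; i < nsegs; i++) {
		int len = seg_len[i];

		/* do not overflow the request */
		if (queued - len < 0)
			len = queued;

		/* first = (n_cdesc == 0), last = (queued - len == 0) */
		printf("cdesc %d: len=%d first=%d last=%d\n",
		       n_cdesc, len, n_cdesc == 0, queued - len == 0);
		n_cdesc++;

		queued -= len;
		if (!queued)
			break;
	}
	return 0;
}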
Example #25
0
/* Prepare to transfer the next segment of a scatterlist */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
	frame = data->blksz;
	count = sg_dma_len(sg);

	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	count /= frame;
	frame >>= 1;

	if (!(data->flags & MMC_DATA_WRITE)) {
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	BUG_ON(count > 0xffff);

	OMAP_MMC_WRITE(host, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}
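
The frame/count computation above is easy to get wrong: the frame size is clamped to the FIFO size, the byte count is converted to a number of frames, and the frame length is then expressed in 16-bit words. A standalone sketch of that arithmetic, assuming a hypothetical dma_frame_params() helper rather than the driver's own code path:

#include <assert.h>
#include <stdio.h>

/* frame_words: DMA frame length in 16-bit words; nframes: number of frames */
static void dma_frame_params(unsigned blksz, unsigned count, int is_omap15xx,
			     unsigned *frame_words, unsigned *nframes)
{
	unsigned frame = blksz;

	/* FIFO is 16x2 bytes on 15xx, 32x2 bytes on later parts */
	if (is_omap15xx && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;

	*nframes = count / frame;
	*frame_words = frame >> 1;

	assert(*nframes <= 0xffff);	/* hardware limit on the frame count */
}

int main(void)
{
	unsigned fw, nf;

	dma_frame_params(512, 512 * 8, 0, &fw, &nf);
	printf("frame=%u words, count=%u frames\n", fw, nf);
	return 0;
}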
Example #26
0
/* Creates the scatter gather list, DMA Table */
static unsigned int
sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int count = 0, i = 1;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;	/* sglist of length Zero */

	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		dma_addr_t cur_addr;
		int cur_len;
		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			if (count++ >= IOC4_PRD_ENTRIES) {
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			} else {
				u32 bcount =
				    0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;

				/* put the addr, length in
				 * the IOC4 dma-table format */
				*table = 0x0;
				table++;
				*table = cpu_to_be32(cur_addr);
				table++;
				*table = 0x0;
				table++;

				*table = cpu_to_be32(bcount);
				table++;

				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg++;
		i--;
	}

	if (count) {
		table--;
		*table |= cpu_to_be32(0x80000000);
		return count;
	}

use_pio_instead:
	pci_unmap_sg(hwif->pci_dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);

	return 0;		/* revert to PIO for this request */
}
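
The inner loop above exists to keep any single PRD entry from crossing a 64 KiB boundary: bcount is the distance to the next 0x10000 boundary, clamped to what is left of the segment. A standalone sketch of that splitting, with an example address chosen to straddle a boundary:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cur_addr = 0x1fff0;	/* example segment straddling 0x20000 */
	uint32_t cur_len = 0x48;
	unsigned count = 0;

	while (cur_len) {
		/* bytes left before the next 64 KiB boundary */
		uint32_t bcount = 0x10000 - (cur_addr & 0xffff);

		if (bcount > cur_len)
			bcount = cur_len;

		printf("entry %u: addr=0x%x len=0x%x\n",
		       count, (unsigned)cur_addr, (unsigned)bcount);

		cur_addr += bcount;
		cur_len -= bcount;
		count++;
	}
	return 0;
}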
Example #27
0
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases the
			 * refcount on the gem itself instead of the dmabuf's f_count.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be either CONTIG or NONCONTIG, but for now
		 * it is treated as NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of its buffer type.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
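
The error handling above is the usual staged goto unwind: each label releases exactly the resources acquired before the failing step, in reverse order. A standalone sketch of the pattern, where malloc/free stand in for the dma-buf attach/map calls (the names are hypothetical, not the DRM API):

#include <stdio.h>
#include <stdlib.h>

static int import_buffer(void)
{
	int ret;
	void *attach, *sgt, *buffer;

	attach = malloc(16);		/* stands in for dma_buf_attach() */
	if (!attach)
		return -1;

	sgt = malloc(16);		/* stands in for dma_buf_map_attachment() */
	if (!sgt) {
		ret = -1;
		goto err_detach;
	}

	buffer = calloc(1, 64);		/* stands in for the buffer bookkeeping */
	if (!buffer) {
		ret = -1;
		goto err_unmap;
	}

	/* success path: in the real code the imported object is returned here */
	printf("import succeeded\n");
	free(buffer);
	free(sgt);
	free(attach);
	return 0;

err_unmap:
	free(sgt);
err_detach:
	free(attach);
	return ret;
}

int main(void)
{
	return import_buffer() ? 1 : 0;
}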
Example #28
0
/*
 * pmac_ide_build_dmatable builds the DBDMA command list
 * for a transfer and sets the DBDMA channel to point to it.
 */
static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent);
	struct dbdma_cmd *table;
	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
	struct scatterlist *sg;
	int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
	int i = cmd->sg_nents, count = 0;

	/* DMA table is already aligned */
	table = (struct dbdma_cmd *) pmif->dma_table_cpu;

	/* Make sure DMA controller is stopped (necessary ?) */
	writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
	while (readl(&dma->status) & RUN)
		udelay(1);

	/* Build DBDMA commands list */
	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
			if (pmif->broken_dma_warn == 0) {
				printk(KERN_WARNING "%s: DMA on non aligned address, "
				       "switching to PIO on Ohare chipset\n", drive->name);
				pmif->broken_dma_warn = 1;
			}
			return 0;
		}
		while (cur_len) {
			unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;

			if (count++ >= MAX_DCMDS) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				return 0;
			}
			table->command = cpu_to_le16(wr? OUTPUT_MORE: INPUT_MORE);
			table->req_count = cpu_to_le16(tc);
			table->phy_addr = cpu_to_le32(cur_addr);
			table->cmd_dep = 0;
			table->xfer_status = 0;
			table->res_count = 0;
			cur_addr += tc;
			cur_len -= tc;
			++table;
		}
		sg = sg_next(sg);
		i--;
	}

	/* convert the last command to an input/output last command */
	if (count) {
		table[-1].command = cpu_to_le16(wr? OUTPUT_LAST: INPUT_LAST);
		/* add the stop command to the end of the list */
		memset(table, 0, sizeof(struct dbdma_cmd));
		table->command = cpu_to_le16(DBDMA_STOP);
		mb();
		writel(hwif->dmatable_dma, &dma->cmdptr);
		return 1;
	}

	printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);

	return 0; /* revert to PIO for this request */
}
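
Each mapped segment is cut into DBDMA commands of at most 0xfe00 bytes, and only once the list is complete is the final command rewritten as a "last" command. A standalone sketch of that list-building arithmetic, using hypothetical command codes and table size rather than the real DBDMA definitions:

#include <stdio.h>
#include <stdint.h>

#define MAX_DCMDS 16	/* hypothetical table size, not the driver's value */

enum { CMD_MORE = 1, CMD_LAST = 2 };	/* illustrative opcodes only */

struct cmd {
	uint16_t command;
	uint16_t req_count;
	uint32_t phy_addr;
};

int main(void)
{
	struct cmd table[MAX_DCMDS];
	uint32_t cur_addr = 0x100000, cur_len = 0x20000;	/* one 128 KiB segment */
	unsigned count = 0;

	while (cur_len) {
		/* at most 0xfe00 bytes per command */
		uint32_t tc = cur_len < 0xfe00 ? cur_len : 0xfe00;

		if (count >= MAX_DCMDS)
			return 1;	/* table too small: caller would fall back to PIO */

		table[count].command = CMD_MORE;
		table[count].req_count = (uint16_t)tc;
		table[count].phy_addr = cur_addr;
		cur_addr += tc;
		cur_len -= tc;
		count++;
	}

	if (count)
		table[count - 1].command = CMD_LAST;	/* close the list */

	for (unsigned i = 0; i < count; i++)
		printf("cmd %u: op=%u len=0x%x addr=0x%x\n", i,
		       (unsigned)table[i].command,
		       (unsigned)table[i].req_count,
		       (unsigned)table[i].phy_addr);
	return 0;
}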
Example #29
0
static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;
	unsigned int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}
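
The heart of the routine is chunking each buffer into pieces no larger than max_xfer_size and linking the resulting descriptors while accumulating the list length and total transfer size. A standalone sketch of that chunk-and-link loop, with plain heap allocation standing in for the DMA descriptor pool:

#include <stdio.h>
#include <stdlib.h>

struct node {
	size_t addr;
	size_t len;
	struct node *next;
};

int main(void)
{
	const size_t max_xfer_size = 4096;	/* hypothetical engine limit */
	size_t buf = 0x80000000u, buf_len = 10000;	/* one mapped segment */
	struct node *head = NULL, *prev = NULL;
	size_t list_len = 0, list_xfer_size = 0;

	while (buf_len > 0) {
		size_t xfer = buf_len < max_xfer_size ? buf_len : max_xfer_size;
		struct node *curr = malloc(sizeof(*curr));

		if (!curr)
			return 1;
		curr->addr = buf;
		curr->len = xfer;
		curr->next = NULL;

		if (!prev)
			head = curr;		/* first descriptor of the list */
		else
			prev->next = curr;	/* link after the previous one */
		prev = curr;

		list_len++;
		list_xfer_size += xfer;
		buf += xfer;
		buf_len -= xfer;
	}

	for (struct node *n = head; n; n = n->next)
		printf("desc: addr=0x%zx len=%zu\n", n->addr, n->len);
	printf("descriptors=%zu total=%zu\n", list_len, list_xfer_size);

	while (head) {
		struct node *next = head->next;
		free(head);
		head = next;
	}
	return 0;
}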
Example #30
0
static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
					struct mmc_request *req)
{
	int sync_dev;
	unsigned char i, j;
	unsigned short acnt, bcnt, ccnt;
	unsigned int src_port, dst_port, temp_ccnt;
	enum address_mode mode_src, mode_dst;
	enum fifo_width fifo_width_src, fifo_width_dst;
	unsigned short src_bidx, dst_bidx;
	unsigned short src_cidx, dst_cidx;
	unsigned short bcntrld;
	enum sync_dimension sync_mode;
	edmacc_paramentry_regs temp;
	int edma_chan_num;
	struct mmc_data *data = host->data;
	struct scatterlist *sg = &data->sg[0];
	unsigned int count;
	int num_frames, frame;

	frame = data->blksz;
	count = sg_dma_len(sg);

	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	if (count % 32 == 0) {
		acnt = 4;
		bcnt = 8;
		num_frames = count / 32;
	} else {
		acnt = count;
		bcnt = 1;
		num_frames = 1;
	}

	if (num_frames > MAX_C_CNT) {
		temp_ccnt = MAX_C_CNT;
		ccnt = temp_ccnt;
	} else {
		ccnt = num_frames;
		temp_ccnt = ccnt;
	}

	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		/*AB Sync Transfer */
		sync_dev = host->dma_tx_event;

		src_port = (unsigned int)sg_dma_address(sg);
		mode_src = INCR;
		fifo_width_src = W8BIT;	/* don't care, since mode_src is INCR */
		src_bidx = acnt;
		src_cidx = acnt * bcnt;
		dst_port = host->phys_base + DAVINCI_MMC_REG_DXR;
		mode_dst = INCR;
		fifo_width_dst = W8BIT;	/* don't care, since mode_dst is INCR */
		dst_bidx = 0;
		dst_cidx = 0;
		bcntrld = 8;
		sync_mode = ABSYNC;
	} else {
		sync_dev = host->dma_rx_event;

		src_port = host->phys_base + DAVINCI_MMC_REG_DRR;
		mode_src = INCR;
		fifo_width_src = W8BIT;
		src_bidx = 0;
		src_cidx = 0;
		dst_port = (unsigned int)sg_dma_address(sg);
		mode_dst = INCR;
		fifo_width_dst = W8BIT;	/* don't care, since mode_dst is INCR */
		dst_bidx = acnt;
		dst_cidx = acnt * bcnt;
		bcntrld = 8;
		sync_mode = ABSYNC;
	}

	davinci_set_dma_src_params(sync_dev, src_port, mode_src,
				   fifo_width_src);
	davinci_set_dma_dest_params(sync_dev, dst_port, mode_dst,
				    fifo_width_dst);
	davinci_set_dma_src_index(sync_dev, src_bidx, src_cidx);
	davinci_set_dma_dest_index(sync_dev, dst_bidx, dst_cidx);
	davinci_set_dma_transfer_params(sync_dev, acnt, bcnt, ccnt, bcntrld,
					sync_mode);

	davinci_get_dma_params(sync_dev, &temp);
	if (sync_dev == host->dma_tx_event) {
		if (host->option_write == 0) {
			host->option_write = temp.opt;
		} else {
			temp.opt = host->option_write;
			davinci_set_dma_params(sync_dev, &temp);
		}
	}
	if (sync_dev == host->dma_rx_event) {
		if (host->option_read == 0) {
			host->option_read = temp.opt;
		} else {
			temp.opt = host->option_read;
			davinci_set_dma_params(sync_dev, &temp);
		}
	}

	if (host->sg_len > 1) {
		davinci_get_dma_params(sync_dev, &temp);
		temp.opt &= ~TCINTEN;
		davinci_set_dma_params(sync_dev, &temp);

		for (i = 0; i < host->sg_len - 1; i++) {

			sg = &data->sg[i + 1];

			if (i != 0) {
				j = i - 1;
				davinci_get_dma_params(host->edma_ch_details.
						       chanel_num[j], &temp);
				temp.opt &= ~TCINTEN;
				davinci_set_dma_params(host->edma_ch_details.
						       chanel_num[j], &temp);
			}

			edma_chan_num = host->edma_ch_details.chanel_num[0];

			frame = data->blksz;
			count = sg_dma_len(sg);

			if ((data->blocks == 1) && (count > data->blksz))
				count = frame;

			ccnt = count / 32;

			if (sync_dev == host->dma_tx_event)
				temp.src = (unsigned int)sg_dma_address(sg);
			else
				temp.dst = (unsigned int)sg_dma_address(sg);

			temp.opt |= TCINTEN;

			temp.ccnt = (temp.ccnt & 0xFFFF0000) | (ccnt);

			davinci_set_dma_params(edma_chan_num, &temp);
			if (i != 0) {
				j = i - 1;
				davinci_dma_link_lch(host->edma_ch_details.
						     chanel_num[j],
						     edma_chan_num);
			}
		}
		davinci_dma_link_lch(sync_dev,
				     host->edma_ch_details.chanel_num[0]);
	}

	davinci_start_dma(sync_dev);
	return 0;
}
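
The EDMA parameter selection above reduces to a small piece of arithmetic: a byte count that is a multiple of 32 is described as 4-byte elements, 8 per frame, count/32 frames; anything else becomes a single frame holding one element of count bytes, and the frame count is clamped to the hardware limit. A standalone sketch of that computation, with MAX_C_CNT as a hypothetical stand-in for the driver's limit:

#include <stdio.h>

#define MAX_C_CNT 0xffff	/* hypothetical stand-in for the EDMA frame-count limit */

static void edma_counts(unsigned count, unsigned short *acnt,
			unsigned short *bcnt, unsigned short *ccnt)
{
	unsigned num_frames;

	if (count % 32 == 0) {
		*acnt = 4;		/* element size in bytes */
		*bcnt = 8;		/* elements per frame */
		num_frames = count / 32;
	} else {
		*acnt = (unsigned short)count;	/* one element holding everything */
		*bcnt = 1;
		num_frames = 1;
	}

	*ccnt = num_frames > MAX_C_CNT ? MAX_C_CNT : (unsigned short)num_frames;
}

int main(void)
{
	unsigned short acnt, bcnt, ccnt;

	edma_counts(512, &acnt, &bcnt, &ccnt);
	printf("acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
	return 0;
}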