Example #1
/* Prepare DMA to start data transfer from the MMC card */
static void jz_mmc_tx_setup_data(struct jz_mmc_host *host,
				 struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	int channeltx = txdmachan;
	int i;
	u32 size;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	REG_MSC_NOB = nob;
	REG_MSC_BLKLEN = data->blksz;
	size = nob * data->blksz;

	if (data->flags & MMC_DATA_READ) {
		host->dma.dir = DMA_FROM_DEVICE;
	} else {
		host->dma.dir = DMA_TO_DEVICE;
	}

	host->dma.len =
		dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			   host->dma.dir);

	for (i = 0; i < host->dma.len; i++) {
		host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		host->sg_cpu[i].dcmd = sg_dma_len(&data->sg[i]);
		dma_cache_wback_inv((unsigned long)
				    CKSEG0ADDR(sg_dma_address(data->sg)) +
				    data->sg->offset,
				    host->sg_cpu[i].dcmd);
		jz_mmc_start_dma(channeltx, host->sg_cpu[i].dtadr,
				 host->sg_cpu[i].dcmd, DMA_MODE_WRITE);
	}
}
Example #2
/* Prototype: fasdmatype_t eesoxscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
	struct device *dev = scsi_get_device(host);
	int dmach = info->info.scsi.dma;

	if (dmach != NO_DMA &&
	    (min_type == fasdma_real_all || SCp->this_residual >= 512)) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = DMA_TO_DEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = DMA_FROM_DEVICE,
			dma_dir = DMA_MODE_READ;

		dma_map_sg(dev, info->sg, bufs, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}
	/*
	 * We don't do DMA, we only do slow PIO
	 *
	 * Some day, we will do Pseudo DMA
	 */
	return fasdma_pseudo;
}
Example #3
/* Prototype: fasdmatype_t powertecscsi_dma_setup(host, SCpnt, direction, min_type)
 * Purpose  : initialises DMA/PIO
 * Params   : host      - host
 *	      SCpnt     - command
 *	      direction - DMA on to/off of card
 *	      min_type  - minimum DMA support that we must have for this transfer
 * Returns  : type of transfer to be performed
 */
static fasdmatype_t
powertecscsi_dma_setup(struct Scsi_Host *host, Scsi_Pointer *SCp,
		       fasdmadir_t direction, fasdmatype_t min_type)
{
	struct powertec_info *info = (struct powertec_info *)host->hostdata;
	struct device *dev = scsi_get_device(host);
	int dmach = info->info.scsi.dma;

	if (info->info.ifcfg.capabilities & FASCAP_DMA &&
	    min_type == fasdma_real_all) {
		int bufs, map_dir, dma_dir;

		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);

		if (direction == DMA_OUT)
			map_dir = DMA_TO_DEVICE,
			dma_dir = DMA_MODE_WRITE;
		else
			map_dir = DMA_FROM_DEVICE,
			dma_dir = DMA_MODE_READ;

		dma_map_sg(dev, info->sg, bufs + 1, map_dir);

		disable_dma(dmach);
		set_dma_sg(dmach, info->sg, bufs + 1);
		set_dma_mode(dmach, dma_dir);
		enable_dma(dmach);
		return fasdma_real_all;
	}

	/*
	 * If we're not doing DMA,
	 *  we'll do slow PIO
	 */
	return fasdma_pio;
}
Example #4
static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
				  int num_sg, int dir)
{
	return dma_map_sg(esp->dev, sg, num_sg, dir);
}
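The sun3x wrapper above simply forwards to dma_map_sg(). For reference, here is a minimal caller-side sketch of the pattern the rest of this listing revolves around; the function and variable names are placeholders and do not come from any of the drivers quoted here. The detail worth calling out is that dma_unmap_sg() takes the nents value originally passed to dma_map_sg(), not the (possibly smaller) count it returns, while descriptor programming iterates over the returned count.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Sketch only: map a scatterlist for a device-bound transfer, walk the
 * mapped segments, then unmap with the original nents. */
static int example_do_dma(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)			/* 0 means the mapping failed */
		return -ENOMEM;

	/* iterate over the mapped count, which may be smaller than nents */
	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* a real driver would program one hardware descriptor here */
		pr_debug("segment %d: %pad, %u bytes\n", i, &addr, len);
	}

	/* ... start the hardware and wait for completion ... */

	/* unmap with the nents passed to dma_map_sg(), not the returned count */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}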
Example #5
static void ak98_sdio_start_data(struct ak98_mci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout;
	unsigned long long clks;
	void __iomem *base;
    
	PK("%s: blksz %04x blks %04x flags %08x\n",
	       __func__, data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	host->data_xfered = 0;

	ak98_mci_init_sg(host, data);  
	
	clks = (unsigned long long)data->timeout_ns * host->bus_clkrate;
	do_div(clks, 1000000000UL);
	timeout = data->timeout_clks + (unsigned int)clks;
	
	PK("timeout: %uns / %uclks, clks=%d\n", data->timeout_ns, data->timeout_clks,clks);

	base = host->base;
	writel(timeout, base + AK98MCIDATATIMER);
	writel(host->size, base + AK98MCIDATALENGTH);

	/* set l2 fifo info */
	writel (MCI_DMA_BUFEN | MCI_DMA_SIZE(MCI_L2FIFO_SIZE/4),
		base + AK98MCIDMACTRL);

#ifdef AKMCI_L2FIFO_DMA
	u32 regval;
	/* assumed: dma_size/dma_times were meant to be locals; capped below */
	u32 dma_size = host->size;
	u32 dma_times;

	/* get l2 fifo */
	regval = readl(host->l2base + L2FIFO_ASSIGN1);
	regval = (regval & (~(3<<12))) | (MCI_L2FIFO_NUM << 12);
	writel(regval, host->l2base + L2FIFO_ASSIGN1);

	regval = readl(host->l2base + L2FIFO_CONF1);
	regval |= (1 << (0 + MCI_L2FIFO_NUM))
		| (1 << (16 + MCI_L2FIFO_NUM))
		| (1 << (24 + MCI_L2FIFO_NUM));
	if (data->flags & MMC_DATA_WRITE)
		regval |= (1 << (8 + MCI_L2FIFO_NUM));
	else
		regval &= ~(1 << (8 + MCI_L2FIFO_NUM));
	writel(regval, host->l2base + L2FIFO_CONF1);

	/* set dma addr */
	if (data->flags & MMC_DATA_WRITE)
		dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, DMA_TO_DEVICE);
	else
		dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, DMA_FROM_DEVICE);
	writel(sg_dma_address(data->sg), host->l2base + MCI_L2FIFO_NUM);

	/* set dma size */
	if (host->size > L2DMA_MAX_SIZE)
		dma_size = L2DMA_MAX_SIZE;
	dma_times = dma_size/64;
	writel(dma_times, host->l2base + 0x40 + MCI_L2FIFO_NUM);

	if (host->size > L2DMA_MAX_SIZE) {
		/* need to handle dma int */
		regval = readl(host->l2base + L2FIFO_INTEN);
		regval |= (1 << (9 + MCI_L2FIFO_NUM));
		writel(regval, host->l2base + L2FIFO_INTEN);

		request_irq(AK88_L2MEM_IRQ(9 + MCI_L2FIFO_NUM), ak98_mcil2_irq,
			    IRQF_DISABLED, DRIVER_NAME "(dma)", host);
	}

	/* when to start dma? */
	regval = readl(host->l2base + L2FIFO_DMACONF);
	regval |= (1 | (1 << (24 + MCI_L2FIFO_NUM)));
	writel(regval, host->l2base + L2FIFO_DMACONF);

	if (dma_size % 64) {
		/* fraction DMA: a tail shorter than one 64-byte burst
		 * (8 * MCI_L2FIFO_NUM) is not handled here */
	}

	/* set l2 fifo info */
	writel (MCI_DMA_BUFEN | MCI_DMA_EN | MCI_DMA_SIZE(MCI_L2FIFO_SIZE/4),
		base + AK98MCIDMACTRL);
#endif

	datactrl = MCI_DPSM_ENABLE;

	switch (host->bus_width) {
	case MMC_BUS_WIDTH_8:
		datactrl |= MCI_DPSM_BUSMODE(2);
		break;
	case MMC_BUS_WIDTH_4:
		datactrl |= MCI_DPSM_BUSMODE(1);
		break;
	case MMC_BUS_WIDTH_1:
	default:
		datactrl |= MCI_DPSM_BUSMODE(0);
		break;
	}

	if (data->flags & MMC_DATA_STREAM) {
		DBG(host, "%s", "STREAM Data\n");
		datactrl |= MCI_DPSM_STREAM;
	} else {
		DBG(host, "BLOCK Data: %u x %u\n", data->blksz, data->blocks);
		datactrl |= MCI_DPSM_BLOCKSIZE(data->blksz);
	}

	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
	}

	writel(readl(base + AK98MCIMASK) | MCI_DATAIRQMASKS, base + AK98MCIMASK);
	writel(datactrl, base + AK98MCIDATACTRL);

	PK("ENABLE DATA IRQ, datactrl: 0x%08x, timeout: 0x%08x, len: %u\n",
	       datactrl, readl(base+AK98MCIDATATIMER), host->size);

#ifdef AKMCI_L2FIFO_PIO
	if (data->flags & MMC_DATA_WRITE)
		mci_xfer(host);
#endif

#ifdef AKMCI_INNERFIFO_PIO
    unsigned int irqmask;
    
	irqmask = readl(base + AK98MCIMASK);
	if (data->flags & MMC_DATA_READ) {
		if (host->size > MCI_FIFOSIZE)
			irqmask |= MCI_FIFOFULLMASK;
		else
			;	/* wait for DATAEND int */
	} else {
		irqmask |= MCI_FIFOEMPTYMASK;
	}

	writel(irqmask, base + AK98MCIMASK);
#endif
}
Example #6
static int srp_indirect_data(struct scst_cmd *sc, struct srp_cmd *cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;
	int err = 0;
	int nmd, nsg = 0, len, sg_cnt = 0;
	u32 tsize = 0;
	enum dma_data_direction dma_dir;

	iue = scst_cmd_get_tgt_priv(sc);
	if (dir == DMA_TO_DEVICE) {
		scst_cmd_get_write_fields(sc, &sg, &sg_cnt);
		tsize = scst_cmd_get_bufflen(sc);
		dma_dir = DMA_FROM_DEVICE;
	} else {
		sg = scst_cmd_get_sg(sc);
		sg_cnt = scst_cmd_get_sg_cnt(sc);
		tsize = scst_cmd_get_adjusted_resp_data_len(sc);
		dma_dir = DMA_TO_DEVICE;
	}

	dprintk("%p %u %u %d %d\n", iue, tsize, be32_to_cpu(id->len),
		be32_to_cpu(cmd->data_in_desc_cnt),
		be32_to_cpu(cmd->data_out_desc_cnt));

	len = min(tsize, be32_to_cpu(id->len));

	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			eprintk("Can't get dma memory %u\n", id->table_desc.len);
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
		err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			eprintk("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		eprintk("This command uses external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, sg_cnt, dma_dir);
		if (!nsg) {
			eprintk("fail to map %p %d\n", iue, sg_cnt);
			err = -ENOMEM;
			goto free_mem;
		}
	}

	err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, dma_dir);

free_mem:
	if (token && dma_map)
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);

	return err;
}
Example #7
static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	/* just return current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
Example #8
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						sg_len, dma_data_dir);
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;
	}

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}
Example #9
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	MMC_NOB = nob;
	MMC_BLK_LEN = blksz;

	/*
	 * DMA cannot be used for small block sizes; we have to use CPU-driven
	 * transfers instead. We are in big trouble for non-512-byte transfers
	 * according to the note in paragraph 20.6.7 of the User Manual anyway,
	 * but we need to be able to transfer the SCR at least.
	 * The situation is even more complex in reality: the SDHC is not able
	 * to handle partial FIFO fills and reads well. The length has to be
	 * rounded up to a multiple of the burst size. This is required for the
	 * SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable read SCR */
			MMC_NOB = 1;
			MMC_BLK_LEN = 512;
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
		host->data_cnt = 0;

		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len,  host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
						data->sg_len,  host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
			host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is there only for consistency checking and can be disabled in future */
	host->dma_size = 0;
	for(i=0; i<host->dma_nents; i++)
		host->dma_size+=data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
		       datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

	wmb();

	if(host->actual_bus_width == MMC_BUS_WIDTH_4)
		BLR(host->dma) = 0;	/* burst 64 byte read / 64 bytes write */
	else
		BLR(host->dma) = 16;	/* burst 16 byte read / 16 bytes write */

	RSSR(host->dma) = DMA_REQ_SDHC;

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start DMA engine for read, write is delayed after initial response */
	if (host->dma_dir == DMA_FROM_DEVICE) {
		imx_dma_enable(host->dma);
	}
}
Example #10
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);


	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have none-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
Example #11
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return -rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
Example #12
/**
 * Generic LRW-AES en/decryption
 * @param encrypt non-zero to encrypt, zero to decrypt
 * @param in Source of data
 * @param out Location to place en/decrypted data
 * @param nents Number of entries in scatter list, in and out must have the same
 *              number of entries
 * @param iv 8 byte array containing the I-Value
 * @return error code or 0 for success
 */
static int ox800_aeslrw_gencrypt(  u8  encrypt,
                            struct scatterlist* in,
                            struct scatterlist* out,
                            unsigned int nents,
                            u8  iv[])
{
    oxnas_dma_channel_t* dma_in;
    oxnas_dma_channel_t* dma_out;
    struct scatterlist* out_;
    char same_buffer;
    int status = 0;

    /* get dma resources (non blocking) */
    dma_in = oxnas_dma_request(0);
    dma_out = oxnas_dma_request(0);
    
    if ((dma_in) && (dma_out)) {
        u32 reg;

        /* only touch the channels once both requests succeeded */
        VPRINTK("dma in %d out %d \n",
            dma_in->channel_number_,
            dma_out->channel_number_);

        // shouldn't be busy or full
        reg = readl( OX800DPE_STATUS );
        if (! (reg & OX800DPE_STAT_IDLE) )
            printk("not idle after abort toggle");
        if (reg & OX800DPE_STAT_TX_NOTEMPTY)
            printk("tx fifo not empty after abort toggle");
        if (! (reg & OX800DPE_STAT_RX_SPACE) )
            printk("rx not empty after abort toggle");
        
        /* check to see if the destination buffer is the same as the source */
        same_buffer = (sg_phys(in) == sg_phys(out));
        
        /* map transfers */
        if (same_buffer) {
            dma_map_sg(NULL, in, nents, DMA_BIDIRECTIONAL);
            out_ = in;
        } else {
            /* map transfers */
            dma_map_sg(NULL, in, nents, DMA_TO_DEVICE);
            dma_map_sg(NULL, out, nents, DMA_FROM_DEVICE);
            out_ = out;
        }
#ifdef CIPHER_USE_SG_DMA        
        /* setup DMA transfers */ 
        oxnas_dma_device_set_sg(
            dma_in,
            OXNAS_DMA_TO_DEVICE,
            in,
            nents,
            &oxnas_dpe_rx_dma_settings,
            OXNAS_DMA_MODE_INC);
            
        oxnas_dma_device_set_sg(
            dma_out,
            OXNAS_DMA_FROM_DEVICE,
            out_,
            nents,
            &oxnas_dpe_tx_dma_settings,
            OXNAS_DMA_MODE_INC);

#else
        oxnas_dma_device_set(
            dma_in,
            OXNAS_DMA_TO_DEVICE,
            (unsigned char* )sg_dma_address(in),
            sg_dma_len(in),
            &oxnas_dpe_rx_dma_settings,
            OXNAS_DMA_MODE_INC,
            1 /*paused */ );
            
        oxnas_dma_device_set(
            dma_out,
            OXNAS_DMA_FROM_DEVICE,
            (unsigned char* )sg_dma_address(out_),
            sg_dma_len(out_),
            &oxnas_dpe_tx_dma_settings,
            OXNAS_DMA_MODE_INC,
            1 /*paused */ );
#endif

        /* set dma callbacks */
        oxnas_dma_set_callback(
            dma_in,
            OXNAS_DMA_CALLBACK_ARG_NUL,
            OXNAS_DMA_CALLBACK_ARG_NUL);
        
        oxnas_dma_set_callback(
            dma_out,
            OXNAS_DMA_CALLBACK_ARG_NUL,
            OXNAS_DMA_CALLBACK_ARG_NUL);
        
        
        /* set for AES LRW encryption or decryption */
        writel( (encrypt ? OX800DPE_CTL_DIRECTION_ENC : 0 ) |
               OX800DPE_CTL_MODE_LRW_AES, 
               OX800DPE_CONTROL);
        wmb();
        
        /* write in I-value */
        writel(*((u32* )&(iv[0])), OX800DPE_DATA_LRW0 );
        writel(*((u32* )&(iv[4])), OX800DPE_DATA_LRW1 );
        
        wmb();

        /* wait until done */
        while(  !(OX800DPE_STAT_IDLE & readl( OX800DPE_STATUS )) );
        
        /* start dma */
        oxnas_dma_start(dma_out);
        oxnas_dma_start(dma_in);
    
        /* wait (once for each channel) */
        while ( oxnas_dma_is_active( dma_out ) ||
                oxnas_dma_is_active( dma_in  ) )
        {
            schedule();
        }
        
        /* free any allocated dma channels */
        oxnas_dma_free( dma_in );
        oxnas_dma_free( dma_out );

        /* unmap transfers */
        if (same_buffer) {
            dma_unmap_sg(NULL, in, nents, DMA_BIDIRECTIONAL);
        } else {
            dma_unmap_sg(NULL, in, nents, DMA_TO_DEVICE);
            dma_unmap_sg(NULL, out, nents, DMA_FROM_DEVICE);
        }
        
        status = ox800_aeslrw_driver.result;
    } else {        
        /* free any allocated dma channels */
        if (dma_in) 
            oxnas_dma_free( dma_in );
        if (dma_out)
            oxnas_dma_free( dma_out );
        status = -EBUSY;        
    }
    /* return an indication of success */
    return status;
}
Example #13
static void sslsd_request(struct mmc_host *mmc, struct mmc_request *req)
{
	sslsd_host			*host = mmc_priv(mmc);
	struct mmc_command	*cmd = req->cmd;
	sd_cmd_p			c = &host->tcmd;
	struct mmc_data		*d;
	int					flag;
	unsigned long		iflags;

	if (!sdhc_is_in(&host->hw))
	{
		cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, req);
		return;
	}

	c->dat = 0;
	switch (mmc_resp_type(cmd))
	{
		case MMC_RSP_R1: /* & R5, R6 */
			flag = SDCMD_F_R1;
			break;

		case MMC_RSP_R1B: /* & R5b */
			flag = SDCMD_F_R1B;
			break;

		case MMC_RSP_R2:
			flag = SDCMD_F_R2;
			c->dat = &host->tdat;
			c->dat->buf = (uint8_t *)cmd->resp;
			break;

		case MMC_RSP_R3:
			flag = SDCMD_F_R3;
			break;

		default:
			flag = 0;
			break;
	}

	c->cmd = cmd->opcode;
	c->arg = cmd->arg;
	host->pcmd = cmd;

	d = cmd->data;
	if (d)
	{
		struct scatterlist	*sg;
		sd_dat_p			dat;

		if (d->flags & MMC_DATA_STREAM) 
		{
			/* not supported */
			cmd->error = -EINVAL;
			mmc_request_done(mmc, req);
			return;
		}

		flag |= SDCMD_F_DAT;
		if (d->flags & MMC_DATA_WRITE)
		{
			flag |= SDCMD_F_WR;
		}
		if (d->blocks > 1)
		{
			flag |= SDCMD_F_MULTI;
		}
#if SD_DMA
		if (host->hw.fdma)
		{
			flag |= SDCMD_F_DMA;
		}
#endif

		dat = c->dat = &host->tdat;
		dat->blk = d->blksz;
#if 1
		c->tout = (d->timeout_ns + 1000000 - 1) / 1000000 + 
			d->timeout_clks / (mmc->ios.clock / 1000);
#endif

		sg = d->sg;
		host->sg = sg;
		host->sgc = d->sg_len;
		host->sgn = sg + d->sg_len - 1;
		host->sgofs = 0;

#if SD_DMA && IO_MAP == 1
		if (flag & SDCMD_F_DMA)
		{
			int	count;

			count = dma_map_sg(mmc_dev(mmc), sg, d->sg_len, 
						(flag & SDCMD_F_WR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (!count)
			{
				/* failed to map even 1 page */
				cmd->error = -ENOMEM;
				mmc_request_done(mmc, req);
				return;
			}
			if (count != d->sg_len)
			{
				/* not all pages got mapped */
				count = d->sg_len - count;
				host->sgc -= count;
				host->sgn -= count;
			}
		}
#endif
		if (req->stop)
		{
			flag |= SDCMD_F_STOP;
		}
		c->flag = flag;
		sslsd_map(sg, c, dat, 0);
		host->retry = 1;
	}
	else
	{
		c->flag = flag;
	}

//printk("sslsd: cmd info - cmd=%d arg=%08X flags=%02X\n", 
//cmd->opcode, cmd->arg, flag);

	/* 2 sec timeout */
	mod_timer(&host->timer, jiffies + 2 * HZ);
	local_irq_save(iflags);
	flag = sdhc_cmd(&host->hw, c);
	local_irq_restore(iflags);
	if (flag)
	{
		sslsd_cmd_done(host, c);
	}
}
Example #14
static int hi_mci_setup_data(struct himci_host *host, struct mmc_data *data)
{
	unsigned int sg_phyaddr, sg_length;
	unsigned int i, ret = 0;
	unsigned int data_size;
	unsigned int max_des, des_cnt;
	struct himci_des *des;

	himci_trace(2, "begin");
	himci_assert(host);
	himci_assert(data);

	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dma_dir = DMA_FROM_DEVICE;
	else
		host->dma_dir = DMA_TO_DEVICE;

	host->dma_sg = data->sg;
	host->dma_sg_num = dma_map_sg(mmc_dev(host->mmc),
			data->sg, data->sg_len, host->dma_dir);
	himci_assert(host->dma_sg_num);
	himci_trace(2, "host->dma_sg_num is %d\n", host->dma_sg_num);

	data_size = data->blksz * data->blocks;
	if (data_size > (DMA_BUFFER * MAX_DMA_DES)) {
		himci_error("mci request data_size is too big!\n");
		ret = -1;
		goto out;
	}

	himci_trace(2, "host->dma_paddr is 0x%08X,host->dma_vaddr is 0x%08X\n",
			(unsigned int)host->dma_paddr,
			(unsigned int)host->dma_vaddr);

	max_des = (PAGE_SIZE/sizeof(struct himci_des));
	des = (struct himci_des *)host->dma_vaddr;
	des_cnt = 0;

	for (i = 0; i < host->dma_sg_num; i++) {
		sg_length = sg_dma_len(&data->sg[i]);
		sg_phyaddr = sg_dma_address(&data->sg[i]);
		himci_trace(2, "sg[%d] sg_length is 0x%08X, "
				"sg_phyaddr is 0x%08X\n",
				i, (unsigned int)sg_length,
				(unsigned int)sg_phyaddr);
		while (sg_length) {
			des[des_cnt].idmac_des_ctrl = DMA_DES_OWN
				| DMA_DES_NEXT_DES;
			des[des_cnt].idmac_des_buf_addr = sg_phyaddr;
			/* idmac_des_next_addr is paddr for dma */
			des[des_cnt].idmac_des_next_addr = host->dma_paddr
				+ (des_cnt + 1) * sizeof(struct himci_des);

			if (sg_length >= 0x1F00) {
				des[des_cnt].idmac_des_buf_size = 0x1F00;
				sg_length -= 0x1F00;
				sg_phyaddr += 0x1F00;
			} else {
				/* FIXME:data alignment */
				des[des_cnt].idmac_des_buf_size = sg_length;
				sg_length = 0;
			}

			himci_trace(2, "des[%d] vaddr  is 0x%08X", i,
					(unsigned int)&des[i]);
			himci_trace(2, "des[%d].idmac_des_ctrl is 0x%08X",
			       i, (unsigned int)des[i].idmac_des_ctrl);
			himci_trace(2, "des[%d].idmac_des_buf_size is 0x%08X",
				i, (unsigned int)des[i].idmac_des_buf_size);
			himci_trace(2, "des[%d].idmac_des_buf_addr 0x%08X",
				i, (unsigned int)des[i].idmac_des_buf_addr);
			himci_trace(2, "des[%d].idmac_des_next_addr is 0x%08X",
				i, (unsigned int)des[i].idmac_des_next_addr);
			des_cnt++;
		}

		himci_assert(des_cnt < max_des);
	}
	des[0].idmac_des_ctrl |= DMA_DES_FIRST_DES;
	des[des_cnt - 1].idmac_des_ctrl |= DMA_DES_LAST_DES;
	des[des_cnt - 1].idmac_des_next_addr = 0;
out:
	return ret;
}
Example #15
static void rk28_sdio_host_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
#ifdef CONFIG_ATHEROS_AR6002
	int timeout;
#endif
	int total_count = 0;
	volatile u32 value = 0;
	unsigned long flags;
	struct rk28_sdio_priv *priv = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	spin_lock_irqsave(&priv->lock, flags);
	
#ifdef CONFIG_ATHEROS_AR6002
	if (cmd->opcode == 53)
	{
		spin_unlock_irqrestore(&priv->lock, flags);
		
		for (timeout = 100; timeout > 0; timeout--)
		{
			value = rk28_host_readl(priv, SDMMC_STATUS);
			if ((value & DATA_BUSY) == 0)
				break;
			
			//Card data busy.
			mdelay(1);
		}
		if (timeout <= 0)
		{
			printk("Waiting for CARD DATA BUSY timeout.\n");
			cmd->error = -EIO;
			mmc_request_done(mmc, mrq);
			
			return;
		}
		spin_lock_irqsave(&(priv->lock), flags);
	}
	
#endif

#if 0
	if (cmd->opcode == 53)// && cmd->arg & 0x80000000)
	{
		printk("copcode=53WR blksz=%d blocks=%d func->blksz=%d\n", cmd->data->blksz, cmd->data->blocks,
		       mmc->card->sdio_func[0]->cur_blksize);
	}
#endif
	
	value = cmd->opcode | CMD_START_CMD;
	if (cmd->opcode == 53)
	{
		value |= (CMD_DATA_EXPECT);
		if (cmd->arg & 0x80000000) /* Is write command */
		{
			value |= CMD_DATA_WRITE;
		}
		value |= (CMD_RESP_EXPECT | CMD_CHECK_RCRC | CMD_WAIT_PRVDATA);
	}
	else if ((cmd->opcode == 52) ||
				   (cmd->opcode == 3) ||
				   (cmd->opcode == 7))
	{
		value |= (CMD_RESP_EXPECT | CMD_CHECK_RCRC | CMD_WAIT_PRVDATA);
	}
	else if (cmd->opcode == 0)
	{
		value |= CMD_SEND_INITIAL;
	}
	else if(cmd->opcode == 5)
	{
		value |= CMD_RESP_EXPECT;
	}
	else
	{
		//printk("Doesn't support command <%d> in SDIO driver.\n", cmd->opcode);
		cmd->error = -EIO;
		mmc_request_done(mmc, mrq);
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	
	/* 
	 * TODO: We don't deal with mrq->stop for now. - Yongle Lai
	 */
	if (cmd->opcode == 53) 
	{
		if (cmd->data->blocks > 1)
			total_count = cmd->data->blksz * cmd->data->blocks;
		else
			total_count = cmd->data->blksz;
#if (SORT_PACKET_SIZE == 1)
		rk28_sdio_data_req_statistics(total_count);
#endif
		rk28_host_writel(priv, SDMMC_BLKSIZ, cmd->data->blksz);
		rk28_host_writel(priv, SDMMC_BYTCNT, total_count);
		if (total_count & 0x03)
			total_count += 4 - (total_count & 0x03);

   	cmd->data->sg->length = total_count;
   	priv->dma_dir = (value & CMD_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
    	
   	priv->dma_nents = dma_map_sg(mmc_dev(mmc->parent), cmd->data->sg,
        													 cmd->data->sg_len, priv->dma_dir);
   	rk28_dma_setup_sg(priv->dma_chan, cmd->data->sg, cmd->data->sg_len, 
    								  (value & CMD_DATA_WRITE) ? DMA_MODE_WRITE : DMA_MODE_READ);
	
		rk28_dma_enable(priv->dma_chan);
	}
	
	priv->mmc = mmc;
	priv->mrq = mrq;

	rk28_host_writel(priv, SDMMC_CMDARG, cmd->arg);
	rk28_host_writel(priv, SDMMC_CMD, value);

	spin_unlock_irqrestore(&priv->lock, flags);
}
Example #16
static int aes_dma_start(struct aes_hwa_ctx *ctx)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct omap_dma_channel_params dma_params;
	struct tf_crypto_aes_operation_state *state =
		crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
	static size_t last_count;
	unsigned long flags;

	in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
	out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));

	fast = in && out;

	if (fast) {
		count = min(ctx->total, sg_dma_len(ctx->in_sg));
		count = min(count, sg_dma_len(ctx->out_sg));

		if (count != ctx->total)
			return -EINVAL;

		/* Only call dma_map_sg if it has not yet been done */
		if (!(ctx->req->base.flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
			if (!err)
				return -EINVAL;

			err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(
					NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
		ctx->req->base.flags &= ~CRYPTO_TFM_REQ_DMA_VISIBLE;

		addr_in = sg_dma_address(ctx->in_sg);
		addr_out = sg_dma_address(ctx->out_sg);

		ctx->flags |= FLAGS_FAST;
	} else {
		count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
			ctx->buflen, ctx->total, 0);
		addr_in = ctx->dma_addr_in;
		addr_out = ctx->dma_addr_out;

		ctx->flags &= ~FLAGS_FAST;
	}

	ctx->total -= count;

	/* Configure HWA */
	tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);

	tf_aes_restore_registers(state, ctx->flags & FLAGS_ENCRYPT ? 1 : 0);

	OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG)
		| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
		| AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);

	ctx->dma_size = count;
	if (!fast)
		dma_sync_single_for_device(NULL, addr_in, count,
			DMA_TO_DEVICE);

	dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
	dma_params.frame_count = count / AES_BLOCK_SIZE;
	dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
	dma_params.src_ei = 0;
	dma_params.src_fi = 0;
	dma_params.dst_ei = 0;
	dma_params.dst_fi = 0;
	dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
	dma_params.read_prio = 0;
	dma_params.write_prio = 0;

	/* IN */
	dma_params.trigger = ctx->dma_in;
	dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
	dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.src_start = addr_in;
	dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_in, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_in,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_in,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
	} else {
		if (last_count != count)
			omap_set_dma_transfer_params(ctx->dma_lch_in,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);

		/* Configure input start address */
		__raw_writel(dma_params.src_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_in) + 0x9c));
	}

	/* OUT */
	dma_params.trigger = ctx->dma_out;
	dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
	dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.dst_start = addr_out;
	dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_out, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_out,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_out,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
		reconfigure_dma = false;
	} else {
		if (last_count != count) {
			omap_set_dma_transfer_params(ctx->dma_lch_out,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);
			last_count = count;
		}
		/* Configure output start address */
		__raw_writel(dma_params.dst_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_out) + 0xa0));
	}

	/* Is this really needed? */
	omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
	omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);

	wmb();

	omap_start_dma(ctx->dma_lch_in);
	omap_start_dma(ctx->dma_lch_out);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->next_req) {
		struct ablkcipher_request *req =
			ablkcipher_request_cast(ctx->next_req);

		if (!(ctx->next_req->flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, req->src, 1, DMA_TO_DEVICE);
			if (!err) {
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			err = dma_map_sg(NULL, req->dst, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(NULL, req->src, 1, DMA_TO_DEVICE);
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			ctx->next_req->flags |= CRYPTO_TFM_REQ_DMA_VISIBLE;
			ctx->next_req = NULL;
		}
	}

	if (ctx->backlog) {
		ctx->backlog->complete(ctx->backlog, -EINPROGRESS);
		ctx->backlog = NULL;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	return 0;
}
Example #17
static int pmpmci_prepare_data(struct pmpmci_host *host,
				struct mmc_data *data)
{
    
	int datalen = data->blocks * data->blksz;
	struct sd_data_s *sd_data = host->platdata;
	static int blksz = 0;
	
	if (data->flags & MMC_DATA_READ)
		host->flags |= HOST_F_RECV;
	else
		host->flags |= HOST_F_XMIT;

	if (host->mrq->stop)
		host->flags |= HOST_F_STOP;

	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.sgmap_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);
	if (host->dma.sgmap_len == 0)
		return -EINVAL;
  
    if(blksz != data->blksz)
    {
      sd_data->ops->setBlkLen(&(sd_data->info), data->blksz);			  
      blksz = data->blksz;
    }
/*************************************************************/
	if ((host->use_dma) && ((data->blksz % 16) == 0)) {
		/* the DMA path requires the block size to be a multiple of 16 */
		if (!(host->flags & HOST_F_DMA))
			host->flags |= HOST_F_DMA;
        host->dma.dmatogo = 0;
	    host->dma.totalxfer = 0;
    	host->dma.offset = 0;	
    	host->dma.prexfer =0; 			
//      	if(pmpmci_prepare_dma(host,data))
//			goto dataerr; 		
	} 
	else {
		if (host->flags & HOST_F_DMA)
			host->flags &= ~HOST_F_DMA;
		host->pio.index = 0;
		host->pio.offset = 0;
		host->pio.len = datalen;

		
//		if (host->flags & HOST_F_XMIT)
//        	sd_data->ops->intrpt_enable(&(sd_data->info), SD_MMC_INT_DATABUFEMPTY , 1);	
//		else
//        	sd_data->ops->intrpt_enable(&(sd_data->info), SD_MMC_INT_DATABUFFULL , 1);	
	}
	
/***************************************************/

	return 0;

dataerr:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			host->dma.dir);
	return -ETIMEDOUT;
}
Example #18
/*
 * Prepare and enable DMA Tx channel (on STM32)
 */
static void mmc_dma_tx_start(struct mmci_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *reqdata = mrq->data;
	int dma_len;
	int rv;

	/* Scatter/gather DMA is not supported */
	BUG_ON(reqdata->sg_len > 1);

	dma_len = dma_map_sg(
		mmc_dev(host->mmc), reqdata->sg, reqdata->sg_len,
		DMA_TO_DEVICE);
	if (dma_len == 0) {
		dev_err(mmc_dev(host->mmc), "could not map DMA Tx buffer\n");
		goto out;
	}

	/*
	 * Direction: memory-to-peripheral
	 * Flow controller: peripheral
	 * Priority: very high (3)
	 * Double buffer mode: disabled
	 * Circular mode: disabled
	 */
	rv = stm32_dma_ch_init(STM32F2_DMACH_SDIO, 1, 1, 3, 0, 0);
	if (rv < 0)
		goto err;

	/*
	 * Enable burst mode; set FIFO threshold to "full FIFO"
	 */
	rv = stm32_dma_ch_init_fifo(STM32F2_DMACH_SDIO, 1, 3);
	if (rv < 0)
		goto err;

	/*
	 * Peripheral address: SDIO controller FIFO data register
	 * Peripheral increment: disabled
	 * Peripheral data size: 32-bit
	 * Burst transfer configuration: incremental burst of 4 beats
	 */
	rv = stm32_dma_ch_set_periph(STM32F2_DMACH_SDIO,
		SD_FIFO((u32)host->base), 0, 2, 1);
	if (rv < 0)
		goto err;

	/*
	 * Memory address: DMA buffer address
	 * Memory incremental: enabled
	 * Memory data size: 32-bit
	 * Burst transfer configuration: incremental burst of 4 beats
	 */
	rv = stm32_dma_ch_set_memory(STM32F2_DMACH_SDIO,
		sg_dma_address(&reqdata->sg[0]), 1, 2, 1);
	if (rv < 0)
		goto err;

	/*
	 * Set number of items to transfer to zero, because we use peripheral
	 * flow controller, and therefore the SDIO controller will stop
	 * the transfer when the whole block data has been transferred.
	 */
	rv = stm32_dma_ch_set_nitems(STM32F2_DMACH_SDIO, 0);
	if (rv < 0)
		goto err;

	/*
	 * Enable the DMA channel. After this point, the DMA transfer will
	 * be able to start.
	 */
	rv = stm32_dma_ch_enable(STM32F2_DMACH_SDIO);
	if (rv < 0)
		goto err;

	goto out;

err:
	dev_err(mmc_dev(host->mmc), "Tx DMA channel initialization failed\n");
out:
	;
}
Example #19
File: mmci.c Project: Lyude/linux
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job were prepared thus do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	host->dma_in_progress = true;
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	if (host->variant->qcom_dml)
		dml_start_xfer(host, data);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}
Example #20
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src)  error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst)  error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp)  error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp)  error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}
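The unaligned branch above copies data into a driver-owned bounce buffer (dev->addr_vir) and maps a one-entry scatterlist built with sg_init_one(). Reduced to its essentials, that bounce-buffer mapping looks roughly like the sketch below; the names are illustrative and not taken from the rk_crypto driver. The matching teardown would call dma_unmap_sg() on the same single-entry list with the same direction once the transfer completes.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Sketch: map a driver-owned bounce buffer as a single-entry scatterlist. */
static int example_map_bounce(struct device *dev, void *buf, size_t len,
			      struct scatterlist *sg_tmp, dma_addr_t *addr)
{
	sg_init_one(sg_tmp, buf, len);

	if (!dma_map_sg(dev, sg_tmp, 1, DMA_TO_DEVICE))
		return -ENOMEM;		/* mapping failed */

	*addr = sg_dma_address(sg_tmp);	/* device-visible address for the engine */
	return 0;
}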
Example #21
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[0]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[0]->len1 = 0;
			dev->hw_desc[0]->p1 = 0;
		}
		dev->hw_desc[0]->len2 = ctx->keylen;
		dev->hw_desc[0]->p2 = dev->key_phys_base;
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
	}
	dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
	dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		goto unmap_in;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_out;
	}

	/* Create input links */
	dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[1]->len1 = dev->total;
	dev->hw_desc[1]->len2 = dev->total;
	dev->hw_desc[1]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	/* Start processing descriptor chain. */
	mod_timer(&dev->watchdog,
		  jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_out:
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}
Example #22
static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
			struct i2c_msg *msg, uint32_t flags)
{
	struct dma_async_tx_descriptor *desc;
	struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);

	if (msg->flags & I2C_M_RD) {
		i2c->dma_read = 1;
		i2c->addr_data = (msg->addr << 1) | I2C_SMBUS_READ;

		/*
		 * SELECT command.
		 */

		/* Queue the PIO register write transfer. */
		i2c->pio_data[0] = MXS_CMD_I2C_SELECT;
		desc = dmaengine_prep_slave_sg(i2c->dmach,
					(struct scatterlist *)&i2c->pio_data[0],
					1, DMA_TRANS_NONE, 0);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get PIO reg. write descriptor.\n");
			goto select_init_pio_fail;
		}

		/* Queue the DMA data transfer. */
		sg_init_one(&i2c->sg_io[0], &i2c->addr_data, 1);
		dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");
			goto select_init_dma_fail;
		}

		/*
		 * READ command.
		 */

		/* Queue the PIO register write transfer. */
		i2c->pio_data[1] = flags | MXS_CMD_I2C_READ |
				MXS_I2C_CTRL0_XFER_COUNT(msg->len);
		desc = dmaengine_prep_slave_sg(i2c->dmach,
					(struct scatterlist *)&i2c->pio_data[1],
					1, DMA_TRANS_NONE, DMA_PREP_INTERRUPT);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get PIO reg. write descriptor.\n");
			goto select_init_dma_fail;
		}

		/* Queue the DMA data transfer. */
		sg_init_one(&i2c->sg_io[1], msg->buf, msg->len);
		dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");
			goto read_init_dma_fail;
		}
	} else {
		i2c->dma_read = 0;
		i2c->addr_data = (msg->addr << 1) | I2C_SMBUS_WRITE;

		/*
		 * WRITE command.
		 */

		/* Queue the PIO register write transfer. */
		i2c->pio_data[0] = flags | MXS_CMD_I2C_WRITE |
				MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1);
		desc = dmaengine_prep_slave_sg(i2c->dmach,
					(struct scatterlist *)&i2c->pio_data[0],
					1, DMA_TRANS_NONE, 0);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get PIO reg. write descriptor.\n");
			goto write_init_pio_fail;
		}

		/* Queue the DMA data transfer. */
		sg_init_table(i2c->sg_io, 2);
		sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1);
		sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len);
		dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");
			goto write_init_dma_fail;
		}
	}

	/*
	 * The last descriptor must have this callback,
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_i2c_dma_irq_callback;
	desc->callback_param = i2c;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(i2c->dmach);
	return 0;

/* Read failpath. */
read_init_dma_fail:
	dma_unmap_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
select_init_dma_fail:
	dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
select_init_pio_fail:
	dmaengine_terminate_all(i2c->dmach);
	return -EINVAL;

/* Write failpath. */
write_init_dma_fail:
	dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
write_init_pio_fail:
	dmaengine_terminate_all(i2c->dmach);
	return -EINVAL;
}
Example #23
static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
	struct msmsdcc_nc_dmadata *nc;
	dmov_box *box;
	uint32_t rows;
	uint32_t crci;
	unsigned int n;
	int i, rc;
	struct scatterlist *sg = data->sg;

	rc = validate_dma(host, data);
	if (rc)
		return rc;

	host->dma.sg = data->sg;
	host->dma.num_ents = data->sg_len;

	nc = host->dma.nc;

	if (host->pdev_id == 1)
		crci = MSMSDCC_CRCI_SDC1;
	else if (host->pdev_id == 2)
		crci = MSMSDCC_CRCI_SDC2;
	else if (host->pdev_id == 3)
		crci = MSMSDCC_CRCI_SDC3;
	else if (host->pdev_id == 4)
		crci = MSMSDCC_CRCI_SDC4;
	else {
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOENT;
	}

	if (data->flags & MMC_DATA_READ)
		host->dma.dir = DMA_FROM_DEVICE;
	else
		host->dma.dir = DMA_TO_DEVICE;

	/* host->curr.user_pages = (data->flags & MMC_DATA_USERPAGE); */
	host->curr.user_pages = 0;

	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
	               host->dma.num_ents, host->dma.dir);

	if (n != host->dma.num_ents) {
		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
		       mmc_hostname(host->mmc));
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOMEM;
	}

	box = &nc->cmd[0];
	for (i = 0; i < host->dma.num_ents; i++) {
		box->cmd = CMD_MODE_BOX;

		if (i == (host->dma.num_ents - 1))
			box->cmd |= CMD_LC;
		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
		       (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
		       (sg_dma_len(sg) / MCI_FIFOSIZE) ;

		if (data->flags & MMC_DATA_READ) {
			box->src_row_addr = msmsdcc_fifo_addr(host);
			box->dst_row_addr = sg_dma_address(sg);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
			                   (MCI_FIFOSIZE);
			box->row_offset = MCI_FIFOSIZE;

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_SRC_CRCI(crci);
		} else {
			box->src_row_addr = sg_dma_address(sg);
			box->dst_row_addr = msmsdcc_fifo_addr(host);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
			                   (MCI_FIFOSIZE);
			box->row_offset = (MCI_FIFOSIZE << 16);

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_DST_CRCI(crci);
		}
		box++;
		sg++;
	}

	/* location of command block must be 64 bit aligned */
	BUG_ON(host->dma.cmd_busaddr & 0x07);

	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
	                       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;

	return 0;
}
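
msmsdcc_config_dma() walks the list returned by dma_map_sg() and programs one data-mover box per mapped entry from sg_dma_address()/sg_dma_len(). A minimal sketch of that walk follows; sketch_program_sgl and program_segment_fn are hypothetical stand-ins for the hardware-specific descriptor setup.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Hypothetical callback that programs one hardware descriptor. */
typedef void (*program_segment_fn)(void *hw, dma_addr_t addr,
				   unsigned int len, bool last);

/* Map "nents" entries and hand each mapped segment to the hardware.
 * dma_map_sg() may coalesce entries, so only the returned count is
 * walked; the caller later unmaps with the original "nents". */
static int sketch_program_sgl(struct device *dev, struct scatterlist *sgl,
			      int nents, enum dma_data_direction dir,
			      void *hw, program_segment_fn program_segment)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, dir);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i)
		program_segment(hw, sg_dma_address(sg), sg_dma_len(sg),
				i == mapped - 1);

	return mapped;
}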
Example No. 24
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

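	/* Map the input and output scatterlists with their respective
	 * directions: the source is only read by the accelerator, the
	 * destination only written back. */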
	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}
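
rsa_edesc_alloc() stacks three mappings and unwinds them in reverse order through goto labels when a later step fails. The sketch below isolates that unwind pattern for two scatterlists plus one driver-built table mapped with dma_map_single(); struct sketch_edesc and sketch_map_request are invented names, not CAAM API.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct sketch_edesc {			/* hypothetical descriptor */
	void *table;
	size_t table_len;
	dma_addr_t table_dma;
};

/* Map source, destination and the driver-built table; on failure undo
 * every mapping made so far, in reverse order. */
static int sketch_map_request(struct device *dev, struct sketch_edesc *e,
			      struct scatterlist *src, int src_nents,
			      struct scatterlist *dst, int dst_nents)
{
	if (!dma_map_sg(dev, src, src_nents, DMA_TO_DEVICE))
		return -ENOMEM;

	if (!dma_map_sg(dev, dst, dst_nents, DMA_FROM_DEVICE))
		goto unmap_src;

	e->table_dma = dma_map_single(dev, e->table, e->table_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, e->table_dma))
		goto unmap_dst;

	return 0;

unmap_dst:
	dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
	return -ENOMEM;
}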
Example No. 25
/*
 * Routine to configure and start DMA for the MMC card
 */
static int
mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
{
	int sync_dev, sync_dir = 0;
	int dma_ch = 0, ret = 0, err = 1;
	struct mmc_data *data = req->data;

	/*
	 * If for some reason the DMA transfer is still active,
	 * we wait for timeout period and free the dma
	 */
	if (host->dma_ch != -1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(100);
		if (down_trylock(&host->sem)) {
			omap_free_dma(host->dma_ch);
			host->dma_ch = -1;
			up(&host->sem);
			return err;
		}
	} else {
		if (down_trylock(&host->sem))
			return err;
	}

	if (!(data->flags & MMC_DATA_WRITE)) {
		host->dma_dir = DMA_FROM_DEVICE;
		if (host->id == OMAP_MMC1_DEVID)
			sync_dev = OMAP24XX_DMA_MMC1_RX;
		else
			sync_dev = OMAP24XX_DMA_MMC2_RX;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		if (host->id == OMAP_MMC1_DEVID)
			sync_dev = OMAP24XX_DMA_MMC1_TX;
		else
			sync_dev = OMAP24XX_DMA_MMC2_TX;
	}

	ret = omap_request_dma(sync_dev, "MMC/SD", mmc_omap_dma_cb,
			host, &dma_ch);
	if (ret != 0) {
		dev_dbg(mmc_dev(host->mmc),
			"%s: omap_request_dma() failed with %d\n",
			mmc_hostname(host->mmc), ret);
		return ret;
	}

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, host->dma_dir);
	host->dma_ch = dma_ch;

	if (!(data->flags & MMC_DATA_WRITE))
		mmc_omap_config_dma_param(1, host, data);
	else
		mmc_omap_config_dma_param(0, host, data);

	if ((data->blksz % 4) == 0) {
		omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
			(data->blksz / 4), data->blocks, OMAP_DMA_SYNC_FRAME,
			sync_dev, sync_dir);
	} else {
		/* REVISIT: The MMC buffer increments only when the MSB is
		 * written, so a block size that is not a multiple of four
		 * cannot be handled.  Unmap the data and release the DMA
		 * channel before returning the error.
		 */
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			host->dma_dir);
		omap_free_dma(dma_ch);
		host->dma_ch = -1;
		return -EINVAL;
	}

	omap_start_dma(dma_ch);
	return 0;
}
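
Every dma_map_sg() call made in a setup path like the one above has to be balanced by a dma_unmap_sg() with the same device, scatterlist, entry count and direction once the hardware is done with the data. A sketch of that completion side is below; sketch_mmc_dma_done is a hypothetical helper, not part of the OMAP driver.

#include <linux/dma-mapping.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Hypothetical completion hook: undo the mapping created in the setup
 * path.  dma_unmap_sg() is given the same entry count that was passed
 * to dma_map_sg(), not the (possibly smaller) count it returned. */
static void sketch_mmc_dma_done(struct mmc_host *mmc, struct mmc_data *data,
				enum dma_data_direction dir)
{
	dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, dir);

	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blocks * data->blksz;
}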
Example No. 26
static int safexcel_aes_send(struct crypto_async_request *base, int ring,
			     struct safexcel_request *request,
			     struct safexcel_cipher_req *sreq,
			     struct scatterlist *src, struct scatterlist *dst,
			     unsigned int cryptlen, unsigned int assoclen,
			     unsigned int digestsize, u8 *iv, int *commands,
			     int *results)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	unsigned int totlen = cryptlen + assoclen;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
	int i, ret = 0;

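	/* In-place requests share one scatterlist, so it is mapped
	 * bidirectionally; otherwise source and destination get their own
	 * directions, and the source mapping is unwound if mapping the
	 * destination fails. */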
	if (src == dst) {
		nr_src = dma_map_sg(priv->dev, src,
				    sg_nents_for_len(src, totlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, src,
				    sg_nents_for_len(src, totlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, dst,
				    sg_nents_for_len(dst, totlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, src,
				     sg_nents_for_len(src, totlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	if (ctx->aead) {
		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
		       ctx->ipad, ctx->state_sz);
		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
		       ctx->opad, ctx->state_sz);
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, totlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, base, sreq, cdesc);
			if (ctx->aead)
				safexcel_aead_token(ctx, iv, cdesc,
						    sreq->direction, cryptlen,
						    assoclen, digestsize);
			else
				safexcel_skcipher_token(ctx, iv, cdesc,
							cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}
Example No. 27
static int pm8001_task_exec(struct sas_task *task, const int num,
	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	struct pm8001_port *port = NULL;
	struct sas_task *t = task;
	struct pm8001_ccb_info *ccb;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	u32 n = num;
	unsigned long flags = 0, flags_libsas = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &t->task_status;
		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SATA_DEV)
			t->task_done(t);
		return 0;
	}
	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device\n"));
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	do {
		dev = t->dev;
		pm8001_dev = dev->lldd_dev;
		if (DEV_IS_GONE(pm8001_dev)) {
			if (pm8001_dev) {
				PM8001_IO_DBG(pm8001_ha,
					pm8001_printk("device %d not ready.\n",
					pm8001_dev->device_id));
			} else {
				PM8001_IO_DBG(pm8001_ha,
					pm8001_printk("device %016llx not "
					"ready.\n", SAS_ADDR(dev->sas_addr)));
			}
			rc = SAS_PHY_DOWN;
			goto out_done;
		}
		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
		if (!port->port_attached) {
			if (sas_protocol_ata(t->task_proto)) {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;

				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				spin_unlock_irqrestore(dev->sata_dev.ap->lock,
						flags_libsas);
				t->task_done(t);
				spin_lock_irqsave(dev->sata_dev.ap->lock,
					flags_libsas);
				spin_lock_irqsave(&pm8001_ha->lock, flags);
				if (n > 1)
					t = list_entry(t->list.next,
							struct sas_task, list);
				continue;
			} else {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				if (n > 1)
					t = list_entry(t->list.next,
							struct sas_task, list);
				continue;
			}
		}
		rc = pm8001_tag_alloc(pm8001_ha, &tag);
		if (rc)
			goto err_out;
		ccb = &pm8001_ha->ccb_info[tag];

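		/* Non-ATA protocols are mapped here; the ATA/STP path relies
		 * on the scatterlist having been mapped further up the stack,
		 * so only the element count is recorded. */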
		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = dma_map_sg(pm8001_ha->dev,
					t->scatter,
					t->num_scatter,
					t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out_tag;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		t->lldd_task = ccb;
		ccb->n_elem = n_elem;
		ccb->ccb_tag = tag;
		ccb->task = t;
		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SSP:
			if (is_tmf)
				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
					ccb, tmf);
			else
				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
			break;
		default:
			dev_printk(KERN_ERR, pm8001_ha->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			PM8001_IO_DBG(pm8001_ha,
				pm8001_printk("rc is %x\n", rc));
			goto err_out_tag;
		}
		/* TODO: select normal or high priority */
		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);
		pm8001_dev->running_req++;
		if (n > 1)
			t = list_entry(t->list.next, struct sas_task, list);
	} while (--n);
Example No. 28
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval =  (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;
	struct srb_ctx *els;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld):ELS passthru not supported for ISP23xx based "
		    "adapters\n", vha->host_no));
		rval = -EPERM;
		goto done;
	}

	/*  Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
		bsg_job->reply_payload.sg_cnt > 1) {
		DEBUG2(printk(KERN_INFO
			"multiple SG's are not supported for ELS requests"
			" [request_sg_cnt: %x reply_sg_cnt: %x]\n",
			bsg_job->request_payload.sg_cnt,
			bsg_job->reply_payload.sg_cnt));
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			DEBUG2(qla_printk(KERN_WARNING, ha,
			"failed to login port %06X for ELS passthru\n",
			fcport->d_id.b24));
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required  fields of fcport */
		fcport->vha = vha;
		fcport->vp_idx = vha->vp_idx;
		fcport->d_id.b.al_pa =
			bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		"host not online\n"));
		rval = -EIO;
		goto done;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(printk(KERN_INFO
			"dma mapping resulted in different sg counts "
			"[request_sg_cnt: %x dma_request_sg_cnt: %x "
			"reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
			bsg_job->request_payload.sg_cnt, req_sg_cnt,
			bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
Example No. 29
static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
                             struct srp_indirect_buf *id,
                             enum dma_data_direction dir, srp_rdma_t rdma_io,
                             int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct srp_direct_buf *md = NULL;
    struct scatterlist dummy, *sg = NULL;
    dma_addr_t token = 0;
    int err = 0;
    int nmd, nsg = 0, len;

    if (dma_map || ext_desc) {
        iue = (struct iu_entry *) sc->SCp.ptr;
        sg = scsi_sglist(sc);

        dprintk("%p %u %u %d %d\n",
                iue, scsi_bufflen(sc), id->len,
                cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
    }

    nmd = id->table_desc.len / sizeof(struct srp_direct_buf);

    if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
            (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
        md = &id->desc_list[0];
        goto rdma;
    }

    if (ext_desc && dma_map) {
        md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
                                &token, GFP_KERNEL);
        if (!md) {
            eprintk("Can't get dma memory %u\n", id->table_desc.len);
            return -ENOMEM;
        }

        sg_init_one(&dummy, md, id->table_desc.len);
        sg_dma_address(&dummy) = token;
        sg_dma_len(&dummy) = id->table_desc.len;
        err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
                      id->table_desc.len);
        if (err) {
            eprintk("Error copying indirect table %d\n", err);
            goto free_mem;
        }
    } else {
        eprintk("This command uses external indirect buffer\n");
        return -EINVAL;
    }

rdma:
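    /* Map the command's scatterlist in both directions for the RDMA
     * transfer and cap the length at whichever is smaller, the local
     * buffer or the remote descriptor's length. */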
    if (dma_map) {
        nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
                         DMA_BIDIRECTIONAL);
        if (!nsg) {
            eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
            err = -EIO;
            goto free_mem;
        }
        len = min(scsi_bufflen(sc), id->len);
    } else
        len = id->len;

    err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
    if (token && dma_map)
        dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);

    return err;
}
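
srp_indirect_data() fills in sg_dma_address()/sg_dma_len() by hand because the indirect table comes from dma_alloc_coherent(), which already yields a DMA address. For ordinary kmalloc memory the usual route is to let the streaming API do the mapping; a minimal sketch follows, with sketch_map_one and do_transfer as illustrative names.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map a single kernel buffer through a one-entry scatterlist, use it,
 * then unmap it.  "buf" must be addressable by virt_to_page() (e.g.
 * kmalloc memory); "do_transfer" stands in for whatever consumes the
 * mapped address. */
static int sketch_map_one(struct device *dev, void *buf, size_t len,
			  int (*do_transfer)(dma_addr_t addr, size_t len))
{
	struct scatterlist sg;
	int ret;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(dev, &sg, 1, DMA_BIDIRECTIONAL))
		return -ENOMEM;

	ret = do_transfer(sg_dma_address(&sg), sg_dma_len(&sg));

	dma_unmap_sg(dev, &sg, 1, DMA_BIDIRECTIONAL);
	return ret;
}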
Example No. 30
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		enum dma_data_direction dma_data_dir;
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *c;
		u32 burst, *bp;
		u16 buf;

		/*
		 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
		 * and 24xx. Use 16 or 32 word frames when the
		 * blocksize is at least that large. Blocksize is
		 * usually 512 bytes; but not for some SD reads.
		 */
		burst = cpu_is_omap15xx() ? 32 : 64;
		if (burst > data->blksz)
			burst = data->blksz;

		burst >>= 1;

		if (data->flags & MMC_DATA_WRITE) {
			c = host->dma_tx;
			bp = &host->dma_tx_burst;
			buf = 0x0f80 | (burst - 1) << 0;
			dma_data_dir = DMA_TO_DEVICE;
		} else {
			c = host->dma_rx;
			bp = &host->dma_rx_burst;
			buf = 0x800f | (burst - 1) << 8;
			dma_data_dir = DMA_FROM_DEVICE;
		}

		if (!c)
			goto use_pio;

		/* Only reconfigure if we have a different burst size */
		if (*bp != burst) {
			struct dma_slave_config cfg;

			cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
			cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
			cfg.src_maxburst = burst;
			cfg.dst_maxburst = burst;

			if (dmaengine_slave_config(c, &cfg))
				goto use_pio;

			*bp = burst;
		}

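		/* Map the scatterlist for the dmaengine transfer; if nothing
		 * could be mapped, fall back to PIO. */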
		host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
					  dma_data_dir);
		if (host->sg_len == 0)
			goto use_pio;

		tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			goto use_pio;

		OMAP_MMC_WRITE(host, BUF, buf);

		tx->callback = mmc_omap_dma_callback;
		tx->callback_param = host;
		dmaengine_submit(tx);
		host->brs_received = 0;
		host->dma_done = 0;
		host->dma_in_use = 1;
		return;
	}
 use_pio:

	/* Revert to PIO? */
	OMAP_MMC_WRITE(host, BUF, 0x1f1f);
	host->total_bytes_left = data->blocks * block_size;
	host->sg_len = sg_len;
	mmc_omap_sg_to_buf(host);
	host->dma_in_use = 0;
}
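
mmc_omap_prepare_data() leaves the scatterlist mapped and relies on the dmaengine callback to finish the request. The sketch below shows what such a callback generally needs to do; struct sketch_host and sketch_dma_callback are invented for illustration and do not reflect the actual omap callback.

#include <linux/dma-mapping.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Hypothetical fields mirroring what the setup path would have stored:
 * the scatterlist count and direction used at dma_map_sg() time, plus
 * the request to complete. */
struct sketch_host {
	struct mmc_host *mmc;
	struct mmc_request *req;
	struct mmc_data *data;
	unsigned int sg_len;
	enum dma_data_direction dma_data_dir;
};

/* Completion callback registered via tx->callback: unmap the data
 * scatterlist with the same device, list, count and direction used for
 * the mapping, then hand the finished request back to the MMC core. */
static void sketch_dma_callback(void *param)
{
	struct sketch_host *host = param;
	struct mmc_data *data = host->data;

	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
		     host->dma_data_dir);

	data->bytes_xfered = data->blocks * data->blksz;
	mmc_request_done(host->mmc, host->req);
}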