Example No. 1
0
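/*
 * Stop the current data transfer: disable the DMA and data paths, mask the
 * data/FIFO interrupts and, when L2-FIFO DMA is in use, sync and unmap the
 * request's scatterlist before dropping the host's data pointer.
 */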
static void ak98_sdio_stop_data(struct ak98_mci_host *host)
{
	u32 masks;

	PK1("%s\n", __func__);

	writel(0, host->base + AK98MCIDMACTRL);
	writel(0, host->base + AK98MCIDATACTRL);
	masks = readl(host->base + AK98MCIMASK);
	masks &= ~(MCI_DATAIRQMASKS|MCI_FIFOFULLMASK|MCI_FIFOEMPTYMASK);
	writel(masks, host->base + AK98MCIMASK);
	PK("DISABLE DATA IRQ\n"); 
     
#ifdef MCI_USE_L2FIFO_DMA
	if (host->data->flags & MMC_DATA_WRITE) {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), host->data->sg, host->data->sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->data->sg_len, DMA_TO_DEVICE);
	} else {
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), host->data->sg, host->data->sg_len, DMA_FROM_DEVICE);
		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->data->sg_len, DMA_FROM_DEVICE);
	}
#endif

	host->data = NULL;
}
Example No. 2
0
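/*
 * DMA completion callback for a cipher request: terminate the DMA channel,
 * unmap the source/destination scatterlists, free the destination SG table
 * and report the final status to the core.
 */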
static void qce_ablkcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = req->src != req->dst;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

	qce->async_req_done(tmpl->qce, error);
}
Example No. 3
0
static void ss_dma_release(sunxi_ss_t *sss, ss_dma_info_t *info)
{
	dma_unmap_sg(&sss->pdev->dev, info->sgt_for_cp.sgl, info->nents, info->dir);
	sg_free_table(&info->sgt_for_cp);
	dma_unmap_sg(&sss->pdev->dev, info->sg, info->nents, info->dir);
	dma_release_channel(info->chan);
}
Example No. 4
0
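/*
 * Release the DMA resources held by a cipher request: unmap the IV, return
 * any MLLI buffer to its DMA pool and unmap the source (and, if different,
 * destination) scatterlists.
 */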
void cc_unmap_cipher_request(struct device *dev, void *ctx,
				unsigned int ivsize, struct scatterlist *src,
				struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}
Example No. 5
0
static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
		     DMA_TO_DEVICE);
}
Example No. 6
0
void unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;

/*HI3630++ DX: for SCCC bug*/
//HI3630	if (!areq_ctx->mac_buf_dma_addr)
	if (areq_ctx->mac_buf_dma_addr != 0)
/*HI3630--*/
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
			MAX_MAC_SIZE, DMA_BIDIRECTIONAL);

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
/*HI3630++ DX: for SCCC bug*/
//HI3630	if (!areq_ctx->ccm_iv0_dma_addr)
		if (areq_ctx->ccm_iv0_dma_addr != 0)
/*HI3630--*/
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
				AES_BLOCK_SIZE, DMA_TO_DEVICE);
		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg,
			     1, DMA_TO_DEVICE);
	}
/*HI3630++ DX: for SCCC bug*/
//HI3630	if (!areq_ctx->gen_ctx.iv_dma_addr)
	if (areq_ctx->gen_ctx.iv_dma_addr != 0)
/*HI3630--*/
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);

	/*In case a pool was set, a table was
	  allocated and should be released */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		DX_LOG_DEBUG("free MLLI buffer: dma=0x%08lX virt=0x%08X\n",
			     (unsigned long)areq_ctx->mlli_params.mlli_dma_addr,
			     (uint32_t)areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}
	if (areq_ctx->assoc_dma_buf_type != DX_DMA_BUF_NULL) {
		DX_LOG_DEBUG("Unmapping sg assoc: req->assoc=0x%08lX\n",
			     (unsigned long)sg_virt(req->assoc));
		dma_unmap_sg(dev, req->assoc,  areq_ctx->assoc_nents,
			DMA_TO_DEVICE);
	}

	DX_LOG_DEBUG("Unmapping sg src: req->src=0x%08lX\n",
		     (unsigned long)sg_virt(req->src));
	dma_unmap_sg(dev, req->src,
		     areq_ctx->in_nents, DMA_BIDIRECTIONAL);
	if (unlikely(req->src != req->dst)) {
		DX_LOG_DEBUG("Unmapping sg dst: req->dst=0x%08lX\n",
			     (unsigned long)sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     areq_ctx->out_nents, DMA_BIDIRECTIONAL);
	}
}
Example No. 7
0
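/*
 * Tear down the DMA mappings used for the transfer: a read uses two
 * single-entry scatterlists (command write plus data read), a write uses one
 * two-entry scatterlist mapped towards the device.
 */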
static void mxs_i2c_dma_finish(struct mxs_i2c_dev *i2c)
{
	if (i2c->dma_read) {
		dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
		dma_unmap_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
	}
}
Example No. 8
0
/* THIS FUNCTION ONLY EXISTS FOR DEBUGGING AND ONLY SUPPORTS TWO CHANNELS */
static void asrc_polling_debug(struct asrc_pair_params *params)
{
	enum asrc_pair_index index = params->index;
	u32 *in24 = params->input_dma_total.dma_vaddr;
	u32 dma_len = params->input_dma_total.length / (params->channel_nums * 4);
	u32 size, i, j, t_size, reg;
	u32 *reg24 = params->output_dma_total.dma_vaddr;

	t_size = 0;

	for (i = 0; i < dma_len; ) {
		for (j = 0; j < 2; j++) {
			asrc_write_one_to_input_FIFO(index, *in24);
			in24++;
			asrc_write_one_to_input_FIFO(index, *in24);
			in24++;
			i++;
		}
		udelay(50);
		udelay(50 * params->output_sample_rate / params->input_sample_rate);

		size = asrc_get_output_FIFO_size(index);
		for (j = 0; j < size; j++) {
			reg = asrc_read_one_from_output_FIFO(index);
			*(reg24) = reg;
			reg24++;
			reg = asrc_read_one_from_output_FIFO(index);
			*(reg24) = reg;
			reg24++;
		}
		t_size += size;
	}

	mdelay(1);
	size = asrc_get_output_FIFO_size(index);
	for (j = 0; j < size; j++) {
		reg = asrc_read_one_from_output_FIFO(index);
		*(reg24) = reg;
		reg24++;
		reg = asrc_read_one_from_output_FIFO(index);
		*(reg24) = reg;
		reg24++;
	}
	t_size += size;

	params->output_dma_total.length = t_size * params->channel_nums * 4;
	params->output_last_period.length = 0;

	/* dma_unmap_sg() takes dma_data_direction values matching the mapping */
	dma_unmap_sg(NULL, params->input_sg, params->input_sg_nodes,
			DMA_TO_DEVICE);
	dma_unmap_sg(NULL, params->output_sg, params->output_sg_nodes,
			DMA_FROM_DEVICE);

	complete(&params->input_complete);
	complete(&params->lastperiod_complete);
}
Example No. 9
0
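/*
 * Run one DMA transfer to or from the NAND controller: configure the slave
 * channel, map a single-entry scatterlist for the buffer, submit the
 * descriptor, wait for completion and unmap the buffer again.
 */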
static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
			    void *mem, int len, enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	host->dma_slave_config.direction = dir;
	host->dma_slave_config.src_addr = dma;
	host->dma_slave_config.dst_addr = dma;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 4;
	host->dma_slave_config.dst_maxburst = 4;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		return -ENXIO;
	}

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);

	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
Example No. 10
0
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}
Example No. 11
0
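/*
 * Finish a DMA-assisted AES operation: save the updated IV, stop both DMA
 * channels and either copy the result out of the bounce buffer (slow path)
 * or unmap the caller's scatterlists (fast path) before completing the
 * request.
 */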
static int aes_dma_stop(struct aes_hwa_ctx *ctx)
{
	struct tf_crypto_aes_operation_state *state =
		crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
	int err = 0;
	size_t count;

	dprintk(KERN_INFO "aes_dma_stop(%p)\n", ctx);

	tf_aes_save_registers(state);

	if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
		u32 *ptr = (u32 *) ctx->req->info;

		ptr[0] = state->AES_IV_0;
		ptr[1] = state->AES_IV_1;
		ptr[2] = state->AES_IV_2;
		ptr[3] = state->AES_IV_3;
	}

	OUTREG32(&paes_reg->AES_SYSCONFIG, 0);

	omap_stop_dma(ctx->dma_lch_in);
	omap_stop_dma(ctx->dma_lch_out);

	tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);

	if (!(ctx->flags & FLAGS_FAST)) {
		dma_sync_single_for_device(NULL, ctx->dma_addr_out,
			ctx->dma_size, DMA_FROM_DEVICE);

#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
		tf_aes_fault_injection(paes_reg->AES_CTRL, ctx->buf_out);
#endif

		/* Copy data */
		count = sg_copy(&ctx->out_sg, &ctx->out_offset, ctx->buf_out,
			ctx->buflen, ctx->dma_size, 1);
		if (count != ctx->dma_size)
			err = -EINVAL;
	} else {
		dma_unmap_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);

#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
		tf_aes_fault_injection(paes_reg->AES_CTRL,
			sg_virt(ctx->out_sg));
#endif
	}

	if (err || !ctx->total)
		ctx->req->base.complete(&ctx->req->base, err);

	return err;
}
Example No. 12
0
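/*
 * Map the request's source scatterlist and the result buffer, hand both to
 * the DMA engine and start the hash operation; the mappings are unwound on
 * any failure.
 */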
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}

	/* dma_map_sg() returns 0 on failure, never a negative errno */
	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}
Example No. 13
0
static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}
Example No. 14
0
/**
 * iser_finalize_rdma_unaligned_sg - release the bounce buffer used for an
 * unaligned scatterlist, copying the data back to the original sg for
 * ISER_DIR_IN transfers
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				     enum iser_data_dir         cmd_dir)
{
	struct device *dma_device;
	struct iser_data_buf *mem_copy;
	unsigned long  cmd_data_len;

	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
	mem_copy   = &iser_ctask->data_copy[cmd_dir];

	if (cmd_dir == ISER_DIR_OUT)
		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
			     DMA_TO_DEVICE);
	else
		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
			     DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem	= mem_copy->copy_buf;

		sg	= (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
		sg_size = iser_ctask->data[ISER_DIR_IN].size;

		for (p = mem, i = 0; i < sg_size; i++){
			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
			memcpy(to + sg[i].offset,
			       p,
			       sg[i].length);
			kunmap_atomic(to, KM_SOFTIRQ0);
			p += sg[i].length;
		}
	}

	cmd_data_len = iser_ctask->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}
Example No. 15
0
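/*
 * Consume the result descriptors of a completed cipher request, then unmap
 * the source/destination scatterlists (bidirectionally when both are the
 * same list) and tell the caller the request may complete.
 */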
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      struct scatterlist *src,
				      struct scatterlist *dst,
				      unsigned int cryptlen,
				      struct safexcel_cipher_req *sreq,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (likely(!*ret))
			*ret = safexcel_rdesc_check_errors(priv, rdesc);

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (src == dst) {
		dma_unmap_sg(priv->dev, src,
			     sg_nents_for_len(src, cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, src,
			     sg_nents_for_len(src, cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, dst,
			     sg_nents_for_len(dst, cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}
Example No. 16
0
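/*
 * Dispatch one AES request to the hardware: program the descriptors, wait
 * for the DMA completion (with a timeout) and unmap the input/output
 * scatterlists afterwards.
 */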
static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	/* unmap with the same directions the buffers were mapped with */
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}
Example No. 17
0
static void sahara_aes_done_task(unsigned long data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	spin_lock(&dev->lock);
	clear_bit(FLAGS_BUSY, &dev->flags);
	spin_unlock(&dev->lock);

	dev->req->base.complete(&dev->req->base, dev->error);
}
Example No. 18
0
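/*
 * dma-buf detach callback: unmap the attachment's scatter-gather table if it
 * was mapped, free the table and release the per-attachment state.
 */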
static void _tee_shm_detach_dma_buf(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	struct tee_shm_attach *tee_shm_attach = attach->priv;
	struct sg_table *sgt;
	struct tee_shm *shm;
	struct tee *tee;

	shm = dmabuf->priv;
	tee = shm->tee;

	INMSG();

	if (!tee_shm_attach) {
		OUTMSG(0);
		return;
	}

	sgt = &tee_shm_attach->sgt;

	if (tee_shm_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			tee_shm_attach->dir);

	sg_free_table(sgt);
	devm_kfree(_DEV(tee), tee_shm_attach);
	attach->priv = NULL;
	OUTMSG(0);
}
Example No. 19
0
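/*
 * Perform RDMA for a direct data buffer: optionally map the command's
 * scatterlist, run the rdma_io callback and unmap the list again.
 */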
static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
                           enum dma_data_direction dir, srp_rdma_t rdma_io,
                           int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct scatterlist *sg = NULL;
	int err, nsg = 0, len;

	if (dma_map) {
		iue = (struct iu_entry *) sc->SCp.ptr;
		sg = scsi_sglist(sc);

		dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc),
			md->len, scsi_sg_count(sc));

		nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			printk("fail to map %p %d\n", iue, scsi_sg_count(sc));
			return 0;
		}
		len = min(scsi_bufflen(sc), md->len);
	} else
		len = md->len;

	err = rdma_io(sc, sg, nsg, md, 1, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

	return err;
}
Example No. 20
0
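/*
 * Prepare a slave DMA descriptor for either the data scatterlist or the PIO
 * command words; the scatterlist is unmapped again if descriptor preparation
 * fails.
 */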
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
	struct mxs_mmc_host *host, unsigned long flags)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = host->data;
	struct scatterlist *sgl;
	unsigned int sg_len;

	if (data) {
		/* data */
		dma_map_sg(mmc_dev(host->mmc), data->sg,
			   data->sg_len, ssp->dma_dir);
		sgl = data->sg;
		sg_len = data->sg_len;
	} else {
		/* pio */
		sgl = (struct scatterlist *) ssp->ssp_pio_words;
		sg_len = SSP_PIO_NUM;
	}

	desc = dmaengine_prep_slave_sg(ssp->dmach,
				sgl, sg_len, ssp->slave_dirn, flags);
	if (desc) {
		desc->callback = mxs_mmc_dma_irq_callback;
		desc->callback_param = host;
	} else {
		if (data)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, ssp->dma_dir);
	}

	return desc;
}
Example No. 21
0
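/*
 * Free every statistics buffer: IOMMU-backed buffers are unmapped and their
 * virtual area released, coherent buffers are simply freed.
 */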
static void ispstat_bufs_free(struct ispstat *stat)
{
	struct isp_device *isp = dev_get_drvdata(stat->isp->dev);
	int i;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];

		if (!IS_COHERENT_BUF(stat)) {
			if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
				continue;
			if (buf->iovm)
				dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
					     buf->iovm->sgt->nents,
					     DMA_FROM_DEVICE);
			iommu_vfree(isp->iommu, buf->iommu_addr);
		} else {
			if (!buf->virt_addr)
				continue;
			dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
					  buf->virt_addr, buf->dma_addr);
		}
		buf->iommu_addr = 0;
		buf->iovm = NULL;
		buf->dma_addr = 0;
		buf->virt_addr = NULL;
		buf->empty = 1;
	}

	dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
		stat->subdev.name);

	stat->buf_alloc_size = 0;
	stat->active_buf = NULL;
}
Example No. 22
0
static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}
Example No. 23
0
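/*
 * Finish a data transfer: stop and unmap the DMA if it is still pending,
 * then translate the status bits into an MMC error code and return it.
 */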
static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
		imx_dma_disable(host->dma);
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
			     host->dma_dir);
	}

	if (stat & STATUS_ERR_MASK) {
		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
		if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
			data->error = MMC_ERR_BADCRC;
		else if (stat & STATUS_TIME_OUT_READ)
			data->error = MMC_ERR_TIMEOUT;
		else
			data->error = MMC_ERR_FAILED;
	} else {
		data->bytes_xfered = host->dma_size;
	}

	data_error = data->error;

	host->data = NULL;

	return data_error;
}
Example No. 24
0
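/*
 * Release the DDP resources of a command: abort the connection if DDP did
 * not complete, return the page-pod tag and unmap (or unpin) the
 * scatterlist.
 */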
void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

	if (ccmd->release) {
		struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

		if (ttinfo->sgl) {
			struct cxgbit_sock *csk = conn->context;
			struct cxgbit_device *cdev = csk->com.cdev;
			struct cxgbi_ppm *ppm = cdev2ppm(cdev);

			/* Abort the TCP conn if DDP is not complete to
			 * avoid any possibility of DDP after freeing
			 * the cmd.
			 */
			if (unlikely(cmd->write_data_done !=
				     cmd->se_cmd.data_length))
				cxgbit_abort_conn(csk);

			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

			dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
				     ttinfo->nents, DMA_FROM_DEVICE);
		} else {
			put_page(sg_page(&ccmd->sg));
		}

		ccmd->release = false;
	}
}
Example No. 25
0
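/*
 * Release the DMA channel state of a data transfer: terminate the channel on
 * error and unmap the scatterlist with the direction used for the transfer.
 */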
static void
mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
		     int abort)
{
	enum dma_data_direction dma_data_dir;
	struct device *dev = mmc_dev(host->mmc);
	struct dma_chan *c;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		c = host->dma_tx;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		c = host->dma_rx;
	}
	if (c) {
		if (data->error) {
			dmaengine_terminate_all(c);
			/* Claim nothing transferred on error... */
			data->bytes_xfered = 0;
		}
		dev = c->device->dev;
	}
	dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
}
Example No. 26
0
static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	struct platform_device *op = esp->dev;

	dma_unmap_sg(&op->dev, sg, num_sg, dir);
}
Example No. 27
0
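/*
 * Data-done handler: unmap the data scatterlist, translate the controller
 * status bits into an error code and record the number of bytes transferred.
 */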
static void hi_mci_data_done(struct himci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	himci_trace(2, "begin");
	himci_assert(host);
	himci_assert(data);

	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);

	if (stat & (HTO_INT_STATUS | DRTO_INT_STATUS)) {
		data->error = -ETIMEDOUT;
		himci_trace(3, "irq data status stat = 0x%x is timeout error!",
				stat);
	} else if (stat & (EBE_INT_STATUS | SBE_INT_STATUS | FRUN_INT_STATUS
				| DCRC_INT_STATUS)) {
		data->error = -EILSEQ;
		himci_trace(3, "irq data status stat = 0x%x is data error!",
				stat);
	}

	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	host->data = NULL;
}
Example No. 28
0
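/*
 * Complete a finished data transfer: account the transferred bytes, abort
 * and unmap the DMA if it was used, and either finish the request or issue
 * the stop command.
 */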
static void mmc_davinci_xfer_done(struct mmc_davinci_host *host,
				  struct mmc_data *data)
{
	unsigned long flags;
	host->data = NULL;
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;
	if (data->error == MMC_ERR_NONE)
		data->bytes_xfered += data->blocks * data->blksz;

	if (host->do_dma) {
		davinci_abort_dma(host);

		dma_unmap_sg(host->mmc->dev, data->sg, host->sg_len,
			     (data->flags & MMC_DATA_WRITE) ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	if (data->error == MMC_ERR_TIMEOUT) {
		spin_lock_irqsave(&host->mmc_lock, flags);
		host->is_card_busy = 0;
		spin_unlock_irqrestore(&host->mmc_lock, flags);
		mmc_request_done(host->mmc, data->mrq);
		return;
	}

	if (!data->stop) {
		spin_lock_irqsave(&host->mmc_lock, flags);
		host->is_card_busy = 0;
		spin_unlock_irqrestore(&host->mmc_lock, flags);
		mmc_request_done(host->mmc, data->mrq);
		return;
	}
	mmc_davinci_start_command(host, data->stop);
}
Example No. 29
0
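/*
 * Map the data scatterlist and start a DMA transfer; fall back to PIO (and
 * unmap again) if any segment length is not a multiple of the block size.
 */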
static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
					  struct mmc_request *req)
{
	int use_dma = 1, i;
	struct mmc_data *data = host->data;
//	int block_size = (1 << data->blksz_bits);

	host->sg_len = dma_map_sg(host->mmc->dev, data->sg, host->sg_len,
				  (data->flags & MMC_DATA_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* Decide if we can use DMA */
	for (i = 0; i < host->sg_len; i++) {
		if ((data->sg[i].length % data->blksz/* block_size*/) != 0) {
			use_dma = 0;
			break;
		}
	}

	if (!use_dma) {
		dma_unmap_sg(host->mmc->dev, data->sg, host->sg_len,
			     (data->flags & MMC_DATA_WRITE) ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		return -1;
	}

	host->do_dma = 1;

	mmc_davinci_send_dma_request(host, req);

	return 0;
}
Example No. 30
0
void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}