Example #1
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 */
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else {
			memcpy(miter.addr, buf + offset, len);
			flush_kernel_dcache_page(miter.page);
		}

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}
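
All examples in this collection share the same iterator lifecycle: sg_miter_start() binds the iterator to the list, each successful sg_miter_next() maps one chunk and exposes it as miter.addr/miter.length, and sg_miter_stop() unmaps it. With SG_MITER_ATOMIC the chunk is mapped with kmap_atomic(), which is why callers such as the one above bracket the loop with local_irq_save()/local_irq_restore(). A minimal sketch of the bare pattern, assuming a process-context caller (so no SG_MITER_ATOMIC) and a hypothetical helper name:

/*
 * Hypothetical helper: zero the first buflen bytes covered by an SG
 * list. Not taken from any example here; it only illustrates the
 * start/next/stop lifecycle they all share.
 */
static size_t sg_zero_buffer_sketch(struct scatterlist *sgl,
				    unsigned int nents, size_t buflen)
{
	struct sg_mapping_iter miter;
	size_t offset = 0;

	/* SG_MITER_TO_SG: we only write into the SG pages */
	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);

	while (sg_miter_next(&miter) && offset < buflen) {
		size_t len = min(miter.length, buflen - offset);

		memset(miter.addr, 0, len);
		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}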
Example #2
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copied = 0;

	while (1) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		if (p->sg_src_left <= len - copied) {
			memcpy(dbuf + copied, sbuf, p->sg_src_left);
			copied += p->sg_src_left;
			p->sg_src_left = 0;
			if (copied >= len)
				break;
		} else {
			int copy_len = len - copied;
			memcpy(dbuf + copied, sbuf, copy_len);
			p->src_start += copy_len;
			p->sg_src_left -= copy_len;
			break;
		}
	}
}
Example #3
/* Copy data to userspace process from sg. */
static __must_check int sg_copy_to_user_buffer(
	struct scatterlist *sg,
	unsigned int nents,
	unsigned char __user *dst,
	size_t nbytes)
{
	int ret = 0;
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, nents, SG_MITER_FROM_SG);
	while (sg_miter_next(&miter) && nbytes > 0 && miter.addr) {
		size_t len;

		len = min(miter.length, nbytes);
		if (__copy_to_user(dst, miter.addr, len)) {
			ret = -EINVAL;
			goto error_sg_copy_to_user_buffer;
		}
		nbytes -= len;
		dst += len;
	}
	/* If the provided buffer is well-formed, all bytes have been copied. */
	BUG_ON(nbytes != 0);
 error_sg_copy_to_user_buffer:
	sg_miter_stop(&miter);
	return ret;
}
Example #4
static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* Auto inc from offset zero */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}
Example #5
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 */
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}
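
This is the later revision of Example #1: the copy direction is now passed to the iterator via SG_MITER_FROM_SG/SG_MITER_TO_SG, so sg_miter_stop() can handle the cache flushing that Example #1 had to do by hand with flush_kernel_dcache_page(). In lib/scatterlist.c of the same era the function is exposed through two thin wrappers, roughly:

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	/* linear buffer -> SG list */
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}

size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	/* SG list -> linear buffer */
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}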
Example #6
static bool sg_dwiter_next(struct sg_mapping_iter *miter)
{
	if (!sg_miter_next(miter))
		return false;

	miter->consumed = 0;
	return true;
}
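
Context for this helper: each sg_miter_next() sets miter->consumed = miter->length, i.e. it assumes the caller will use the whole mapped chunk. A caller that uses less lowers miter->consumed before the next sg_miter_next()/sg_miter_stop() call, and the iterator resumes mid-chunk; resetting it to zero, as above, lets the surrounding code account for consumption incrementally (the same bookkeeping as smi->consumed in Example #4 and the poll_timeout paths of Examples #7 and #21). A sketch of the partial-consumption idiom, with a hypothetical helper name:

/*
 * Hypothetical helper: copy exactly nbytes out of an SG list.
 * Setting miter.consumed below miter.length makes the next
 * sg_miter_next() resume inside the same chunk.
 */
static size_t sg_take_bytes_sketch(struct scatterlist *sgl,
				   unsigned int nents, void *dst,
				   size_t nbytes)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
	while (copied < nbytes && sg_miter_next(&miter)) {
		size_t take = min(miter.length, nbytes - copied);

		memcpy(dst + copied, miter.addr, take);
		copied += take;
		miter.consumed = take;	/* may be < miter.length */
	}
	sg_miter_stop(&miter);
	return copied;
}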
Example #7
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
	struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length / 4;
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
Example #8
/* May need to reorganize buffer for scatter/gather */
static void mmc_dma_tx_start(struct mmci_host *host)
{
	unsigned int len;
	int dma_len;
	struct scatterlist *sg;
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *reqdata = mrq->data;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *dmaaddr;
	char *src_buffer, *dst_buffer;
	unsigned long flags;

	local_irq_save(flags);

	sg = reqdata->sg;
	len = reqdata->sg_len;

	/* Only 1 segment and no need to copy? */
	if (len == 1 && !dmac_drvdat.preallocated_tx_buf) {
		dma_len = dma_map_sg(mmc_dev(host->mmc), reqdata->sg,
			reqdata->sg_len, DMA_TO_DEVICE);
		if (dma_len == 0) {
			local_irq_restore(flags);
			return;
		}

		dmaaddr = (void *) sg_dma_address(&sg[0]);
		dmac_drvdat.mapped = 1;
	} else {
		/* Move data to contiguous buffer first, then transfer it */
		dst_buffer = (char *) dmac_drvdat.dma_v_base;
		while (sg_miter_next(sg_miter)) {
			/*
			 * Map the current scatter buffer, copy data, and unmap
			 */
			src_buffer = sg_miter->addr;
			memcpy(dst_buffer, src_buffer, sg_miter->length);
			dst_buffer += sg_miter->length;
		}

		sg_miter_stop(sg_miter);

		dmac_drvdat.mapped = 0;
		dmaaddr = (void *) dmac_drvdat.dma_handle_tx;
	}

	lpc178x_dma_start_pflow_xfer(DMA_CH_SDCARD_TX, dmaaddr,
		(void *) SD_FIFO((u32)host->base), 1);

	local_irq_restore(flags);
}
Example #9
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
					 unsigned int good_bytes)
{
	struct request *rq = scmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct sg_mapping_iter miter;
	struct blk_zone_report_hdr hdr;
	struct blk_zone zone;
	unsigned int offset, bytes = 0;
	unsigned long flags;
	u8 *buf;

	if (good_bytes < 64)
		return;

	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));

	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter) && bytes < good_bytes) {

		buf = miter.addr;
		offset = 0;

		if (bytes == 0) {
			/* Set the report header */
			hdr.nr_zones = min_t(unsigned int,
					 (good_bytes - 64) / 64,
					 get_unaligned_be32(&buf[0]) / 64);
			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
			offset += 64;
			bytes += 64;
		}

		/* Parse zone descriptors */
		while (offset < miter.length && hdr.nr_zones) {
			WARN_ON(offset > miter.length);
			buf = miter.addr + offset;
			sd_zbc_parse_report(sdkp, buf, &zone);
			memcpy(buf, &zone, sizeof(struct blk_zone));
			offset += 64;
			bytes += 64;
			hdr.nr_zones--;
		}

		if (!hdr.nr_zones)
			break;

	}

	sg_miter_stop(&miter);
	local_irq_restore(flags);
}
Example #10
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}
Example #11
static size_t vtl_sg_copy_user(struct scatterlist *sgl, unsigned int nents,
				__user void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	/*
	 * Do not use the SG_MITER_ATOMIC flag on the sg_miter_start() call:
	 * copy_{to,from}_user() may fault and sleep, which is not allowed
	 * under an atomic (kmap_atomic) mapping.
	 */
	unsigned int sg_flags = 0;
	unsigned int rem;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)
	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
#endif

	sg_miter_start(&miter, sgl, nents, sg_flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			rem = copy_to_user(buf + offset, miter.addr, len);
		else {
			rem = copy_from_user(miter.addr, buf + offset, len);
			flush_kernel_dcache_page(miter.page);
		}
		if (rem)
			printk(KERN_DEBUG "mhvtl: %s(): "
				"copy_%s_user() failed, rem %ld, buf 0x%llx, "
				"miter.addr 0x%llx, len %d\n",
				__func__, (to_buffer) ? "to" : "from",
				(long)rem,
				(long long unsigned int)(buf + offset),
				(long long unsigned int)miter.addr, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
Example #12
static void dequeue_complete_req(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.total_req_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);

		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);

		cpg->p.sg_dst_left -= dst_copy;
		cpg->p.crypt_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (cpg->p.crypt_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.total_req_bytes < req->nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		mv_process_current_q(0);
	} else {
		sg_miter_stop(&cpg->p.src_sg_it);
		sg_miter_stop(&cpg->p.dst_sg_it);
		mv_crypto_algo_completion();
		cpg->eng_st = ENGINE_IDLE;
		req->base.complete(&req->base, 0);
	}
}
Example #13
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
Example #14
/* Transfers actual data using PIO. */
static int r592_transfer_fifo_pio(struct r592_device *dev)
{
	unsigned long flags;

	bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
	struct sg_mapping_iter miter;

	kfifo_reset(&dev->pio_fifo);

	if (!dev->req->long_data) {
		if (is_write) {
			r592_write_fifo_pio(dev, dev->req->data,
					    dev->req->data_len);
			r592_flush_fifo_write(dev);
		} else
			r592_read_fifo_pio(dev, dev->req->data,
					   dev->req->data_len);
		return 0;
	}

	local_irq_save(flags);
	sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
		       (is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG));

	/* Do the transfer fifo<->memory */
	while (sg_miter_next(&miter))
		if (is_write)
			r592_write_fifo_pio(dev, miter.addr, miter.length);
		else
			r592_read_fifo_pio(dev, miter.addr, miter.length);

	/* Write last few non aligned bytes */
	if (is_write)
		r592_flush_fifo_write(dev);

	sg_miter_stop(&miter);
	local_irq_restore(flags);
	return 0;
}
Example #15
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
	size_t chunk_len, pad_left;
	struct sg_mapping_iter miter;

	if (!err) {
		if (pad_len) {
			sg_miter_start(&miter, req->dst,
					sg_nents_for_len(req->dst, pad_len),
					SG_MITER_ATOMIC | SG_MITER_TO_SG);

			pad_left = pad_len;
			while (pad_left) {
				sg_miter_next(&miter);

				chunk_len = min(miter.length, pad_left);
				memset(miter.addr, 0, chunk_len);
				pad_left -= chunk_len;
			}

			sg_miter_stop(&miter);
		}

		sg_pcopy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, ctx->key_size),
				req_ctx->out_buf, req_ctx->child_req.dst_len,
				pad_len);
	}
	req->dst_len = ctx->key_size;

	kfree(req_ctx->in_buf);
	kzfree(req_ctx->out_buf);

	return err;
}
Example #16
static void setup_data_in(struct ablkcipher_request *req)
{
	int ret;
	void *buf;

	if (!cpg->p.sg_src_left) {
		ret = sg_miter_next(&cpg->p.src_sg_it);
		BUG_ON(!ret);
		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
		cpg->p.src_start = 0;
	}

	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);

	buf = cpg->p.src_sg_it.addr;
	buf += cpg->p.src_start;

	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);

	cpg->p.sg_src_left -= cpg->p.crypt_len;
	cpg->p.src_start += cpg->p.crypt_len;
}
Example #17
static void bcm2835_sdhost_write_block_pio(struct bcm2835_host *host)
{
	unsigned long flags;
	size_t blksize, len;
	u32 *buf;

	blksize = host->data->blksz;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);
		BUG_ON(len % 4);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (!data_transfer_wait(host))
				break;

			bcm2835_sdhost_write(host, *(buf++), SDDATA);
			len -= 4;
		}

		if (host->data->error)
			break;
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
Example #18
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
Example #19
static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset = req->rd_offset;
	u32 src_len;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -EINVAL;

	rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id, read_rd ? "Read" : "Write",
			task->task_lba, req->rd_size, req->rd_page,
			rd_offset);

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, task->task_sg, task->task_sg_nents,
			read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (req->rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		len = min((u32)m.length, src_len);
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (read_rd)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		req->rd_size -= len;
		if (!req->rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		req->rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (req->rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, req->rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return -EINVAL;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);
	return 0;
}
Example #20
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
Example #21
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint16_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;

		/* This can go away once MIPS implements
		 * flush_kernel_dcache_page */
		flush_dcache_page(miter->page);
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometimes one word more in the fifo
	 * than requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
Example #22
/* Generic function that supports SGs whose size is not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	char buf[4 * SS_RX_MAX];	/* buffer for linearizing SG src */
	char bufo[4 * SS_TX_MAX];	/* buffer for linearizing SG dst */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If we only have SGs with a size that is a multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

	while (oleft) {
		if (ileft) {
			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes: linearize
				 * into buf. Here todo is in bytes. After the
				 * copy, once we have a multiple of 4 we must
				 * be able to write all of buf in one pass,
				 * which is why we min() with rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		if (!tx_cnt)
			continue;
		/* todo in 4bytes word */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; read as much as
			 * possible so as to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than the
				 * remaining SG size, no more than the remaining
				 * buffer; no need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}