/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from or to
 * @buflen: The number of bytes to copy
 * @to_buffer: transfer direction (non zero == from an sg list to a
 *             buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t buflen, int to_buffer)
{
    unsigned int offset = 0;
    struct sg_mapping_iter miter;
    unsigned long flags;
    unsigned int sg_flags = SG_MITER_ATOMIC;

    if (to_buffer)
        sg_flags |= SG_MITER_FROM_SG;
    else
        sg_flags |= SG_MITER_TO_SG;

    sg_miter_start(&miter, sgl, nents, sg_flags);

    arch_cpu_irq_save(flags);

    while (sg_miter_next(&miter) && offset < buflen) {
        unsigned int len;

        len = min(miter.length, buflen - offset);

        if (to_buffer)
            memcpy(buf + offset, miter.addr, len);
        else
            memcpy(miter.addr, buf + offset, len);

        offset += len;
    }

    sg_miter_stop(&miter);
    arch_cpu_irq_restore(flags);

    return offset;
}
bool sg_miter_next(struct sg_mapping_iter *miter)
{
    unsigned int off, len;

    if (!miter->__nents)
        return false;

    sg_miter_stop(miter);

    while (miter->__offset == miter->__sg->length) {
        if (--miter->__nents) {
            miter->__sg = sg_next(miter->__sg);
            miter->__offset = 0;
        } else
            return false;
    }

    off = miter->__sg->offset + miter->__offset;
    len = miter->__sg->length - miter->__offset;

    miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
    off &= ~PAGE_MASK;
    miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
    miter->consumed = miter->length;

    if (miter->__flags & SG_MITER_ATOMIC)
        miter->addr = kmap_atomic(miter->page) + off;
    else
        miter->addr = kmap(miter->page) + off;

    return true;
}
static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
                    u64 dbuf)
{
    struct sg_mapping_iter *smi = &host->smi;
    int data_len = req->data->blocks * req->data->blksz;
    int bytes_xfered, shift = -1;
    u64 dat = 0;

    /* Auto inc from offset zero */
    writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

    for (bytes_xfered = 0; bytes_xfered < data_len;) {
        if (smi->consumed >= smi->length) {
            if (!sg_miter_next(smi))
                break;
            smi->consumed = 0;
        }

        if (shift < 0) {
            dat = readq(host->base + MIO_EMM_BUF_DAT(host));
            shift = 56;
        }

        while (smi->consumed < smi->length && shift >= 0) {
            ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
            bytes_xfered++;
            smi->consumed++;
            shift -= 8;
        }
    }

    sg_miter_stop(smi);
    req->data->bytes_xfered = bytes_xfered;
    req->data->error = 0;
}
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been
 *   started using sg_miter_start(). On successful return,
 *   @miter->page, @miter->addr and @miter->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
 *   @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
    unsigned int off, len;

    /* check for end and drop resources from the last iteration */
    if (!miter->__nents)
        return false;

    sg_miter_stop(miter);

    /* get to the next sg if necessary. __offset is adjusted by stop */
    while (miter->__offset == miter->__sg->length) {
        if (--miter->__nents) {
            miter->__sg = sg_next(miter->__sg);
            miter->__offset = 0;
        } else
            return false;
    }

    /* map the next page */
    off = miter->__sg->offset + miter->__offset;
    len = miter->__sg->length - miter->__offset;

    miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
    off &= ~PAGE_MASK;
    miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
    miter->consumed = miter->length;

    if (miter->__flags & SG_MITER_ATOMIC)
        miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
    else
        miter->addr = kmap(miter->page) + off;

    return true;
}
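The kernel-doc above spells out the calling contract: start the iterator, call sg_miter_next() until it returns false, stop it, and keep interrupts disabled for the whole window when SG_MITER_ATOMIC is used. A minimal caller sketch follows; it is an illustration only, not taken from any of the drivers collected here, and the helper name sg_zero_fill() is hypothetical.

/*
 * Minimal usage sketch for the sg_miter API documented above.
 * Assumes <linux/scatterlist.h>; sg_zero_fill() is a made-up name.
 */
static void sg_zero_fill(struct scatterlist *sgl, unsigned int nents)
{
    struct sg_mapping_iter miter;
    unsigned long flags;

    sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);

    local_irq_save(flags);          /* SG_MITER_ATOMIC: IRQs off until stop */
    while (sg_miter_next(&miter))   /* miter.addr/length cover one mapped page */
        memset(miter.addr, 0, miter.length);
    sg_miter_stop(&miter);          /* unmaps and flushes the last page */
    local_irq_restore(flags);
}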
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from or to
 * @buflen: The number of bytes to copy
 * @to_buffer: transfer direction (non zero == from an sg list to a
 *             buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t buflen, int to_buffer)
{
    unsigned int offset = 0;
    struct sg_mapping_iter miter;
    unsigned long flags;

    sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

    local_irq_save(flags);

    while (sg_miter_next(&miter) && offset < buflen) {
        unsigned int len;

        len = min(miter.length, buflen - offset);

        if (to_buffer)
            memcpy(buf + offset, miter.addr, len);
        else {
            memcpy(miter.addr, buf + offset, len);
#if 0
            flush_kernel_dcache_page(miter.page);
#endif
        }

        offset += len;
    }

    sg_miter_stop(&miter);
    local_irq_restore(flags);

    return offset;
}
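For reference, this static helper is normally reached through two exported entry points that fix the @to_buffer direction. The pairing below is a sketch paraphrased from memory of older lib/scatterlist.c revisions, not quoted from a specific kernel release; later kernels changed the argument to a bool and const-qualified the source buffer.

/*
 * Sketch of the exported wrappers around sg_copy_buffer() -- an
 * assumption about the surrounding code, prototypes may differ
 * between kernel versions.
 */
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           void *buf, size_t buflen)
{
    return sg_copy_buffer(sgl, nents, buf, buflen, 0);  /* buffer -> sg */
}

size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
    return sg_copy_buffer(sgl, nents, buf, buflen, 1);  /* sg -> buffer */
}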
/* Copy data to userspace process from sg. */
static __must_check int sg_copy_to_user_buffer(struct scatterlist *sg,
                                               unsigned int nents,
                                               unsigned char __user *dst,
                                               size_t nbytes)
{
    int ret = 0;
    struct sg_mapping_iter miter;

    sg_miter_start(&miter, sg, nents, SG_MITER_FROM_SG);

    while (sg_miter_next(&miter) && nbytes > 0 && miter.addr) {
        size_t len;

        len = min(miter.length, nbytes);
        if (__copy_to_user(dst, miter.addr, len)) {
            ret = -EINVAL;
            goto error_sg_copy_to_user_buffer;
        }
        nbytes -= len;
        dst += len;
    }

    /* If the provided buffer is proper all bytes are copied. */
    BUG_ON(nbytes != 0);

error_sg_copy_to_user_buffer:
    sg_miter_stop(&miter);
    return ret;
}
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been
 *   started using sg_miter_start(). On successful return,
 *   @miter->page, @miter->addr and @miter->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
 *   @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
    unsigned int off, len;

    /* check for end and drop resources from the last iteration */
    if (!miter->__nents)
        return false;

    sg_miter_stop(miter);

    /* get to the next sg if necessary. __offset is adjusted by stop */
    while (miter->__offset == miter->__sg->length) {
        if (--miter->__nents) {
            miter->__sg = sg_next(miter->__sg);
            miter->__offset = 0;
        } else
            return false;
    }

    /* map the next page */
    off = miter->__sg->offset + miter->__offset;
    len = miter->__sg->length - miter->__offset;

    miter->page = VMM_PAGE_NTH(sg_page(miter->__sg), off >> VMM_PAGE_SHIFT);
    off &= ~VMM_PAGE_MASK;
    miter->length = min_t(unsigned int, len, VMM_PAGE_SIZE - off);
    miter->consumed = miter->length;

    miter->addr = (void *)(miter->page + off);

    return true;
}
static void mv_start_new_hash_req(struct ahash_request *req)
{
    struct req_progress *p = &cpg->p;
    struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
    const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
    int num_sgs, hw_bytes, old_extra_bytes, rc;

    cpg->cur_req = &req->base;
    memset(p, 0, sizeof(struct req_progress));
    hw_bytes = req->nbytes + ctx->extra_bytes;
    old_extra_bytes = ctx->extra_bytes;

    if (unlikely(ctx->extra_bytes)) {
        memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
               ctx->extra_bytes);
        p->crypt_len = ctx->extra_bytes;
    }

    memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));

    if (unlikely(!ctx->first_hash)) {
        writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
        writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
        writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
        writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
        writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
    }

    ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
    if (ctx->extra_bytes != 0
        && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
        hw_bytes -= ctx->extra_bytes;
    else
        ctx->extra_bytes = 0;

    num_sgs = count_sgs(req->src, req->nbytes);
    sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

    if (hw_bytes) {
        p->hw_nbytes = hw_bytes;
        p->complete = mv_hash_algo_completion;
        p->process = mv_process_hash_current;

        mv_process_hash_current(1);
    } else {
        copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
                        ctx->extra_bytes - old_extra_bytes);
        sg_miter_stop(&p->src_sg_it);
        if (ctx->last_chunk)
            rc = mv_hash_final_fallback(req);
        else
            rc = 0;
        cpg->eng_st = ENGINE_IDLE;
        local_bh_disable();
        req->base.complete(&req->base, rc);
        local_bh_enable();
    }
}
static void dequeue_complete_req(void)
{
    struct ablkcipher_request *req = cpg->cur_req;
    void *buf;
    int ret;

    cpg->p.total_req_bytes += cpg->p.crypt_len;
    do {
        int dst_copy;

        if (!cpg->p.sg_dst_left) {
            ret = sg_miter_next(&cpg->p.dst_sg_it);
            BUG_ON(!ret);
            cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
            cpg->p.dst_start = 0;
        }

        buf = cpg->p.dst_sg_it.addr;
        buf += cpg->p.dst_start;

        dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);

        memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);

        cpg->p.sg_dst_left -= dst_copy;
        cpg->p.crypt_len -= dst_copy;
        cpg->p.dst_start += dst_copy;
    } while (cpg->p.crypt_len > 0);

    BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
    if (cpg->p.total_req_bytes < req->nbytes) {
        /* process next scatter list entry */
        cpg->eng_st = ENGINE_BUSY;
        mv_process_current_q(0);
    } else {
        sg_miter_stop(&cpg->p.src_sg_it);
        sg_miter_stop(&cpg->p.dst_sg_it);
        mv_crypto_algo_completion();
        cpg->eng_st = ENGINE_IDLE;
        req->base.complete(&req->base, 0);
    }
}
/* May need to reorganize buffer for scatter/gather */
static void mmc_dma_tx_start(struct mmci_host *host)
{
    unsigned int len;
    int dma_len;
    struct scatterlist *sg;
    struct mmc_request *mrq = host->mrq;
    struct mmc_data *reqdata = mrq->data;
    struct sg_mapping_iter *sg_miter = &host->sg_miter;
    void *dmaaddr;
    char *src_buffer, *dst_buffer;
    unsigned long flags;

    local_irq_save(flags);

    sg = reqdata->sg;
    len = reqdata->sg_len;

    /* Only 1 segment and no need to copy? */
    if (len == 1 && !dmac_drvdat.preallocated_tx_buf) {
        dma_len = dma_map_sg(mmc_dev(host->mmc), reqdata->sg,
                             reqdata->sg_len, DMA_TO_DEVICE);
        if (dma_len == 0)
            return;

        dmaaddr = (void *) sg_dma_address(&sg[0]);
        dmac_drvdat.mapped = 1;
    } else {
        /* Move data to contiguous buffer first, then transfer it */
        dst_buffer = (char *) dmac_drvdat.dma_v_base;
        do {
            if (!sg_miter_next(sg_miter))
                break;

            /*
             * Map the current scatter buffer, copy data, and unmap
             */
            src_buffer = sg_miter->addr;
            memcpy(dst_buffer, src_buffer, sg_miter->length);
            dst_buffer += sg_miter->length;
        } while (1);

        sg_miter_stop(sg_miter);

        dmac_drvdat.mapped = 0;
        dmaaddr = (void *) dmac_drvdat.dma_handle_tx;
    }

    lpc178x_dma_start_pflow_xfer(DMA_CH_SDCARD_TX, dmaaddr,
                                 (void *) SD_FIFO((u32)host->base), 1);

    local_irq_restore(flags);
}
static size_t vtl_sg_copy_user(struct scatterlist *sgl, unsigned int nents,
                               __user void *buf, size_t buflen, int to_buffer)
{
    unsigned int offset = 0;
    struct sg_mapping_iter miter;
    /* Do not use SG_MITER_ATOMIC flag on the sg_miter_start() call */
    unsigned int sg_flags = 0;
    unsigned int rem;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)
    if (to_buffer)
        sg_flags |= SG_MITER_FROM_SG;
    else
        sg_flags |= SG_MITER_TO_SG;
#endif

    sg_miter_start(&miter, sgl, nents, sg_flags);

    while (sg_miter_next(&miter) && offset < buflen) {
        unsigned int len;

        len = min(miter.length, buflen - offset);

        if (to_buffer)
            rem = copy_to_user(buf + offset, miter.addr, len);
        else {
            rem = copy_from_user(miter.addr, buf + offset, len);
            flush_kernel_dcache_page(miter.page);
        }
        if (rem)
            printk(KERN_DEBUG "mhvtl: %s(): "
                   "copy_%s_user() failed, rem %ld, buf 0x%llx, "
                   "miter.addr 0x%llx, len %d\n",
                   __func__, (to_buffer) ? "to" : "from",
                   (long)rem,
                   (long long unsigned int)(buf + offset),
                   (long long unsigned int)miter.addr, len);

        offset += len;
    }

    sg_miter_stop(&miter);

    return offset;
}
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
                                        unsigned int nbytes,
                                        unsigned int flags)
{
    struct sg_mapping_iter miter;
    int lzeros, ents;
    unsigned int len;
    unsigned int tbytes = nbytes;
    const u8 *buff;

    ents = sg_nents_for_len(sgl, nbytes);
    if (ents < 0)
        return ents;

    sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

    lzeros = 0;
    len = 0;
    while (nbytes > 0) {
        while (len && !*buff) {
            lzeros++;
            len--;
            buff++;
        }

        if (len && *buff)
            break;

        sg_miter_next(&miter);
        buff = miter.addr;
        len = miter.length;

        nbytes -= lzeros;
        lzeros = 0;
    }

    miter.consumed = lzeros;
    sg_miter_stop(&miter);
    nbytes -= lzeros;

    return tbytes - nbytes;
}
/* Transfers actual data using PIO. */
static int r592_transfer_fifo_pio(struct r592_device *dev)
{
    unsigned long flags;

    bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
    struct sg_mapping_iter miter;

    kfifo_reset(&dev->pio_fifo);

    if (!dev->req->long_data) {
        if (is_write) {
            r592_write_fifo_pio(dev, dev->req->data,
                                dev->req->data_len);
            r592_flush_fifo_write(dev);
        } else
            r592_read_fifo_pio(dev, dev->req->data,
                               dev->req->data_len);
        return 0;
    }

    local_irq_save(flags);
    sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
                   (is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG));

    /* Do the transfer fifo<->memory */
    while (sg_miter_next(&miter))
        if (is_write)
            r592_write_fifo_pio(dev, miter.addr, miter.length);
        else
            r592_read_fifo_pio(dev, miter.addr, miter.length);

    /* Write last few non aligned bytes */
    if (is_write)
        r592_flush_fifo_write(dev);

    sg_miter_stop(&miter);
    local_irq_restore(flags);
    return 0;
}
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
    struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
    struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
    struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
    size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
    size_t chunk_len, pad_left;
    struct sg_mapping_iter miter;

    if (!err) {
        if (pad_len) {
            sg_miter_start(&miter, req->dst,
                           sg_nents_for_len(req->dst, pad_len),
                           SG_MITER_ATOMIC | SG_MITER_TO_SG);

            pad_left = pad_len;
            while (pad_left) {
                sg_miter_next(&miter);

                chunk_len = min(miter.length, pad_left);
                memset(miter.addr, 0, chunk_len);
                pad_left -= chunk_len;
            }

            sg_miter_stop(&miter);
        }

        sg_pcopy_from_buffer(req->dst,
                             sg_nents_for_len(req->dst, ctx->key_size),
                             req_ctx->out_buf,
                             req_ctx->child_req.dst_len, pad_len);
    }
    req->dst_len = ctx->key_size;

    kfree(req_ctx->in_buf);
    kzfree(req_ctx->out_buf);

    return err;
}
static void bcm2835_sdhost_write_block_pio(struct bcm2835_host *host)
{
    unsigned long flags;
    size_t blksize, len;
    u32 *buf;

    blksize = host->data->blksz;

    local_irq_save(flags);

    while (blksize) {
        if (!sg_miter_next(&host->sg_miter))
            BUG();

        len = min(host->sg_miter.length, blksize);
        BUG_ON(len % 4);

        blksize -= len;
        host->sg_miter.consumed = len;

        buf = host->sg_miter.addr;

        while (len) {
            if (!data_transfer_wait(host))
                break;

            bcm2835_sdhost_write(host, *(buf++), SDDATA);
            len -= 4;
        }

        if (host->data->error)
            break;
    }

    sg_miter_stop(&host->sg_miter);

    local_irq_restore(flags);
}
static void mv_hash_algo_completion(void)
{
    struct ahash_request *req = ahash_request_cast(cpg->cur_req);
    struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

    if (ctx->extra_bytes)
        copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
    sg_miter_stop(&cpg->p.src_sg_it);

    ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
    ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
    ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
    ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
    ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);

    if (likely(ctx->last_chunk)) {
        if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
            memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
                   crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
        } else
            mv_hash_final_fallback(req);
    }
}
static sense_reason_t rd_execute_rw(struct se_cmd *cmd)
{
    struct scatterlist *sgl = cmd->t_data_sg;
    u32 sgl_nents = cmd->t_data_nents;
    enum dma_data_direction data_direction = cmd->data_direction;
    struct se_device *se_dev = cmd->se_dev;
    struct rd_dev *dev = RD_DEV(se_dev);
    struct rd_dev_sg_table *table;
    struct scatterlist *rd_sg;
    struct sg_mapping_iter m;
    u32 rd_offset;
    u32 rd_size;
    u32 rd_page;
    u32 src_len;
    u64 tmp;

    if (dev->rd_flags & RDF_NULLIO) {
        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
    }

    tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
    rd_offset = do_div(tmp, PAGE_SIZE);
    rd_page = tmp;
    rd_size = cmd->data_length;

    table = rd_get_sg_table(dev, rd_page);
    if (!table)
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

    rd_sg = &table->sg_table[rd_page - table->page_start_offset];

    pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
             dev->rd_dev_id,
             data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
             cmd->t_task_lba, rd_size, rd_page, rd_offset);

    src_len = PAGE_SIZE - rd_offset;
    sg_miter_start(&m, sgl, sgl_nents,
                   data_direction == DMA_FROM_DEVICE ?
                       SG_MITER_TO_SG : SG_MITER_FROM_SG);

    while (rd_size) {
        u32 len;
        void *rd_addr;

        sg_miter_next(&m);
        if (!(u32)m.length) {
            pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                     dev->rd_dev_id, m.addr, m.length);
            sg_miter_stop(&m);
            return TCM_INCORRECT_AMOUNT_OF_DATA;
        }
        len = min((u32)m.length, src_len);
        if (len > rd_size) {
            pr_debug("RD[%u]: size underrun page %d offset %d "
                     "size %d\n", dev->rd_dev_id,
                     rd_page, rd_offset, rd_size);
            len = rd_size;
        }
        m.consumed = len;

        rd_addr = sg_virt(rd_sg) + rd_offset;

        if (data_direction == DMA_FROM_DEVICE)
            memcpy(m.addr, rd_addr, len);
        else
            memcpy(rd_addr, m.addr, len);

        rd_size -= len;
        if (!rd_size)
            continue;

        src_len -= len;
        if (src_len) {
            rd_offset += len;
            continue;
        }

        /* rd page completed, next one please */
        rd_page++;
        rd_offset = 0;
        src_len = PAGE_SIZE;
        if (rd_page <= table->page_end_offset) {
            rd_sg++;
            continue;
        }

        table = rd_get_sg_table(dev, rd_page);
        if (!table) {
            sg_miter_stop(&m);
            return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        /* since we increment, the first sg entry is correct */
        rd_sg = table->sg_table;
    }

    sg_miter_stop(&m);

    target_complete_cmd(cmd, SAM_STAT_GOOD);
    return 0;
}
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sun4i_ss_ctx *ss = op->ss;
    unsigned int ivsize = crypto_skcipher_ivsize(tfm);
    struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
    u32 mode = ctx->mode;
    /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
    u32 rx_cnt = SS_RX_DEFAULT;
    u32 tx_cnt = 0;
    u32 spaces;
    u32 v;
    int err = 0;
    unsigned int i;
    unsigned int ileft = areq->cryptlen;
    unsigned int oleft = areq->cryptlen;
    unsigned int todo;
    struct sg_mapping_iter mi, mo;
    unsigned int oi, oo; /* offset for in and out */
    unsigned long flags;

    if (!areq->cryptlen)
        return 0;

    if (!areq->iv) {
        dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
        return -EINVAL;
    }

    if (!areq->src || !areq->dst) {
        dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
        return -EINVAL;
    }

    spin_lock_irqsave(&ss->slock, flags);

    for (i = 0; i < op->keylen; i += 4)
        writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

    if (areq->iv) {
        for (i = 0; i < 4 && i < ivsize / 4; i++) {
            v = *(u32 *)(areq->iv + i * 4);
            writel(v, ss->base + SS_IV0 + i * 4);
        }
    }
    writel(mode, ss->base + SS_CTL);

    sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                   SG_MITER_FROM_SG | SG_MITER_ATOMIC);
    sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                   SG_MITER_TO_SG | SG_MITER_ATOMIC);
    sg_miter_next(&mi);
    sg_miter_next(&mo);
    if (!mi.addr || !mo.addr) {
        dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
        err = -EINVAL;
        goto release_ss;
    }

    ileft = areq->cryptlen / 4;
    oleft = areq->cryptlen / 4;
    oi = 0;
    oo = 0;
    do {
        todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
        if (todo) {
            ileft -= todo;
            writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
            oi += todo * 4;
        }
        if (oi == mi.length) {
            sg_miter_next(&mi);
            oi = 0;
        }

        spaces = readl(ss->base + SS_FCSR);
        rx_cnt = SS_RXFIFO_SPACES(spaces);
        tx_cnt = SS_TXFIFO_SPACES(spaces);

        todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
        if (todo) {
            oleft -= todo;
            readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
            oo += todo * 4;
        }
        if (oo == mo.length) {
            sg_miter_next(&mo);
            oo = 0;
        }
    } while (oleft);

    if (areq->iv) {
        for (i = 0; i < 4 && i < ivsize / 4; i++) {
            v = readl(ss->base + SS_IV0 + i * 4);
            *(u32 *)(areq->iv + i * 4) = v;
        }
    }

release_ss:
    sg_miter_stop(&mi);
    sg_miter_stop(&mo);
    writel(0, ss->base + SS_CTL);
    spin_unlock_irqrestore(&ss->slock, flags);
    return err;
}
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
                                 struct mmc_data *data)
{
    struct sg_mapping_iter *miter = &host->miter;
    void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
    uint32_t *buf;
    uint32_t d;
    uint16_t status;
    size_t i, j;
    unsigned int timeout;

    while (sg_miter_next(miter)) {
        buf = miter->addr;
        i = miter->length;
        j = i / 32;
        i = i & 0x1f;
        while (j) {
            timeout = jz4740_mmc_poll_irq(host,
                                          JZ_MMC_IRQ_RXFIFO_RD_REQ);
            if (unlikely(timeout))
                goto poll_timeout;

            buf[0] = readl(fifo_addr);
            buf[1] = readl(fifo_addr);
            buf[2] = readl(fifo_addr);
            buf[3] = readl(fifo_addr);
            buf[4] = readl(fifo_addr);
            buf[5] = readl(fifo_addr);
            buf[6] = readl(fifo_addr);
            buf[7] = readl(fifo_addr);

            buf += 8;
            --j;
        }
        if (unlikely(i)) {
            timeout = jz4740_mmc_poll_irq(host,
                                          JZ_MMC_IRQ_RXFIFO_RD_REQ);
            if (unlikely(timeout))
                goto poll_timeout;

            while (i >= 4) {
                *buf++ = readl(fifo_addr);
                i -= 4;
            }
            if (unlikely(i > 0)) {
                d = readl(fifo_addr);
                memcpy(buf, &d, i);
            }
        }
        data->bytes_xfered += miter->length;

        /* This can go away once MIPS implements
         * flush_kernel_dcache_page */
        flush_dcache_page(miter->page);
    }
    sg_miter_stop(miter);

    /* For whatever reason there is sometimes one word more in the fifo
     * than requested */
    timeout = 1000;
    status = readl(host->base + JZ_REG_MMC_STATUS);
    while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
        d = readl(fifo_addr);
        status = readl(host->base + JZ_REG_MMC_STATUS);
    }

    return false;

poll_timeout:
    miter->consumed = (void *)buf - miter->addr;
    data->bytes_xfered += miter->consumed;
    sg_miter_stop(miter);

    return true;
}
static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
    struct se_task *task = &req->rd_task;
    struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
    struct rd_dev_sg_table *table;
    struct scatterlist *rd_sg;
    struct sg_mapping_iter m;
    u32 rd_offset = req->rd_offset;
    u32 src_len;

    table = rd_get_sg_table(dev, req->rd_page);
    if (!table)
        return -EINVAL;

    rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];

    pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
             dev->rd_dev_id, read_rd ? "Read" : "Write",
             task->task_lba, req->rd_size, req->rd_page, rd_offset);

    src_len = PAGE_SIZE - rd_offset;
    sg_miter_start(&m, task->task_sg, task->task_sg_nents,
                   read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);

    while (req->rd_size) {
        u32 len;
        void *rd_addr;

        sg_miter_next(&m);
        len = min((u32)m.length, src_len);
        m.consumed = len;

        rd_addr = sg_virt(rd_sg) + rd_offset;

        if (read_rd)
            memcpy(m.addr, rd_addr, len);
        else
            memcpy(rd_addr, m.addr, len);

        req->rd_size -= len;
        if (!req->rd_size)
            continue;

        src_len -= len;
        if (src_len) {
            rd_offset += len;
            continue;
        }

        /* rd page completed, next one please */
        req->rd_page++;
        rd_offset = 0;
        src_len = PAGE_SIZE;
        if (req->rd_page <= table->page_end_offset) {
            rd_sg++;
            continue;
        }

        table = rd_get_sg_table(dev, req->rd_page);
        if (!table) {
            sg_miter_stop(&m);
            return -EINVAL;
        }

        /* since we increment, the first sg entry is correct */
        rd_sg = table->sg_table;
    }

    sg_miter_stop(&m);
    return 0;
}
/* Generic function that support SG with size not multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sun4i_ss_ctx *ss = op->ss;
    int no_chunk = 1;
    struct scatterlist *in_sg = areq->src;
    struct scatterlist *out_sg = areq->dst;
    unsigned int ivsize = crypto_skcipher_ivsize(tfm);
    struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
    u32 mode = ctx->mode;
    /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
    u32 rx_cnt = SS_RX_DEFAULT;
    u32 tx_cnt = 0;
    u32 v;
    u32 spaces;
    int err = 0;
    unsigned int i;
    unsigned int ileft = areq->cryptlen;
    unsigned int oleft = areq->cryptlen;
    unsigned int todo;
    struct sg_mapping_iter mi, mo;
    unsigned int oi, oo;      /* offset for in and out */
    char buf[4 * SS_RX_MAX];  /* buffer for linearize SG src */
    char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
    unsigned int ob = 0;      /* offset in buf */
    unsigned int obo = 0;     /* offset in bufo */
    unsigned int obl = 0;     /* length of data in bufo */
    unsigned long flags;

    if (!areq->cryptlen)
        return 0;

    if (!areq->iv) {
        dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
        return -EINVAL;
    }

    if (!areq->src || !areq->dst) {
        dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
        return -EINVAL;
    }

    /*
     * if we have only SGs with size multiple of 4,
     * we can use the SS optimized function
     */
    while (in_sg && no_chunk == 1) {
        if (in_sg->length % 4)
            no_chunk = 0;
        in_sg = sg_next(in_sg);
    }
    while (out_sg && no_chunk == 1) {
        if (out_sg->length % 4)
            no_chunk = 0;
        out_sg = sg_next(out_sg);
    }

    if (no_chunk == 1)
        return sun4i_ss_opti_poll(areq);

    spin_lock_irqsave(&ss->slock, flags);

    for (i = 0; i < op->keylen; i += 4)
        writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

    if (areq->iv) {
        for (i = 0; i < 4 && i < ivsize / 4; i++) {
            v = *(u32 *)(areq->iv + i * 4);
            writel(v, ss->base + SS_IV0 + i * 4);
        }
    }
    writel(mode, ss->base + SS_CTL);

    sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                   SG_MITER_FROM_SG | SG_MITER_ATOMIC);
    sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                   SG_MITER_TO_SG | SG_MITER_ATOMIC);
    sg_miter_next(&mi);
    sg_miter_next(&mo);
    if (!mi.addr || !mo.addr) {
        dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
        err = -EINVAL;
        goto release_ss;
    }
    ileft = areq->cryptlen;
    oleft = areq->cryptlen;
    oi = 0;
    oo = 0;

    while (oleft) {
        if (ileft) {
            /*
             * todo is the number of consecutive 4byte word that we
             * can read from current SG
             */
            todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
            if (todo && !ob) {
                writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
                ileft -= todo * 4;
                oi += todo * 4;
            } else {
                /*
                 * not enough consecutive bytes, so we need to
                 * linearize in buf. todo is in bytes.
                 * After that copy, if we have a multiple of 4
                 * we need to be able to write all buf in one
                 * pass, so it is why we min() with rx_cnt
                 */
                todo = min3(rx_cnt * 4 - ob, ileft, mi.length - oi);
                memcpy(buf + ob, mi.addr + oi, todo);
                ileft -= todo;
                oi += todo;
                ob += todo;
                if (!(ob % 4)) {
                    writesl(ss->base + SS_RXFIFO, buf, ob / 4);
                    ob = 0;
                }
            }
            if (oi == mi.length) {
                sg_miter_next(&mi);
                oi = 0;
            }
        }

        spaces = readl(ss->base + SS_FCSR);
        rx_cnt = SS_RXFIFO_SPACES(spaces);
        tx_cnt = SS_TXFIFO_SPACES(spaces);
        dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
                mode,
                oi, mi.length, ileft, areq->cryptlen, rx_cnt,
                oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

        if (!tx_cnt)
            continue;
        /* todo in 4bytes word */
        todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
        if (todo) {
            readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
            oleft -= todo * 4;
            oo += todo * 4;
            if (oo == mo.length) {
                sg_miter_next(&mo);
                oo = 0;
            }
        } else {
            /*
             * read obl bytes in bufo, we read at maximum for
             * emptying the device
             */
            readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
            obl = tx_cnt * 4;
            obo = 0;
            do {
                /*
                 * how many bytes we can copy ?
                 * no more than remaining SG size
                 * no more than remaining buffer
                 * no need to test against oleft
                 */
                todo = min(mo.length - oo, obl - obo);
                memcpy(mo.addr + oo, bufo + obo, todo);
                oleft -= todo;
                obo += todo;
                oo += todo;
                if (oo == mo.length) {
                    sg_miter_next(&mo);
                    oo = 0;
                }
            } while (obo < obl);
            /* bufo must be fully used here */
        }
    }
    if (areq->iv) {
        for (i = 0; i < 4 && i < ivsize / 4; i++) {
            v = readl(ss->base + SS_IV0 + i * 4);
            *(u32 *)(areq->iv + i * 4) = v;
        }
    }

release_ss:
    sg_miter_stop(&mi);
    sg_miter_stop(&mo);
    writel(0, ss->base + SS_CTL);
    spin_unlock_irqrestore(&ss->slock, flags);
    return err;
}