static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
        int num_sgs;

        cpg->cur_req = req;
        memset(&cpg->p, 0, sizeof(struct req_progress));

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        num_sgs = count_sgs(req->dst, req->nbytes);
        sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

        mv_process_current_q(1);
}
/* Copy data from an sg list to a userspace buffer. */
static __must_check int sg_copy_to_user_buffer(struct scatterlist *sg,
                                               unsigned int nents,
                                               unsigned char __user *dst,
                                               size_t nbytes)
{
        int ret = 0;
        struct sg_mapping_iter miter;

        sg_miter_start(&miter, sg, nents, SG_MITER_FROM_SG);

        while (sg_miter_next(&miter) && nbytes > 0 && miter.addr) {
                size_t len;

                len = min(miter.length, nbytes);
                /*
                 * __copy_to_user() skips the access_ok() check; the caller
                 * must have validated the destination range already.
                 */
                if (__copy_to_user(dst, miter.addr, len)) {
                        ret = -EINVAL;
                        goto error_sg_copy_to_user_buffer;
                }
                nbytes -= len;
                dst += len;
        }

        /* If the provided buffer is large enough, all bytes are copied. */
        BUG_ON(nbytes != 0);

error_sg_copy_to_user_buffer:
        sg_miter_stop(&miter);
        return ret;
}
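Since sg_copy_to_user_buffer() relies on __copy_to_user(), the user range has to be checked before the first call. A minimal, hedged caller sketch; my_read_result() is a hypothetical wrapper, and the two-argument-plus-type access_ok(VERIFY_WRITE, ...) form is assumed to match the pre-5.0 kernels these snippets come from:

/*
 * Hypothetical caller: validate the destination once with access_ok(),
 * then let sg_copy_to_user_buffer() do the per-chunk copies.
 */
static int my_read_result(struct scatterlist *sg, unsigned int nents,
                          unsigned char __user *ubuf, size_t nbytes)
{
        if (!access_ok(VERIFY_WRITE, ubuf, nbytes))
                return -EFAULT;

        return sg_copy_to_user_buffer(sg, nents, ubuf, nbytes);
}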
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:       The SG list
 * @nents:     Number of SG entries
 * @buf:       Where to copy from
 * @buflen:    The number of bytes to copy
 * @to_buffer: transfer direction (non zero == from an sg list to a
 *             buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t buflen, int to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned long flags;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        local_irq_save(flags);

        while (sg_miter_next(&miter) && offset < buflen) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);
        local_irq_restore(flags);

        return offset;
}
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:       The SG list
 * @nents:     Number of SG entries
 * @buf:       Where to copy from
 * @buflen:    The number of bytes to copy
 * @to_buffer: transfer direction (non zero == from an sg list to a
 *             buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t buflen, int to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned long flags;

        sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

        local_irq_save(flags);

        while (sg_miter_next(&miter) && offset < buflen) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else {
                        memcpy(miter.addr, buf + offset, len);
                        flush_kernel_dcache_page(miter.page);
                }

                offset += len;
        }

        sg_miter_stop(&miter);
        local_irq_restore(flags);

        return offset;
}
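In lib/scatterlist.c, sg_copy_buffer() is exposed through two direction-specific wrappers. A minimal sketch consistent with kernels of this era (later kernels add a const qualifier and offset-taking variants):

/* Copy from a linear buffer into an SG list (to_buffer == 0). */
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}

/* Copy from an SG list into a linear buffer (to_buffer != 0). */
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}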
static void mv_start_new_hash_req(struct ahash_request *req)
{
        struct req_progress *p = &cpg->p;
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
        const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
        int num_sgs, hw_bytes, old_extra_bytes, rc;

        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));
        hw_bytes = req->nbytes + ctx->extra_bytes;
        old_extra_bytes = ctx->extra_bytes;

        if (unlikely(ctx->extra_bytes)) {
                memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
                       ctx->extra_bytes);
                p->crypt_len = ctx->extra_bytes;
        }

        memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));

        if (unlikely(!ctx->first_hash)) {
                writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
                writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
                writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
                writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
                writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
        }

        ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
        if (ctx->extra_bytes != 0
            && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
                hw_bytes -= ctx->extra_bytes;
        else
                ctx->extra_bytes = 0;

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        if (hw_bytes) {
                p->hw_nbytes = hw_bytes;
                p->complete = mv_hash_algo_completion;
                p->process = mv_process_hash_current;

                mv_process_hash_current(1);
        } else {
                copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
                                ctx->extra_bytes - old_extra_bytes);
                sg_miter_stop(&p->src_sg_it);
                if (ctx->last_chunk)
                        rc = mv_hash_final_fallback(req);
                else
                        rc = 0;
                cpg->eng_st = ENGINE_IDLE;
                local_bh_disable();
                req->base.complete(&req->base, rc);
                local_bh_enable();
        }
}
static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
        struct req_progress *p = &cpg->p;
        int num_sgs;

        cpg->cur_req = &req->base;
        memset(p, 0, sizeof(struct req_progress));

        p->hw_nbytes = req->nbytes;
        p->complete = mv_crypto_algo_completion;
        p->process = mv_process_current_q;
        p->copy_back = 1;

        num_sgs = count_sgs(req->src, req->nbytes);
        sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

        num_sgs = count_sgs(req->dst, req->nbytes);
        sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

        mv_process_current_q(1);
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int flags = SG_MITER_ATOMIC;

        if (data->flags & MMC_DATA_READ)
                flags |= SG_MITER_TO_SG;
        else
                flags |= SG_MITER_FROM_SG;

        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
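SG_MITER_ATOMIC makes the iterator map pages with kmap_atomic(), so the consumer must advance it with interrupts disabled. A hedged sketch of the interrupt-time loop that would drain host->sg_miter, not the actual mmci code; my_pio_irq() and my_read_fifo() are hypothetical names:

/*
 * Hypothetical PIO interrupt handler consuming the iterator prepared by
 * mmci_init_sg(). my_read_fifo() is assumed to return the number of
 * bytes it drained into the mapped chunk.
 */
static irqreturn_t my_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        unsigned long flags;

        local_irq_save(flags);
        while (sg_miter_next(sg_miter)) {
                unsigned int len;

                len = my_read_fifo(host, sg_miter->addr, sg_miter->length);
                /* Record progress so the next sg_miter_next() resumes here. */
                sg_miter->consumed = len;
                if (len < sg_miter->length)
                        break; /* FIFO empty; continue on the next interrupt */
        }
        /* sg_miter_stop() unmaps but keeps the iteration position. */
        sg_miter_stop(sg_miter);
        local_irq_restore(flags);

        return IRQ_HANDLED;
}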
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
                                         unsigned int good_bytes)
{
        struct request *rq = scmd->request;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        struct sg_mapping_iter miter;
        struct blk_zone_report_hdr hdr;
        struct blk_zone zone;
        unsigned int offset, bytes = 0;
        unsigned long flags;
        u8 *buf;

        /* 64 is the size of both the report header and a zone descriptor */
        if (good_bytes < 64)
                return;

        memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));

        sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);

        local_irq_save(flags);
        while (sg_miter_next(&miter) && bytes < good_bytes) {

                buf = miter.addr;
                offset = 0;

                if (bytes == 0) {
                        /* Set the report header */
                        hdr.nr_zones = min_t(unsigned int,
                                             (good_bytes - 64) / 64,
                                             get_unaligned_be32(&buf[0]) / 64);
                        memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
                        offset += 64;
                        bytes += 64;
                }

                /* Parse zone descriptors */
                while (offset < miter.length && hdr.nr_zones) {
                        WARN_ON(offset > miter.length);
                        buf = miter.addr + offset;
                        sd_zbc_parse_report(sdkp, buf, &zone);
                        memcpy(buf, &zone, sizeof(struct blk_zone));
                        offset += 64;
                        bytes += 64;
                        hdr.nr_zones--;
                }

                if (!hdr.nr_zones)
                        break;
        }
        sg_miter_stop(&miter);
        local_irq_restore(flags);
}
static size_t vtl_sg_copy_user(struct scatterlist *sgl, unsigned int nents,
                               void __user *buf, size_t buflen, int to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        /*
         * Do not use the SG_MITER_ATOMIC flag on the sg_miter_start() call:
         * copy_to_user()/copy_from_user() may sleep, which is not allowed
         * under an atomic kmap.
         */
        unsigned int sg_flags = 0;
        unsigned long rem;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)
        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;
#endif

        sg_miter_start(&miter, sgl, nents, sg_flags);

        while (sg_miter_next(&miter) && offset < buflen) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        rem = copy_to_user(buf + offset, miter.addr, len);
                else {
                        rem = copy_from_user(miter.addr, buf + offset, len);
                        flush_kernel_dcache_page(miter.page);
                }

                if (rem)
                        printk(KERN_DEBUG "mhvtl: %s(): "
                               "copy_%s_user() failed, rem %ld, buf 0x%llx, "
                               "miter.addr 0x%llx, len %u\n",
                               __func__, (to_buffer) ? "to" : "from",
                               (long)rem,
                               (long long unsigned int)(buf + offset),
                               (long long unsigned int)miter.addr, len);

                offset += len;
        }
        sg_miter_stop(&miter);

        return offset;
}
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
                                        unsigned int nbytes,
                                        unsigned int flags)
{
        struct sg_mapping_iter miter;
        int lzeros, ents;
        unsigned int len;
        unsigned int tbytes = nbytes;
        const u8 *buff;

        ents = sg_nents_for_len(sgl, nbytes);
        if (ents < 0)
                return ents;

        sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

        lzeros = 0;
        len = 0;
        while (nbytes > 0) {
                /* Count the zero bytes at the start of the current chunk. */
                while (len && !*buff) {
                        lzeros++;
                        len--;
                        buff++;
                }

                /* Stop at the first non-zero byte. */
                if (len && *buff)
                        break;

                /* Chunk exhausted; map the next one. */
                sg_miter_next(&miter);
                buff = miter.addr;
                len = miter.length;

                nbytes -= lzeros;
                lzeros = 0;
        }

        miter.consumed = lzeros;
        sg_miter_stop(&miter);
        nbytes -= lzeros;

        return tbytes - nbytes;
}
static void bcm2835_sdhost_prepare_data(struct bcm2835_host *host,
                                        struct mmc_command *cmd)
{
        struct mmc_data *data = cmd->data;

        WARN_ON(host->data);

        if (!data)
                return;

        /* Sanity checks */
        BUG_ON(data->blksz * data->blocks > 524288);
        BUG_ON(data->blksz > host->mmc->max_blk_size);
        BUG_ON(data->blocks > 65535);

        host->data = data;
        host->data_complete = 0;
        host->flush_fifo = 0;
        host->data->bytes_xfered = 0;

        host->use_dma = host->have_dma && (data->blocks > host->pio_limit);
        if (!host->use_dma) {
                int flags;

                flags = SG_MITER_ATOMIC;
                if (data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;
                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->blocks = data->blocks;
        }

        bcm2835_sdhost_set_transfer_irqs(host);

        bcm2835_sdhost_write(host, data->blksz, SDHBCT);
        if (host->use_dma)
                bcm2835_sdhost_write(host, data->blocks, SDHBLC);

        BUG_ON(!host->data);
}
/* Transfers actual data using PIO. */
static int r592_transfer_fifo_pio(struct r592_device *dev)
{
        unsigned long flags;
        bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
        struct sg_mapping_iter miter;

        kfifo_reset(&dev->pio_fifo);

        if (!dev->req->long_data) {
                if (is_write) {
                        r592_write_fifo_pio(dev, dev->req->data,
                                            dev->req->data_len);
                        r592_flush_fifo_write(dev);
                } else
                        r592_read_fifo_pio(dev, dev->req->data,
                                           dev->req->data_len);
                return 0;
        }

        local_irq_save(flags);
        sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
                       (is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG));

        /* Do the transfer fifo <-> memory */
        while (sg_miter_next(&miter))
                if (is_write)
                        r592_write_fifo_pio(dev, miter.addr, miter.length);
                else
                        r592_read_fifo_pio(dev, miter.addr, miter.length);

        /* Write the last few non-aligned bytes */
        if (is_write)
                r592_flush_fifo_write(dev);

        sg_miter_stop(&miter);
        local_irq_restore(flags);
        return 0;
}
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
        size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
        size_t chunk_len, pad_left;
        struct sg_mapping_iter miter;

        if (!err) {
                if (pad_len) {
                        sg_miter_start(&miter, req->dst,
                                       sg_nents_for_len(req->dst, pad_len),
                                       SG_MITER_ATOMIC | SG_MITER_TO_SG);

                        pad_left = pad_len;
                        while (pad_left) {
                                sg_miter_next(&miter);

                                chunk_len = min(miter.length, pad_left);
                                memset(miter.addr, 0, chunk_len);
                                pad_left -= chunk_len;
                        }

                        sg_miter_stop(&miter);
                }

                sg_pcopy_from_buffer(req->dst,
                                     sg_nents_for_len(req->dst, ctx->key_size),
                                     req_ctx->out_buf,
                                     req_ctx->child_req.dst_len, pad_len);
        }

        req->dst_len = ctx->key_size;

        kfree(req_ctx->in_buf);
        kzfree(req_ctx->out_buf);

        return err;
}
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 spaces;
        u32 v;
        int err = 0;
        unsigned int i;
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offset for in and out */
        unsigned long flags;

        if (!areq->cryptlen)
                return 0;

        if (!areq->iv) {
                dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
                return -EINVAL;
        }

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&ss->slock, flags);

        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->iv + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);

        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                err = -EINVAL;
                goto release_ss;
        }

        ileft = areq->cryptlen / 4;
        oleft = areq->cryptlen / 4;
        oi = 0;
        oo = 0;
        do {
                todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
                if (todo) {
                        ileft -= todo;
                        writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
                        oi += todo * 4;
                }
                if (oi == mi.length) {
                        sg_miter_next(&mi);
                        oi = 0;
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);

                todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
                if (todo) {
                        oleft -= todo;
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oo += todo * 4;
                }
                if (oo == mo.length) {
                        sg_miter_next(&mo);
                        oo = 0;
                }
        } while (oleft);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = readl(ss->base + SS_IV0 + i * 4);
                        *(u32 *)(areq->iv + i * 4) = v;
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);

        return err;
}
/* Generic function that supports SG entries whose size is not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        int no_chunk = 1;
        struct scatterlist *in_sg = areq->src;
        struct scatterlist *out_sg = areq->dst;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 v;
        u32 spaces;
        int err = 0;
        unsigned int i;
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offset for in and out */
        char buf[4 * SS_RX_MAX];  /* buffer for linearizing the SG src */
        char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the SG dst */
        unsigned int ob = 0;  /* offset in buf */
        unsigned int obo = 0; /* offset in bufo */
        unsigned int obl = 0; /* length of data in bufo */
        unsigned long flags;

        if (!areq->cryptlen)
                return 0;

        if (!areq->iv) {
                dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
                return -EINVAL;
        }

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        /*
         * if we have only SGs with size multiple of 4,
         * we can use the SS optimized function
         */
        while (in_sg && no_chunk == 1) {
                if (in_sg->length % 4)
                        no_chunk = 0;
                in_sg = sg_next(in_sg);
        }
        while (out_sg && no_chunk == 1) {
                if (out_sg->length % 4)
                        no_chunk = 0;
                out_sg = sg_next(out_sg);
        }

        if (no_chunk == 1)
                return sun4i_ss_opti_poll(areq);

        spin_lock_irqsave(&ss->slock, flags);

        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->iv + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);

        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                err = -EINVAL;
                goto release_ss;
        }

        ileft = areq->cryptlen;
        oleft = areq->cryptlen;
        oi = 0;
        oo = 0;

        while (oleft) {
                if (ileft) {
                        /*
                         * todo is the number of consecutive 4-byte words that
                         * we can read from the current SG
                         */
                        todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
                        if (todo && !ob) {
                                writesl(ss->base + SS_RXFIFO, mi.addr + oi,
                                        todo);
                                ileft -= todo * 4;
                                oi += todo * 4;
                        } else {
                                /*
                                 * not enough consecutive bytes, so we need to
                                 * linearize in buf. todo is in bytes.
                                 * After that copy, if we have a multiple of 4
                                 * we need to be able to write all of buf in
                                 * one pass, which is why we min() with rx_cnt
                                 */
                                todo = min3(rx_cnt * 4 - ob, ileft,
                                            mi.length - oi);
                                memcpy(buf + ob, mi.addr + oi, todo);
                                ileft -= todo;
                                oi += todo;
                                ob += todo;
                                if (!(ob % 4)) {
                                        writesl(ss->base + SS_RXFIFO, buf,
                                                ob / 4);
                                        ob = 0;
                                }
                        }
                        if (oi == mi.length) {
                                sg_miter_next(&mi);
                                oi = 0;
                        }
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);
                dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
                        mode,
                        oi, mi.length, ileft, areq->cryptlen, rx_cnt,
                        oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

                if (!tx_cnt)
                        continue;

                /* todo in 4-byte words */
                todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
                if (todo) {
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oleft -= todo * 4;
                        oo += todo * 4;
                        if (oo == mo.length) {
                                sg_miter_next(&mo);
                                oo = 0;
                        }
                } else {
                        /*
                         * read obl bytes into bufo; we read the maximum
                         * available in order to empty the device
                         */
                        readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
                        obl = tx_cnt * 4;
                        obo = 0;
                        do {
                                /*
                                 * how many bytes can we copy?
                                 * no more than the remaining SG size,
                                 * no more than the remaining buffer,
                                 * no need to test against oleft
                                 */
                                todo = min(mo.length - oo, obl - obo);
                                memcpy(mo.addr + oo, bufo + obo, todo);
                                oleft -= todo;
                                obo += todo;
                                oo += todo;
                                if (oo == mo.length) {
                                        sg_miter_next(&mo);
                                        oo = 0;
                                }
                        } while (obo < obl);
                        /* bufo must be fully used here */
                }
        }

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = readl(ss->base + SS_IV0 + i * 4);
                        *(u32 *)(areq->iv + i * 4) = v;
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);

        return err;
}
static sense_reason_t rd_execute_rw(struct se_cmd *cmd)
{
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
        enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
        u32 rd_offset;
        u32 rd_size;
        u32 rd_page;
        u32 src_len;
        u64 tmp;

        if (dev->rd_flags & RDF_NULLIO) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }

        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
        rd_size = cmd->data_length;

        table = rd_get_sg_table(dev, rd_page);
        if (!table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        rd_sg = &table->sg_table[rd_page - table->page_start_offset];

        pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                 dev->rd_dev_id,
                 data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
                 cmd->t_task_lba, rd_size, rd_page, rd_offset);

        src_len = PAGE_SIZE - rd_offset;
        sg_miter_start(&m, sgl, sgl_nents,
                       data_direction == DMA_FROM_DEVICE ?
                       SG_MITER_TO_SG : SG_MITER_FROM_SG);

        while (rd_size) {
                u32 len;
                void *rd_addr;

                sg_miter_next(&m);
                if (!(u32)m.length) {
                        pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                                 dev->rd_dev_id, m.addr, m.length);
                        sg_miter_stop(&m);
                        return TCM_INCORRECT_AMOUNT_OF_DATA;
                }
                len = min((u32)m.length, src_len);
                if (len > rd_size) {
                        pr_debug("RD[%u]: size underrun page %d offset %d "
                                 "size %d\n", dev->rd_dev_id,
                                 rd_page, rd_offset, rd_size);
                        len = rd_size;
                }
                m.consumed = len;

                rd_addr = sg_virt(rd_sg) + rd_offset;

                if (data_direction == DMA_FROM_DEVICE)
                        memcpy(m.addr, rd_addr, len);
                else
                        memcpy(rd_addr, m.addr, len);

                rd_size -= len;
                if (!rd_size)
                        continue;

                src_len -= len;
                if (src_len) {
                        rd_offset += len;
                        continue;
                }

                /* rd page completed, next one please */
                rd_page++;
                rd_offset = 0;
                src_len = PAGE_SIZE;
                if (rd_page <= table->page_end_offset) {
                        rd_sg++;
                        continue;
                }

                table = rd_get_sg_table(dev, rd_page);
                if (!table) {
                        sg_miter_stop(&m);
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }

                /* since we increment, the first sg entry is correct */
                rd_sg = table->sg_table;
        }

        sg_miter_stop(&m);

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}
static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
        struct se_task *task = &req->rd_task;
        struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
        u32 rd_offset = req->rd_offset;
        u32 src_len;

        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -EINVAL;

        rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];

        pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                 dev->rd_dev_id, read_rd ? "Read" : "Write",
                 task->task_lba, req->rd_size, req->rd_page, rd_offset);

        src_len = PAGE_SIZE - rd_offset;
        sg_miter_start(&m, task->task_sg, task->task_sg_nents,
                       read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);

        while (req->rd_size) {
                u32 len;
                void *rd_addr;

                sg_miter_next(&m);
                len = min((u32)m.length, src_len);
                m.consumed = len;

                rd_addr = sg_virt(rd_sg) + rd_offset;

                if (read_rd)
                        memcpy(m.addr, rd_addr, len);
                else
                        memcpy(rd_addr, m.addr, len);

                req->rd_size -= len;
                if (!req->rd_size)
                        continue;

                src_len -= len;
                if (src_len) {
                        rd_offset += len;
                        continue;
                }

                /* rd page completed, next one please */
                req->rd_page++;
                rd_offset = 0;
                src_len = PAGE_SIZE;
                if (req->rd_page <= table->page_end_offset) {
                        rd_sg++;
                        continue;
                }

                table = rd_get_sg_table(dev, req->rd_page);
                if (!table) {
                        sg_miter_stop(&m);
                        return -EINVAL;
                }

                /* since we increment, the first sg entry is correct */
                rd_sg = table->sg_table;
        }

        sg_miter_stop(&m);
        return 0;
}