/* Feed the contents of a scatterlist, page by page, into the GHASH state. */
static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
				       struct scatterlist *sg, int len)
{
	struct scatter_walk walk;
	u8 *src;
	int n;

	if (!len)
		return;

	scatterwalk_start(&walk, sg);

	while (len) {
		n = scatterwalk_clamp(&walk, len);

		if (!n) {
			/* Current entry exhausted: move to the next one. */
			scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}

		src = scatterwalk_map(&walk, 0);

		crypto_gcm_ghash_update(ctx, src, n);
		len -= n;

		scatterwalk_unmap(src, 0);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);

		if (len)
			crypto_yield(ctx->flags);
	}
}
static int mmc_copy_sglist(struct scatterlist *in_sg, int entries,
			   struct scatterlist *out_sg, u8 *buf)
{
	/*
	 * Initialize out_sg with the number of entries present in in_sg,
	 * then walk in_sg and point each out_sg entry at a slice of the
	 * linear buffer with the same length.
	 */
	int i = 0;

	if (out_sg && entries > 0) {
		sg_init_table(out_sg, entries);
	} else {
		pr_err("Either in_sg is empty or out_sg is NULL\n");
		goto exit;
	}

	while (in_sg && entries > 0) {
		/*
		 * out_sg was sized for 'entries' elements above and i never
		 * exceeds that count, so out_sg[i] is always a valid slot.
		 */
		sg_set_buf(&out_sg[i], buf, in_sg->length);
		buf += in_sg->length;
		i++;
		in_sg = scatterwalk_sg_next(in_sg);
		entries--;
	}

exit:
	return i;
}
/* Count the scatterlist entries needed to cover nbytes bytes of data. */
static int mmc_count_sg(struct scatterlist *sg, unsigned long nbytes)
{
	int i;

	for (i = 0; nbytes > 0 && sg; i++, sg = scatterwalk_sg_next(sg))
		nbytes -= sg->length;

	return i;
}
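/*
 * Illustrative sketch, not part of the original driver: how a caller might
 * pair mmc_count_sg() and mmc_copy_sglist() to build a bounce scatterlist
 * backed by a single linear buffer. The function name, out parameters and
 * GFP flags below are assumptions made up for the example.
 */
static int example_build_bounce_sg(struct scatterlist *in_sg,
				   unsigned long nbytes,
				   struct scatterlist **out_sg, u8 **buf)
{
	int entries = mmc_count_sg(in_sg, nbytes);

	*buf = kmalloc(nbytes, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	*out_sg = kmalloc_array(entries, sizeof(**out_sg), GFP_KERNEL);
	if (!*out_sg) {
		kfree(*buf);
		return -ENOMEM;
	}

	/* Returns the number of out_sg entries actually initialized. */
	return mmc_copy_sglist(in_sg, entries, *out_sg, *buf);
}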
/* Copy sg data, from to_skip to end, into dest (or the other way around). */
void dx_sg_copy_part(u8 *dest, struct scatterlist *sg,
		     int to_skip, unsigned int end,
		     enum dx_sg_cpy_direct direct)
{
	struct scatterlist t_sg;
	struct scatterlist *current_sg = sg;
	int sg_index, cpy_index;
	int nents;
	int lbytes;

	nents = sg_count_ents(sg, end, &lbytes);

	/* Skip whole entries until we reach the one containing offset to_skip. */
	sg_index = current_sg->length;
	while (sg_index <= to_skip) {
		current_sg = scatterwalk_sg_next(current_sg);
		sg_index += current_sg->length;
		nents--;
	}

	/* Number of bytes to copy out of the entry we stopped in. */
	cpy_index = sg_index - to_skip;

	/* Copy the current entry to a temporary and bump its offset so the
	 * copy starts at to_skip rather than at the entry's beginning. */
	t_sg = *current_sg;
	t_sg.offset += current_sg->length - cpy_index;

	/* Copy the partial first entry. */
	if (direct == DX_SG_TO_BUF)
		sg_copy_to_buffer(&t_sg, 1, dest, cpy_index);
	else
		sg_copy_from_buffer(&t_sg, 1, dest, cpy_index);

	current_sg = scatterwalk_sg_next(current_sg);
	nents--;

	/* Copy the remaining entries, if any. */
	if (end > sg_index) {
		if (direct == DX_SG_TO_BUF)
			sg_copy_to_buffer(current_sg, nents,
					  &dest[cpy_index], end - sg_index);
		else
			sg_copy_from_buffer(current_sg, nents,
					    &dest[cpy_index], end - sg_index);
	}
}
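/*
 * Illustrative sketch, not from the original dx driver: using
 * dx_sg_copy_part() to save the last ivsize bytes of a source scatterlist,
 * e.g. to preserve a CBC IV before the hardware overwrites the buffer.
 * 'backup_iv' and 'ivsize' are example names, not driver symbols.
 */
static void example_backup_last_block(struct scatterlist *src,
				      unsigned int total_len,
				      u8 *backup_iv, unsigned int ivsize)
{
	/* Copy bytes [total_len - ivsize, total_len) out of the sg list. */
	dx_sg_copy_part(backup_iv, src, total_len - ivsize, total_len,
			DX_SG_TO_BUF);
}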
static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
			 int chain)
{
	if (chain) {
		/* The buffers are contiguous: fold sg into the head entry. */
		head->length += sg->length;
		sg = scatterwalk_sg_next(sg);
	}

	if (sg)
		scatterwalk_sg_chain(head, 2, sg);
	else
		sg_mark_end(head);
}
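/*
 * Illustrative sketch, not eseqiv.c itself: how the chain helper is
 * typically fed. A two-entry table holds the IV first; the payload sg is
 * either merged into the head entry (when the buffers happen to be
 * contiguous) or chained behind it. The function and parameter names are
 * assumptions made up for the example.
 */
static void example_prepend_iv(struct scatterlist head[2], u8 *iv,
			       unsigned int ivsize,
			       struct scatterlist *payload)
{
	sg_init_table(head, 2);
	sg_set_buf(head, iv, ivsize);

	/* Merge only if the IV ends exactly where the payload data begins. */
	eseqiv_chain(head, payload,
		     (u8 *)sg_virt(head) + head->length == sg_virt(payload));
}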
static int update2(struct hash_desc *desc,
		   struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);

	if (!nbytes)
		return 0;

	for (;;) {
		struct page *pg = sg_page(sg);
		unsigned int offset = sg->offset;
		unsigned int l = sg->length;

		if (unlikely(l > nbytes))
			l = nbytes;
		nbytes -= l;

		do {
			unsigned int bytes_from_page =
				min(l, ((unsigned int)PAGE_SIZE) - offset);
			char *src = crypto_kmap(pg, 0);
			char *p = src + offset;

			if (unlikely(offset & alignmask)) {
				unsigned int bytes =
					alignmask + 1 - (offset & alignmask);

				bytes = min(bytes, bytes_from_page);
				tfm->__crt_alg->cra_digest.dia_update(tfm, p,
								      bytes);
				p += bytes;
				bytes_from_page -= bytes;
				l -= bytes;
			}
			tfm->__crt_alg->cra_digest.dia_update(tfm, p,
							      bytes_from_page);
			crypto_kunmap(src, 0);
			crypto_yield(desc->flags);
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);

		if (!nbytes)
			break;

		sg = scatterwalk_sg_next(sg);
	}

	return 0;
}
/*
 * Get the number of scatterlist entries needed to store nbytes bytes of
 * data. It is an interesting question how we should behave in error cases;
 * hopefully the sg_copy functions do not overwrite memory if they run out
 * of list entries!
 */
static int sg_count(struct scatterlist *sg, size_t nbytes)
{
	int n = 0;

	while (sg && nbytes > 0) {
		n++;
		nbytes -= sg->length;
		sg = scatterwalk_sg_next(sg);
	}

	if (nbytes > 0 && !sg)
		pr_err("too short input to sg_count!\n");

	return n;
}
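/*
 * Illustrative sketch, not from the original module: using sg_count() to
 * size a linear bounce buffer before flattening the scatterlist with
 * sg_copy_to_buffer(). The function name and GFP flag are assumptions.
 */
static void *example_linearize_sg(struct scatterlist *sg, size_t nbytes)
{
	int ents = sg_count(sg, nbytes);
	void *buf = kmalloc(nbytes, GFP_KERNEL);

	if (!buf)
		return NULL;

	/* sg_copy_to_buffer() returns the number of bytes actually copied. */
	if (sg_copy_to_buffer(sg, ents, buf, nbytes) != nbytes) {
		kfree(buf);
		return NULL;
	}

	return buf;
}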
/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
				unsigned int sglen,
				struct scatterlist *sg_src,
				unsigned int start,
				unsigned int src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, offset = 0, len = src_len;
	char *dst;

	/* we need to fast forward through @start bytes first */
	for (;;) {
		scatterwalk_start(&walk, sg_src);

		if (start < offset + sg_src->length)
			break;

		offset += sg_src->length;
		sg_src = scatterwalk_sg_next(sg_src);
	}

	/* start - offset is the number of bytes to advance in the scatterlist
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

	while (len && nx_sg) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

		nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}

	/* return the moved destination pointer */
	return nx_sg;
}
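/*
 * Illustrative sketch, not the actual nx driver code: building the
 * driver-side scatterlist for a slice of the source data and deriving the
 * byte length of the resulting nx_sg array. 'nx_ctx', 'ap->sglen' and
 * 'op.inlen' mirror names used elsewhere in the nx driver but should be
 * treated as assumptions here.
 */
static void example_build_input_sg(struct nx_crypto_ctx *nx_ctx,
				   struct scatterlist *src,
				   unsigned int processed,
				   unsigned int to_process)
{
	struct nx_sg *end;

	/* Skip the bytes already handled, then map to_process bytes. */
	end = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
				src, processed, to_process);

	/* The input length is the distance walked in the nx_sg array. */
	nx_ctx->op.inlen = (end - nx_ctx->in_sg) * sizeof(struct nx_sg);
}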
static int process_next_packet(struct aes_container *aes_con,
			       struct ablkcipher_request *areq, int state)
{
	u8 *iv;
	int mode, dir, err = -EINVAL;
	unsigned long queue_flag;
	u32 inc, nbytes, remain, chunk_size;
	struct scatterlist *src = NULL;
	struct scatterlist *dst = NULL;
	struct crypto_ablkcipher *cipher;
	struct aes_ctx *ctx;

	spin_lock_irqsave(&aes_queue->lock, queue_flag);

	dir = aes_con->encdec;
	mode = aes_con->mode;
	iv = aes_con->iv;

	if (state & PROCESS_SCATTER) {
		src = scatterwalk_sg_next(areq->src);
		dst = scatterwalk_sg_next(areq->dst);

		if (!src || !dst) {
			spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
			return 1;
		}
	} else if (state & PROCESS_NEW_PACKET) {
		src = areq->src;
		dst = areq->dst;
	}

	remain = aes_con->bytes_processed;
	chunk_size = src->length;

	if (remain > DEU_MAX_PACKET_SIZE)
		inc = DEU_MAX_PACKET_SIZE;
	else if (remain > chunk_size)
		inc = chunk_size;
	else
		inc = remain;

	remain -= inc;
	aes_con->nbytes = inc;

	if (state & PROCESS_SCATTER) {
		aes_con->src_buf += aes_con->nbytes;
		aes_con->dst_buf += aes_con->nbytes;
	}

	lq_sg_init(aes_con, src, dst);

	nbytes = aes_con->nbytes;

	//printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
	//       __LINE__, __func__, nbytes, chunk_size);

	cipher = crypto_ablkcipher_reqtfm(areq);
	ctx = crypto_ablkcipher_ctx(cipher);

	if (aes_queue->hw_status == AES_IDLE)
		aes_queue->hw_status = AES_STARTED;

	aes_con->bytes_processed -= aes_con->nbytes;

	err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);
	if (err == -EBUSY) {
		spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
		printk("Failed to enqueue request, ln: %d, err: %d\n",
		       __LINE__, err);
		return -EINVAL;
	}

	spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

	err = lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv,
			      nbytes, dir, mode);

	return err;
}