static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
				  struct aead_request *req,
				  unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct scatterlist *dst;
	__be32 counter = cpu_to_be32(1);

	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));

	/* Append the initial 32-bit block counter to the 96-bit IV. */
	memcpy(req->iv + 12, &counter, 4);

	/*
	 * Prepend the (zeroed) auth tag buffer to the source list so the
	 * CTR transform encrypts it as the first block.
	 */
	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
	ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
				     cryptlen + sizeof(pctx->auth_tag),
				     req->iv);
}
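/*
 * A minimal, self-contained sketch (not part of the kernel sources; the
 * function and parameter names are hypothetical) of the chaining idiom
 * used above: a two-entry scatterlist whose first entry describes a
 * private buffer and whose second entry links onto the caller's list.
 * sg_chain() is the generic helper from <linux/scatterlist.h> that
 * scatterwalk_sg_chain() wraps.
 */
#include <linux/scatterlist.h>

static void chain_prefix_example(struct scatterlist prefix[2],
				 void *buf, unsigned int buflen,
				 struct scatterlist *payload)
{
	sg_init_table(prefix, 2);
	sg_set_buf(prefix, buf, buflen);	/* entry 0: local buffer */
	sg_chain(prefix, 2, payload);		/* entry 1: link to payload */
}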
static int crypto_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen;
	u8 *authtag = pctx->auth_tag;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	if (cryptlen < authsize)
		return -EINVAL;
	cryptlen -= authsize;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	/* Pull the received tag off the end of the ciphertext. */
	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);

	/* Zero the counter field to form A_0, which decrypts the tag. */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, authtag, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, authtag, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_decrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_decrypt(abreq);
	if (err)
		return err;

	/* Recompute the tag over the decrypted plaintext. */
	err = crypto_ccm_auth(req, req->dst, cryptlen);
	if (err)
		return err;

	/* verify */
	if (crypto_memneq(authtag, odata, authsize))
		return -EBADMSG;

	return err;
}
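/*
 * Why crypto_memneq() rather than memcmp() for the tag check above: the
 * comparison must not leak, via timing, how many leading bytes matched.
 * A sketch of the underlying technique (illustrative only; the kernel's
 * real crypto_memneq() additionally hides the accumulator from compiler
 * optimizations):
 */
static inline int tag_neq_sketch(const u8 *a, const u8 *b, size_t n)
{
	u8 diff = 0;

	/* accumulate all byte differences; no early exit on mismatch */
	while (n--)
		diff |= *a++ ^ *b++;

	return diff != 0;
}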
static int crypto_ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int cryptlen = req->cryptlen;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	/* Compute the CBC-MAC over the plaintext first. */
	err = crypto_ccm_auth(req, req->src, cryptlen);
	if (err)
		return err;

	/* Note: rfc 3610 and NIST 800-38C require counter of
	 * zero to encrypt auth tag.
	 */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, odata, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, odata, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_encrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_encrypt(abreq);
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
				 crypto_aead_authsize(aead), 1);

	return err;
}
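/*
 * Worked example for the memset() above, assuming a 13-byte nonce (the
 * common RFC 3610 case, so the length-field size is L = 2 and the flags
 * octet stores iv[0] = L - 1 = 1): the 2-byte block counter lives in
 * iv[14..15], and memset(iv + 15 - iv[0], 0, iv[0] + 1) becomes
 * memset(iv + 14, 0, 2), zeroing exactly the counter field to form the
 * A_0 block used to encrypt the authentication tag.
 */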
static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
			 int chain)
{
	if (chain) {
		/* head's buffer is contiguous with sg's: merge the entries */
		head->length += sg->length;
		sg = scatterwalk_sg_next(sg);
	}

	if (sg)
		scatterwalk_sg_chain(head, 2, sg);
	else
		sg_mark_end(head);
}
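/*
 * Semantics of the helper above: with "chain" set, the buffer described
 * by "head" is physically contiguous with sg's first buffer, so the two
 * entries are merged (head->length grows) and the walk continues from
 * sg's successor; otherwise head either links onto sg or, when sg is
 * exhausted, terminates the list via sg_mark_end().
 */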
static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
				  struct aead_request *req,
				  unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	u32 flags = req->base.tfm->crt_flags;
	struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
	struct scatterlist *dst;
	__be32 counter = cpu_to_be32(1);

	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
	memcpy(req->iv + 12, &counter, 4);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
	ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
				     cryptlen + sizeof(pctx->auth_tag),
				     req->iv);

	/*
	 * Hash the associated data first; the flush zero-pads it to a
	 * 16-byte boundary, since GCM hashes the AAD and the ciphertext
	 * as separate, block-aligned regions.
	 */
	crypto_gcm_ghash_init(ghash, flags, ctx->gf128);
	crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen);
	crypto_gcm_ghash_flush(ghash);
}
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data: save it for the next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* save the trailing, non-block-aligned bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be a multiple of block_size */
	req->nbytes = req->nbytes - hash_later;

	/* truncate the scatterlist to the bytes actually being hashed */
	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		rctx->in_sg_chained = true;
		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
		rctx->in_sg_chained = false;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
		rctx->in_sg_chained = false;
	}

	/* on the next call, only the remaining data is in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
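/*
 * A minimal sketch (hypothetical helper, not part of the sahara driver)
 * of the block-alignment arithmetic used above: for a non-final update,
 * only whole blocks may go to the hardware, and the trailing
 * "len & (block_size - 1)" bytes are carried over in the bounce buffer
 * for the next call. Assumes block_size is a power of two, as it is for
 * the SHA block sizes this driver handles.
 */
static unsigned int sha_hw_len_sketch(unsigned int buffered,
				      unsigned int nbytes,
				      unsigned int block_size, bool last)
{
	unsigned int len = buffered + nbytes;

	/* the final transfer is padded by hardware, so send everything */
	if (last)
		return len;

	/* otherwise round down to a whole number of blocks */
	return len & ~(block_size - 1);
}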