/*
 * crypto_rfc3686_crypt - run one RFC 3686 (CTR with explicit nonce) request.
 *
 * Builds the 16-byte counter block as nonce || per-request IV || 32-bit
 * big-endian counter starting at 1, then forwards the request to the
 * underlying CTR child transform.  Returns whatever the child's encrypt
 * operation returns (0 or -errno / -EINPROGRESS).
 */
static int crypto_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = ctx->child;
	unsigned long align = crypto_ablkcipher_alignmask(tfm);
	/*
	 * The request context is over-allocated; align the start of our
	 * private rctx up to the tfm's alignment requirement.
	 */
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1);
	struct ablkcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block: fixed nonce followed by the caller's IV */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block (RFC 3686 starts at 1) */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	/* Hand the rebuilt request to the child CTR cipher unchanged. */
	ablkcipher_request_set_tfm(subreq, child);
	ablkcipher_request_set_callback(subreq, req->base.flags,
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
				     iv);

	return crypto_ablkcipher_encrypt(subreq);
}
/*
 * eseqiv_init - per-tfm initialisation for the eseqiv IV generator.
 *
 * Computes the request-context size so that the trailing area (reached via
 * ctx->reqoff) is large enough and suitably aligned for the generated IV
 * plus the child cipher's sub-request, then chains to skcipher_geniv_init().
 */
static int eseqiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	unsigned long alignmask;
	unsigned int reqsize;

	spin_lock_init(&ctx->lock);

	alignmask = crypto_tfm_ctx_alignment() - 1;
	reqsize = sizeof(struct eseqiv_request_ctx);

	/*
	 * Bit-twiddling to derive the padding needed after the request
	 * context: keeps only the low alignment bits relevant given
	 * reqsize's own alignment.  Order of these operations is
	 * significant — do not "simplify".
	 */
	if (alignmask & reqsize) {
		alignmask &= reqsize;
		alignmask--;
	}

	alignmask = ~alignmask;
	alignmask &= crypto_ablkcipher_alignmask(geniv);

	reqsize += alignmask;                          /* padding for IV alignment */
	reqsize += crypto_ablkcipher_ivsize(geniv);    /* space for the IV itself */
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());

	/* Offset from the end of eseqiv_request_ctx to the sub-request. */
	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);

	tfm->crt_ablkcipher.reqsize = reqsize +
				      sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}
static void eseqiv_complete2(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail, crypto_ablkcipher_alignmask(geniv) + 1), crypto_ablkcipher_ivsize(geniv)); }
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); unsigned long alignmask = crypto_ablkcipher_alignmask(tfm); if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if ((unsigned long)key & alignmask) return setkey_unaligned(tfm, key, keylen); return cipher->setkey(tfm, key, keylen); }
/*
 * seqiv_givencrypt - encrypt while generating a sequence-number-based IV.
 *
 * Derives the IV from ctx state and req->seq via seqiv_geniv(), writes a
 * copy to req->giv, and runs the encryption on the child cipher.  If the
 * caller's IV buffer is not aligned for the cipher, a temporary aligned
 * buffer is allocated and the completion path is redirected to
 * seqiv_complete so the temporary can be cleaned up.
 */
static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

	compl = req->creq.base.complete;
	data = req->creq.base.data;
	info = req->creq.info;

	ivsize = crypto_ablkcipher_ivsize(geniv);

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_ablkcipher_alignmask(geniv) + 1))) {
		/*
		 * Caller's IV buffer is misaligned: generate into a
		 * temporary, and let seqiv_complete free it afterwards.
		 * GFP choice honours the request's MAY_SLEEP flag.
		 */
		info = kmalloc(ivsize, req->creq.base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = seqiv_complete;
		data = req;
	}

	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
					data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, info);

	/* Generate the IV in place, then hand a copy back to the caller. */
	seqiv_geniv(ctx, info, req->seq, ivsize);
	memcpy(req->giv, info, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	/* Synchronous finish with a bounce buffer: complete it here. */
	if (unlikely(info != req->creq.info))
		seqiv_complete2(req, err);
	return err;
}
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); unsigned long alignmask = crypto_ablkcipher_alignmask(tfm); int ret; u8 *buffer, *alignbuffer; unsigned long absize; absize = keylen + alignmask; buffer = kmalloc(absize, GFP_ATOMIC); if (!buffer) return -ENOMEM; alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = cipher->setkey(tfm, alignbuffer, keylen); memset(alignbuffer, 0, keylen); kfree(buffer); return ret; }
static int eseqiv_init(struct crypto_tfm *tfm) { struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); unsigned long alignmask; unsigned int reqsize; #ifndef CONFIG_CRYPTO_DRBG spin_lock_init(&ctx->lock); #endif alignmask = crypto_tfm_ctx_alignment() - 1; reqsize = sizeof(struct eseqiv_request_ctx); if (alignmask & reqsize) { alignmask &= reqsize; alignmask--; } alignmask = ~alignmask; alignmask &= crypto_ablkcipher_alignmask(geniv); reqsize += alignmask; reqsize += crypto_ablkcipher_ivsize(geniv); reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment()); ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx); tfm->crt_ablkcipher.reqsize = reqsize + sizeof(struct ablkcipher_request); #ifdef CONFIG_CRYPTO_DRBG crypto_rng_get_bytes(crypto_default_rng, ctx->salt, crypto_ablkcipher_ivsize(geniv)); #endif return skcipher_geniv_init(tfm); }
/*
 * eseqiv_givencrypt - encrypted-seqiv IV generation + encryption.
 *
 * Prepends an IV slot to the source/destination scatterlists, fills the
 * caller's IV area with salt and a big-endian sequence number, and
 * encrypts IV-slot + payload together so the output IV is itself
 * ciphertext.  If the IV slot cannot alias the caller's buffers, an
 * aligned scratch slot in reqctx->tail is used and copied back via
 * eseqiv_complete / eseqiv_complete2.
 */
static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
	struct ablkcipher_request *subreq;
	crypto_completion_t complete;
	void *data;
	struct scatterlist *osrc, *odst;
	struct scatterlist *dst;
	struct page *srcp;
	struct page *dstp;
	u8 *giv;
	u8 *vsrc;
	u8 *vdst;
	__be64 seq;
	unsigned int ivsize;
	unsigned int len;
	int err;

	/* Sub-request lives at the pre-computed offset (see eseqiv_init). */
	subreq = (void *)(reqctx->tail + ctx->reqoff);
	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

	giv = req->giv;
	complete = req->creq.base.complete;
	data = req->creq.base.data;

	osrc = req->creq.src;
	odst = req->creq.dst;
	srcp = sg_page(osrc);
	dstp = sg_page(odst);
	/* Virtual addresses are only usable for lowmem pages. */
	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;

	ivsize = crypto_ablkcipher_ivsize(geniv);

	/*
	 * Fast path: req->giv sits immediately before src (or dst) so the
	 * IV slot can be chained contiguously.  Otherwise fall back to an
	 * aligned scratch buffer and fix things up on completion.
	 */
	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
		giv = PTR_ALIGN((u8 *)reqctx->tail,
				crypto_ablkcipher_alignmask(geniv) + 1);
		complete = eseqiv_complete;
		data = req;
	}

	ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
					data);

	/* Build src = [IV slot] -> original src (chained, 2 entries). */
	sg_init_table(reqctx->src, 2);
	sg_set_buf(reqctx->src, giv, ivsize);
	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);

	dst = reqctx->src;
	if (osrc != odst) {
		/* Out-of-place: build a matching dst chain as well. */
		sg_init_table(reqctx->dst, 2);
		sg_set_buf(reqctx->dst, giv, ivsize);
		scatterwalk_crypto_chain(reqctx->dst, odst,
					 vdst == giv + ivsize, 2);

		dst = reqctx->dst;
	}

	ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
				     req->creq.nbytes + ivsize,
				     req->creq.info);

	/* Input IV = salt; sequence number goes into the output IV slot. */
	memcpy(req->creq.info, ctx->salt, ivsize);

	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto out;

	/* Synchronous completion with scratch IV: copy it back now. */
	if (giv != req->giv)
		eseqiv_complete2(req);

out:
	return err;
}
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) { return (u8 *)ALIGN((unsigned long)(dmreq + 1), crypto_ablkcipher_alignmask(any_tfm(cc)) + 1); }
/*
 * cryptodev_cipher_init - allocate and key a cipher (ablkcipher or AEAD).
 *
 * Picks the AEAD or ablkcipher path based on @aead, validates the key
 * length against the algorithm's advertised range (ablkcipher only),
 * sets the key, and allocates the async completion + request objects.
 * Returns 0 on success or a negative errno; on failure all resources
 * allocated here are released.
 *
 * NOTE(review): the error path frees out->async.request / .s / .arequest
 * / .as / .result unconditionally — this assumes the caller passes a
 * zeroed *out so that not-yet-allocated members are NULL; verify against
 * callers.
 */
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
			  uint8_t *keyp, size_t keylen, int stream, int aead)
{
	int ret;

	if (aead == 0) {
		struct ablkcipher_alg *alg;

		out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.s))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		alg = crypto_ablkcipher_alg(out->async.s);
		if (alg != NULL) {
			/* Was correct key length supplied? */
			if (alg->max_keysize > 0 &&
			    unlikely((keylen < alg->min_keysize) ||
				     (keylen > alg->max_keysize))) {
				ddebug(1, "Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.",
				       keylen, alg_name, alg->min_keysize,
				       alg->max_keysize);
				ret = -EINVAL;
				goto error;
			}
		}

		out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
		out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
		out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

		ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
	} else {
		out->async.as = crypto_alloc_aead(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.as))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		out->blocksize = crypto_aead_blocksize(out->async.as);
		out->ivsize = crypto_aead_ivsize(out->async.as);
		out->alignmask = crypto_aead_alignmask(out->async.as);

		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
	}

	if (unlikely(ret)) {
		ddebug(1, "Setting key failed for %s-%zu.",
		       alg_name, keylen*8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->aead = aead;

	/* Shared completion object for async requests. */
	out->async.result = kzalloc(sizeof(*out->async.result), GFP_KERNEL);
	if (unlikely(!out->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&out->async.result->completion);

	if (aead == 0) {
		out->async.request = ablkcipher_request_alloc(out->async.s,
							      GFP_KERNEL);
		if (unlikely(!out->async.request)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		ablkcipher_request_set_callback(out->async.request,
						CRYPTO_TFM_REQ_MAY_BACKLOG,
						cryptodev_complete,
						out->async.result);
	} else {
		out->async.arequest = aead_request_alloc(out->async.as,
							 GFP_KERNEL);
		if (unlikely(!out->async.arequest)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		aead_request_set_callback(out->async.arequest,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  cryptodev_complete,
					  out->async.result);
	}

	out->init = 1;
	return 0;
error:
	/* Release whichever of the two transform families was in use. */
	if (aead == 0) {
		if (out->async.request)
			ablkcipher_request_free(out->async.request);
		if (out->async.s)
			crypto_free_ablkcipher(out->async.s);
	} else {
		if (out->async.arequest)
			aead_request_free(out->async.arequest);
		if (out->async.as)
			crypto_free_aead(out->async.as);
	}
	kfree(out->async.result);

	return ret;
}