static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	ASF_FP_LINUX_CRYPTO_FENTRY;
	ASF_FP_LINUX_CRYPTO_FEXIT;
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_cipher *cipher;
	struct crypto_ablkcipher *ctr;
	unsigned long align;
	int err;

	cipher = crypto_spawn_cipher(&ictx->cipher);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctr = crypto_spawn_skcipher(&ictx->ctr);
	err = PTR_ERR(ctr);
	if (IS_ERR(ctr))
		goto err_free_cipher;

	ctx->cipher = cipher;
	ctx->ctr = ctr;

	align = crypto_aead_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	crypto_aead_set_reqsize(
		tfm,
		align + sizeof(struct crypto_ccm_req_priv_ctx) +
		crypto_ablkcipher_reqsize(ctr));

	return 0;

err_free_cipher:
	crypto_free_cipher(cipher);
	return err;
}
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
	struct aead_request *req)
{
	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));

	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
	struct aead_alg *aead = crypto_aead_alg(tfm);
	unsigned long alignmask = crypto_aead_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return aead->setkey(tfm, key, keylen);
}
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
						 int enc)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct scatterlist *dst = req->dst;
	struct scatterlist *cipher = rctx->cipher;
	struct scatterlist *payload = rctx->payload;
	struct scatterlist *assoc = rctx->assoc;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int assoclen = req->assoclen;
	struct page *dstp;
	u8 *vdst;
	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
			   crypto_aead_alignmask(ctx->child) + 1);

	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	/* construct cipher/plaintext */
	if (enc)
		memset(rctx->auth_tag, 0, authsize);
	else
		scatterwalk_map_and_copy(rctx->auth_tag, dst,
					 req->cryptlen - authsize,
					 authsize, 0);
	sg_init_one(cipher, rctx->auth_tag, authsize);

	/* construct the aad */
	dstp = sg_page(dst);
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;

	sg_init_table(payload, 2);
	sg_set_buf(payload, req->iv, 8);
	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

	sg_init_table(assoc, 2);
	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
		    req->assoc->offset);
	scatterwalk_crypto_chain(assoc, payload, 0, 2);

	aead_request_set_tfm(subreq, ctx->child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
	aead_request_set_assoc(subreq, assoc, assoclen);

	return subreq;
}
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	return blkcipher_walk_first(desc, walk);
}
static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *areq = &req->areq;
	struct aead_request *subreq = aead_givcrypt_reqctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	aead_request_set_tfm(subreq, aead_geniv_base(geniv));

	compl = areq->base.complete;
	data = areq->base.data;
	info = areq->iv;

	ivsize = crypto_aead_ivsize(geniv);

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, areq->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ?
				       GFP_KERNEL : GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = seqiv_aead_complete;
		data = req;
	}

	aead_request_set_callback(subreq, areq->base.flags, compl, data);
	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
			       info);
	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);

	seqiv_geniv(ctx, info, req->seq, ivsize);
	memcpy(req->giv, info, ivsize);

	err = crypto_aead_encrypt(subreq);
	if (unlikely(info != areq->iv))
		seqiv_aead_complete2(req, err);
	return err;
}
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
	struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	struct scatterlist *sg;
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	/* L' */
	iv[0] = 3;

	memcpy(iv + 1, ctx->nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);

	sg_init_table(rctx->src, 3);
	sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
	if (sg != rctx->src + 1)
		sg_chain(rctx->src, 2, sg);

	if (req->src != req->dst) {
		sg_init_table(rctx->dst, 3);
		sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
		if (sg != rctx->dst + 1)
			sg_chain(rctx->dst, 2, sg);
	}

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, rctx->src,
			       req->src == req->dst ? rctx->src : rctx->dst,
			       req->cryptlen, iv);
	aead_request_set_ad(subreq, req->assoclen - 8);

	return subreq;
}
static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_aead *child = ctx->child;
	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
			   crypto_aead_alignmask(child) + 1);

	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
	aead_request_set_assoc(subreq, req->assoc, req->assoclen);

	return subreq;
}
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aead_alg *aead = crypto_aead_alg(tfm);
	unsigned long alignmask = crypto_aead_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = aead->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
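/* The two setkey paths above hinge on one piece of arithmetic: a bounce
 * buffer of keylen + alignmask bytes is always large enough to hold an
 * aligned copy of the key once the pointer is rounded up. The standalone
 * userspace sketch below (plain C, not kernel code; ALIGN_UP and the
 * 16-byte example mask are local stand-ins, not kernel macros) just
 * demonstrates that rounding, nothing more. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXAMPLE_ALIGNMASK 15UL			/* assumed example: 16-byte alignment */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	const uint8_t key[20] = { 0x01, 0x02, 0x03 };	/* arbitrary demo key */
	size_t keylen = sizeof(key);

	/* Same trick as setkey_unaligned(): over-allocate by alignmask bytes,
	 * then round the pointer up to the next aligned boundary. */
	uint8_t *buffer = malloc(keylen + EXAMPLE_ALIGNMASK);
	if (!buffer)
		return 1;

	uint8_t *alignbuffer =
		(uint8_t *)ALIGN_UP((uintptr_t)buffer, EXAMPLE_ALIGNMASK + 1);
	memcpy(alignbuffer, key, keylen);

	printf("raw %p -> aligned %p (offset %zu)\n",
	       (void *)buffer, (void *)alignbuffer,
	       (size_t)(alignbuffer - buffer));

	/* ... the aligned copy would be handed to the real setkey here ... */
	memset(alignbuffer, 0, keylen);	/* wipe the key copy before freeing */
	free(buffer);
	return 0;
}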
static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *aead;
	unsigned long align;

	aead = crypto_spawn_aead(spawn);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	ctx->child = aead;

	align = crypto_aead_alignmask(aead);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				ALIGN(crypto_aead_reqsize(aead),
				      crypto_tfm_ctx_alignment()) +
				align + 16;

	return 0;
}
/* Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	ASF_FP_LINUX_CRYPTO_FENTRY;

	len = seqhilen;
	len += crypto_aead_ivsize(aead);

	if (likely(len)) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	ASF_FP_LINUX_CRYPTO_FEXIT;
	return kmalloc(len, GFP_ATOMIC);
}
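/* esp_tmp_iv() above is one of a small family of helpers that carve the
 * buffer returned by esp_alloc_tmp() into the pieces described in the
 * comment (seqhi, IV, request, SG list). The two carve-out helpers below
 * are a sketch following the upstream esp4.c pattern for the givcrypt
 * request; they are reproduced from memory as an illustration of the
 * layout, not copied verbatim from this tree. */
static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	/* The request starts at the first ctx-aligned address past the IV. */
	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	/* The SG list follows the request and its driver context. */
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}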
static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_rfc4543_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead;
	struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *aead;
	struct crypto_blkcipher *null;
	unsigned long align;
	int err = 0;

	aead = crypto_spawn_aead(spawn);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	null = crypto_spawn_blkcipher(&ictx->null.base);
	err = PTR_ERR(null);
	if (IS_ERR(null))
		goto err_free_aead;

	ctx->child = aead;
	ctx->null = null;

	align = crypto_aead_alignmask(aead);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
				ALIGN(crypto_aead_reqsize(aead),
				      crypto_tfm_ctx_alignment()) +
				align + 16;

	return 0;

err_free_aead:
	crypto_free_aead(aead);
	return err;
}
static int crypto_rfc4309_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *aead;
	unsigned long align;

	aead = crypto_spawn_aead(spawn);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	ctx->child = aead;

	align = crypto_aead_alignmask(aead);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	crypto_aead_set_reqsize(
		tfm,
		sizeof(struct crypto_rfc4309_req_ctx) +
		ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
		align + 32);

	return 0;
}
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
						 bool enc)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct scatterlist *src = req->src;
	struct scatterlist *cipher = rctx->cipher;
	struct scatterlist *payload = rctx->payload;
	struct scatterlist *assoc = rctx->assoc;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int assoclen = req->assoclen;
	struct page *srcp;
	u8 *vsrc;
	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
			   crypto_aead_alignmask(ctx->child) + 1);

	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	/* construct cipher/plaintext */
	if (enc)
		memset(rctx->auth_tag, 0, authsize);
	else
		scatterwalk_map_and_copy(rctx->auth_tag, src,
					 req->cryptlen - authsize,
					 authsize, 0);
	sg_init_one(cipher, rctx->auth_tag, authsize);

	/* construct the aad */
	srcp = sg_page(src);
	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;

	sg_init_table(payload, 2);
	sg_set_buf(payload, req->iv, 8);
	scatterwalk_crypto_chain(payload, src, vsrc == req->iv + 8, 2);
	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

	if (req->assoc->length == req->assoclen) {
		sg_init_table(assoc, 2);
		sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
			    req->assoc->offset);
	} else {
		BUG_ON(req->assoclen > sizeof(rctx->assocbuf));

		scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
					 req->assoclen, 0);

		sg_init_table(assoc, 2);
		sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
	}
	scatterwalk_crypto_chain(assoc, payload, 0, 2);

	aead_request_set_tfm(subreq, ctx->child);
	aead_request_set_callback(subreq, req->base.flags, crypto_rfc4543_done,
				  req);
	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
	aead_request_set_assoc(subreq, assoc, assoclen);

	return subreq;
}
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
					       req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ?
				       GFP_KERNEL : GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
	if (req->src != req->dst)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->assoclen, ivsize, 1);

	return crypto_aead_decrypt(subreq);
}

static int echainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->geniv.lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

	err = crypto_get_default_rng();
	if (err)
		goto out;

	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));
	crypto_put_default_rng();
	if (err)
		goto out;

	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	err = aead_geniv_init(tfm);
	if (err)
		goto drop_null;

	ctx->geniv.child = geniv->child;
	geniv->child = geniv;

out:
	return err;

drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}

static void echainiv_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->geniv.child);
	crypto_put_default_null_skcipher();
}

static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	if (alg->base.cra_aead.encrypt)
		goto done;

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.base.cra_init = echainiv_init;
	inst->alg.base.cra_exit = echainiv_exit;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}
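/* Hedged sketch: how a geniv template like the one above is typically hooked
 * up. The register/unregister calls and the crypto_template fields follow the
 * standard kernel pattern; this module boilerplate is illustrative rather
 * than a verbatim copy of echainiv.c. */
static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.create = echainiv_aead_create,
	.free = echainiv_free,
	.module = THIS_MODULE,
};

static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}

module_init(echainiv_module_init);
module_exit(echainiv_module_exit);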
static int seqiv_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = 8;
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;
	info = req->iv;

	if (req->src != req->dst) {
		SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);

		skcipher_request_set_tfm(nreq, ctx->sknull);
		skcipher_request_set_callback(nreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(nreq, req->src, req->dst,
					   req->assoclen + req->cryptlen,
					   NULL);

		err = crypto_skcipher_encrypt(nreq);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ?
				       GFP_KERNEL : GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
		compl = seqiv_aead_encrypt_complete;
		data = req;
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

	err = crypto_aead_encrypt(subreq);
	if (unlikely(info != req->iv))
		seqiv_aead_encrypt_complete2(req, err);
	return err;
}
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
			  uint8_t *keyp, size_t keylen, int stream, int aead)
{
	int ret;

	if (aead == 0) {
		struct ablkcipher_alg *alg;

		out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.s))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		alg = crypto_ablkcipher_alg(out->async.s);
		if (alg != NULL) {
			/* Was correct key length supplied? */
			if (alg->max_keysize > 0 &&
			    unlikely((keylen < alg->min_keysize) ||
				     (keylen > alg->max_keysize))) {
				ddebug(1, "Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.",
				       keylen, alg_name, alg->min_keysize,
				       alg->max_keysize);
				ret = -EINVAL;
				goto error;
			}
		}

		out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
		out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
		out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

		ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
	} else {
		out->async.as = crypto_alloc_aead(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.as))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		out->blocksize = crypto_aead_blocksize(out->async.as);
		out->ivsize = crypto_aead_ivsize(out->async.as);
		out->alignmask = crypto_aead_alignmask(out->async.as);

		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
	}

	if (unlikely(ret)) {
		ddebug(1, "Setting key failed for %s-%zu.",
		       alg_name, keylen * 8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->aead = aead;

	out->async.result = kzalloc(sizeof(*out->async.result), GFP_KERNEL);
	if (unlikely(!out->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&out->async.result->completion);

	if (aead == 0) {
		out->async.request = ablkcipher_request_alloc(out->async.s,
							      GFP_KERNEL);
		if (unlikely(!out->async.request)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		ablkcipher_request_set_callback(out->async.request,
						CRYPTO_TFM_REQ_MAY_BACKLOG,
						cryptodev_complete,
						out->async.result);
	} else {
		out->async.arequest = aead_request_alloc(out->async.as,
							 GFP_KERNEL);
		if (unlikely(!out->async.arequest)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		aead_request_set_callback(out->async.arequest,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  cryptodev_complete,
					  out->async.result);
	}

	out->init = 1;
	return 0;

error:
	if (aead == 0) {
		if (out->async.request)
			ablkcipher_request_free(out->async.request);
		if (out->async.s)
			crypto_free_ablkcipher(out->async.s);
	} else {
		if (out->async.arequest)
			aead_request_free(out->async.arequest);
		if (out->async.as)
			crypto_free_aead(out->async.as);
	}
	kfree(out->async.result);

	return ret;
}
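/* Hedged usage sketch for cryptodev_cipher_init(): set up an AEAD transform,
 * then tear it down again. cryptodev_cipher_deinit() is the matching cleanup
 * helper in cryptodev-linux; the algorithm name, key and this wrapper
 * function are illustrative assumptions, not code from this tree. */
static int example_gcm_setup(void)
{
	struct cipher_data cdata;
	uint8_t key[16] = { 0 };	/* demo key, not a real secret */
	int ret;

	memset(&cdata, 0, sizeof(cdata));

	/* stream = 0, aead = 1: takes the crypto_alloc_aead() branch above */
	ret = cryptodev_cipher_init(&cdata, "gcm(aes)", key, sizeof(key), 0, 1);
	if (ret)
		return ret;

	/* ... use cdata.async.arequest for encrypt/decrypt operations ... */

	cryptodev_cipher_deinit(&cdata);
	return 0;
}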