static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
	crypto_free_blkcipher(ctx->null);
}
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	offload_ctx->aead_send =
	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}
void tls_sw_free_tx_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);

	tls_free_both_sg(sk);
	kfree(ctx);
	kfree(tls_ctx);
}
void cryptodev_cipher_deinit(struct cipher_data *cdata)
{
	if (cdata->init) {
		if (cdata->aead == 0) {
			cryptodev_blkcipher_request_free(cdata->async.request);
			cryptodev_crypto_free_blkcipher(cdata->async.s);
		} else {
			if (cdata->async.arequest)
				aead_request_free(cdata->async.arequest);
			if (cdata->async.as)
				crypto_free_aead(cdata->async.as);
		}
		cdata->init = 0;
	}
}
struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_aead_setkey(tfm, key, WLAN_KEY_LEN_CCMP);
	if (!err)
		err = crypto_aead_setauthsize(tfm, IEEE80211_CCMP_MIC_LEN);
	if (!err)
		return tfm;

	crypto_free_aead(tfm);
	return ERR_PTR(err);
}
struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
						    size_t key_len,
						    size_t mic_len)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_aead_setkey(tfm, key, key_len);
	if (!err)
		err = crypto_aead_setauthsize(tfm, mic_len);
	if (!err)
		return tfm;

	crypto_free_aead(tfm);
	return ERR_PTR(err);
}
struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
							size_t key_len)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_aead_setkey(tfm, key, key_len);
	if (err)
		goto free_aead;
	err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
	if (err)
		goto free_aead;

	return tfm;

free_aead:
	crypto_free_aead(tfm);
	return ERR_PTR(err);
}
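/*
 * Usage sketch (assumed, not part of the original sources): one-shot
 * encryption with a tfm returned by the setup helpers above, using the
 * synchronous crypto_wait_req() helper. The flat in-place buffer layout
 * (AAD || plaintext || room for the tag) is an illustrative assumption;
 * real mac80211 callers build their scatterlists from the 802.11 frame.
 */
static int example_aead_seal(struct crypto_aead *tfm, u8 *buf,
			     unsigned int assoclen, unsigned int ptlen,
			     u8 *iv)
{
	struct scatterlist sg;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* buf holds AAD || plaintext, with room for the tag at the end */
	sg_init_one(&sg, buf, assoclen + ptlen + crypto_aead_authsize(tfm));

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
	return err;
}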
static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_rfc4543_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead;
	struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *aead;
	struct crypto_blkcipher *null;
	unsigned long align;
	int err = 0;

	aead = crypto_spawn_aead(spawn);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	null = crypto_spawn_blkcipher(&ictx->null.base);
	err = PTR_ERR(null);
	if (IS_ERR(null))
		goto err_free_aead;

	ctx->child = aead;
	ctx->null = null;

	align = crypto_aead_alignmask(aead);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
				ALIGN(crypto_aead_reqsize(aead),
				      crypto_tfm_ctx_alignment()) +
				align + 16;

	return 0;

err_free_aead:
	crypto_free_aead(aead);
	return err;
}
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
					       req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ?
				       GFP_KERNEL : GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
	if (req->src != req->dst)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->assoclen, ivsize, 1);

	return crypto_aead_decrypt(subreq);
}

static int echainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->geniv.lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

	err = crypto_get_default_rng();
	if (err)
		goto out;

	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));
	crypto_put_default_rng();
	if (err)
		goto out;

	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	err = aead_geniv_init(tfm);
	if (err)
		goto drop_null;

	ctx->geniv.child = geniv->child;
	geniv->child = geniv;

out:
	return err;

drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}

static void echainiv_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->geniv.child);
	crypto_put_default_null_skcipher();
}

static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	if (alg->base.cra_aead.encrypt)
		goto done;

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.base.cra_init = echainiv_init;
	inst->alg.base.cra_exit = echainiv_exit;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}
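/*
 * Usage sketch (assumed, not part of the original sources): the
 * "echainiv" template registered above is instantiated by name,
 * wrapped around an authenc() AEAD, as the IPsec stack does for
 * CBC-mode algorithms.
 */
static struct crypto_aead *example_alloc_echainiv_authenc(void)
{
	return crypto_alloc_aead("echainiv(authenc(hmac(sha1),cbc(aes)))",
				 0, 0);
}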
struct simd_aead_alg *simd_aead_create_compat(const char *algname,
					      const char *drvname,
					      const char *basename)
{
	struct simd_aead_alg *salg;
	struct crypto_aead *tfm;
	struct aead_alg *ialg;
	struct aead_alg *alg;
	int err;

	tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
				CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_aead_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);

	alg->ivsize = ialg->ivsize;
	alg->maxauthsize = ialg->maxauthsize;
	alg->chunksize = ialg->chunksize;

	alg->init = simd_aead_init;
	alg->exit = simd_aead_exit;

	alg->setkey = simd_aead_setkey;
	alg->setauthsize = simd_aead_setauthsize;
	alg->encrypt = simd_aead_encrypt;
	alg->decrypt = simd_aead_decrypt;

	err = crypto_register_aead(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_aead(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
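/*
 * Illustrative registration (driver names are hypothetical): by
 * convention the inner SIMD implementation is registered with a "__"
 * prefix and CRYPTO_ALG_INTERNAL set, which is what basename refers
 * to; the compat wrapper then exposes it as an ordinary async AEAD.
 */
static struct simd_aead_alg *example_simd_alg;

static int example_register_simd_gcm(void)
{
	example_simd_alg = simd_aead_create_compat("gcm(aes)",
						   "gcm-aes-simd",
						   "__gcm-aes-simd");
	return PTR_ERR_OR_ZERO(example_simd_alg);
}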
void aead_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_aead(tfm->crt_aead.base);
}
/*
 * AEAD algorithm self tests
 */
int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index, num_tv, authsize, buf_length;
	struct crypto_aead *tfm;
	struct aead_request *aead_req;
	struct _fips_completion fips_completion;
	struct scatterlist fips_sg, fips_assoc_sg;
	char *k_align_src = NULL;
	struct _fips_test_vector_aead tv_aead;

	num_tv = (sizeof(fips_test_vector_aead)) /
		 (sizeof(struct _fips_test_vector_aead));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {
		memcpy(&tv_aead, &fips_test_vector_aead[tv_index],
		       (sizeof(struct _fips_test_vector_aead)));

		if (tv_aead.pln_txt_len > tv_aead.enc_txt_len)
			buf_length = tv_aead.pln_txt_len;
		else
			buf_length = tv_aead.enc_txt_len;

		/* Single buffer allocation for in place operation */
		k_align_src = kzalloc(buf_length, GFP_KERNEL);
		if (k_align_src == NULL) {
			pr_err("qcrypto: Failed to allocate memory for k_align_src\n");
			return -ENOMEM;
		}

		memcpy(&k_align_src[0], tv_aead.pln_txt,
		       tv_aead.pln_txt_len);

		/*
		 * use_sw flags are set in the dtsi file, which makes the
		 * default Linux API calls go to s/w crypto instead of h/w
		 * crypto. This code makes sure that all selftest calls
		 * always go to h/w, independent of DTSI flags.
		 */
		if (selftest_d->prefix_aead_algo) {
			if (_fips_get_alg_cra_name(tv_aead.mod_alg,
						   selftest_d->algo_prefix,
						   strlen(tv_aead.mod_alg))) {
				rc = -1;
				pr_err("Algo Name is too long for tv %d\n",
				       tv_index);
				goto clr_buf;
			}
		}

		tfm = crypto_alloc_aead(tv_aead.mod_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
			       tv_aead.mod_alg);
			rc = -ENOMEM;
			goto clr_buf;
		}

		aead_req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!aead_req) {
			pr_err("qcrypto: aead_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}

		rc = qcrypto_aead_set_device(aead_req, selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
			       __func__, rc);
			goto clr_aead_req;
		}

		init_completion(&fips_completion.completion);
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  _fips_cb, &fips_completion);

		crypto_aead_clear_flags(tfm, ~0);

		rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen);
		if (rc) {
			pr_err("qcrypto: crypto_aead_setkey failed\n");
			goto clr_aead_req;
		}

		/* The tag length is the ciphertext/plaintext size delta */
		authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len);
		rc = crypto_aead_setauthsize(tfm, authsize);
		if (rc) {
			pr_err("qcrypto: crypto_aead_setauthsize failed\n");
			goto clr_aead_req;
		}

		sg_init_one(&fips_sg, k_align_src,
			    tv_aead.pln_txt_len + authsize);
		aead_request_set_crypt(aead_req, &fips_sg, &fips_sg,
				       tv_aead.pln_txt_len, tv_aead.iv);

		sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen);
		aead_request_set_assoc(aead_req, &fips_assoc_sg,
				       tv_aead.alen);

		/**** Encryption test ****/
		rc = crypto_aead_encrypt(aead_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:aead:ENC, wait_for_completion failed\n");
				goto clr_aead_req;
			}
		}

		if (memcmp(k_align_src, tv_aead.enc_txt,
			   tv_aead.enc_txt_len)) {
			rc = -1;
			goto clr_aead_req;
		}

		/**** Decryption test ****/
		init_completion(&fips_completion.completion);
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  _fips_cb, &fips_completion);

		crypto_aead_clear_flags(tfm, ~0);

		rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen);
		if (rc) {
			pr_err("qcrypto:aead:DEC, crypto_aead_setkey failed\n");
			goto clr_aead_req;
		}

		authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len);
		rc = crypto_aead_setauthsize(tfm, authsize);
		if (rc) {
			pr_err("qcrypto:aead:DEC, crypto_aead_setauthsize failed\n");
			goto clr_aead_req;
		}

		sg_init_one(&fips_sg, k_align_src,
			    tv_aead.enc_txt_len + authsize);
		aead_request_set_crypt(aead_req, &fips_sg, &fips_sg,
				       tv_aead.enc_txt_len, tv_aead.iv);

		sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen);
		aead_request_set_assoc(aead_req, &fips_assoc_sg,
				       tv_aead.alen);

		rc = crypto_aead_decrypt(aead_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:aead:DEC, wait_for_completion failed\n");
				goto clr_aead_req;
			}
		}

		if (memcmp(k_align_src, tv_aead.pln_txt,
			   tv_aead.pln_txt_len)) {
			rc = -1;
			goto clr_aead_req;
		}

clr_aead_req:
		aead_request_free(aead_req);
clr_tfm:
		crypto_free_aead(tfm);
clr_buf:
		kzfree(k_align_src);

		/* In case of any failure, return error */
		if (rc)
			return rc;
	}

	return rc;
}
static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}
void ieee80211_aes_key_free(struct crypto_aead *tfm)
{
	crypto_free_aead(tfm);
}
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
			  uint8_t *keyp, size_t keylen, int stream, int aead)
{
	int ret;

	if (aead == 0) {
		struct ablkcipher_alg *alg;

		out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.s))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		alg = crypto_ablkcipher_alg(out->async.s);
		if (alg != NULL) {
			/* Was correct key length supplied? */
			if (alg->max_keysize > 0 &&
			    unlikely((keylen < alg->min_keysize) ||
				     (keylen > alg->max_keysize))) {
				ddebug(1, "Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.",
				       keylen, alg_name, alg->min_keysize,
				       alg->max_keysize);
				ret = -EINVAL;
				goto error;
			}
		}

		out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
		out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
		out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

		ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
	} else {
		out->async.as = crypto_alloc_aead(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.as))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return -EINVAL;
		}

		out->blocksize = crypto_aead_blocksize(out->async.as);
		out->ivsize = crypto_aead_ivsize(out->async.as);
		out->alignmask = crypto_aead_alignmask(out->async.as);

		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
	}

	if (unlikely(ret)) {
		ddebug(1, "Setting key failed for %s-%zu.",
		       alg_name, keylen * 8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->aead = aead;

	out->async.result = kzalloc(sizeof(*out->async.result), GFP_KERNEL);
	if (unlikely(!out->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&out->async.result->completion);

	if (aead == 0) {
		out->async.request = ablkcipher_request_alloc(out->async.s,
							      GFP_KERNEL);
		if (unlikely(!out->async.request)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		ablkcipher_request_set_callback(out->async.request,
						CRYPTO_TFM_REQ_MAY_BACKLOG,
						cryptodev_complete,
						out->async.result);
	} else {
		out->async.arequest = aead_request_alloc(out->async.as,
							 GFP_KERNEL);
		if (unlikely(!out->async.arequest)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		aead_request_set_callback(out->async.arequest,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  cryptodev_complete,
					  out->async.result);
	}

	out->init = 1;
	return 0;

error:
	if (aead == 0) {
		if (out->async.request)
			ablkcipher_request_free(out->async.request);
		if (out->async.s)
			crypto_free_ablkcipher(out->async.s);
	} else {
		if (out->async.arequest)
			aead_request_free(out->async.arequest);
		if (out->async.as)
			crypto_free_aead(out->async.as);
	}
	kfree(out->async.result);

	return ret;
}
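/*
 * Usage sketch (assumed, not part of the original sources): pairing
 * cryptodev_cipher_init() with cryptodev_cipher_deinit() for an AEAD
 * transform; key material and length are caller-supplied.
 */
static int example_setup_gcm(struct cipher_data *cdata,
			     uint8_t *key, size_t keylen)
{
	int ret;

	/* stream = 0, aead = 1 selects the crypto_alloc_aead() path */
	ret = cryptodev_cipher_init(cdata, "gcm(aes)", key, keylen, 0, 1);
	if (ret)
		return ret;

	/* ... issue requests through cdata->async.arequest here ... */

	cryptodev_cipher_deinit(cdata);
	return 0;
}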
static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context *sw_ctx;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (ctx->priv_ctx) {
		rc = -EEXIST;
		goto out;
	}

	sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
	if (!sw_ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;

	crypto_info = &ctx->crypto_send;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}

	ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tag_size = tag_size;
	ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
	ctx->iv_size = iv_size;
	ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			  GFP_KERNEL);
	if (!ctx->iv) {
		rc = -ENOMEM;
		goto out;
	}
	memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	ctx->rec_seq_size = rec_seq_size;
	ctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!ctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(ctx->rec_seq, rec_seq, rec_seq_size);

	sg_init_table(sw_ctx->sg_encrypted_data,
		      ARRAY_SIZE(sw_ctx->sg_encrypted_data));
	sg_init_table(sw_ctx->sg_plaintext_data,
		      ARRAY_SIZE(sw_ctx->sg_plaintext_data));

	sg_init_table(sw_ctx->sg_aead_in, 2);
	sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_in[1]);
	sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
	sg_init_table(sw_ctx->sg_aead_out, 2);
	sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_out[1]);
	sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);

	if (!sw_ctx->aead_send) {
		sw_ctx->aead_send = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(sw_ctx->aead_send)) {
			rc = PTR_ERR(sw_ctx->aead_send);
			sw_ctx->aead_send = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(sw_ctx->aead_send, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
	if (!rc)
		goto out;

free_aead:
	crypto_free_aead(sw_ctx->aead_send);
	sw_ctx->aead_send = NULL;
free_rec_seq:
	kfree(ctx->rec_seq);
	ctx->rec_seq = NULL;
free_iv:
	kfree(ctx->iv);
	ctx->iv = NULL;
out:
	return rc;
}