static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
				  struct aead_request *req,
				  unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
	struct scatterlist *dst;
	__be32 counter = cpu_to_be32(1);

	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
	memcpy(req->iv + 12, &counter, 4);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
	ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
				     cryptlen + sizeof(pctx->auth_tag),
				     req->iv);
}
static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}
static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int keylen)
{
	int aes_keylen;
	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
	struct flexi_crypto_context *fctx;
	union fc_ctx_flags flags;

	aes_keylen = flexi_aes_keylen(keylen);
	if (aes_keylen < 0) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* fill crypto context */
	fctx = nctx->u.fctx;
	flags.f = be64_to_cpu(fctx->flags.f);
	flags.w0.aes_keylen = aes_keylen;
	fctx->flags.f = cpu_to_be64(flags.f);

	/* copy enc key to context */
	memset(&fctx->crypto, 0, sizeof(fctx->crypto));
	memcpy(fctx->crypto.u.key, key, keylen);

	return 0;
}
static int seqiv_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = 8;

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}
static int nitrox_aes_gcm_dec(struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
	struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
	struct se_crypto_request *creq = &rctx->nkreq.creq;
	struct flexi_crypto_context *fctx = nctx->u.fctx;
	int ret;

	memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);

	rctx->cryptlen = areq->cryptlen - aead->authsize;
	rctx->assoclen = areq->assoclen;
	rctx->srclen = areq->cryptlen + areq->assoclen;
	rctx->dstlen = rctx->srclen - aead->authsize;
	rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
	rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
	rctx->flags = areq->base.flags;
	rctx->ctx_handle = nctx->u.ctx_handle;
	rctx->src = areq->src;
	rctx->dst = areq->dst;
	rctx->ctrl_arg = DECRYPT;

	ret = nitrox_set_creq(rctx);
	if (ret)
		return ret;

	/* send the crypto request */
	return nitrox_process_se_request(nctx->ndev, creq,
					 nitrox_aead_callback, areq);
}
static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ablkcipher *ctr = ctx->ctr;
	struct crypto_cipher *tfm = ctx->cipher;
	int err = 0;

	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tfm, key, keylen);
	crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) &
			      CRYPTO_TFM_RES_MASK);

out:
	return err;
}
static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
	int err;
	struct aead_request *areq = &req->areq;
	struct pcrypt_request *preq = aead_request_ctx(areq);
	struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(areq);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_givenc;
	padata->serial = pcrypt_aead_giv_serial;

	aead_givcrypt_set_tfm(creq, ctx->child);
	aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				   pcrypt_aead_done, areq);
	aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
				areq->cryptlen, areq->iv);
	aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
	aead_givcrypt_set_giv(creq, req->giv, req->seq);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(ctx, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(ctx, alg, &keys, mode))
		goto error;

	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);

		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);

	if (qat_alg_aead_init_sessions(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
			   unsigned int cryptlen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct crypto_cipher *cipher = ctx->cipher;
	unsigned int assoclen = req->assoclen;
	u8 *odata = pctx->odata;
	u8 *idata = pctx->idata;
	int err;

	/* format control data for input */
	err = format_input(odata, req, cryptlen);
	if (err)
		goto out;

	/* encrypt first block to use as start in computing mac */
	crypto_cipher_encrypt_one(cipher, odata, odata);

	/* format associated data and compute into mac */
	if (assoclen) {
		pctx->ilen = format_adata(idata, assoclen);
		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
	} else {
		pctx->ilen = 0;
	}

	/* compute plaintext into mac */
	if (cryptlen)
		get_data_to_compute(cipher, pctx, plain, cryptlen);

out:
	return err;
}
static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
{
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
	crypto_free_ablkcipher(ctx->ctr);
}
static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_cipher *cipher;
	struct crypto_ablkcipher *ctr;
	unsigned long align;
	int err;

	cipher = crypto_spawn_cipher(&ictx->cipher);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctr = crypto_spawn_skcipher(&ictx->ctr);
	err = PTR_ERR(ctr);
	if (IS_ERR(ctr))
		goto err_free_cipher;

	ctx->cipher = cipher;
	ctx->ctr = ctr;

	align = crypto_aead_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	crypto_aead_set_reqsize(
		tfm,
		align + sizeof(struct crypto_ccm_req_priv_ctx) +
		crypto_ablkcipher_reqsize(ctr));

	return 0;

err_free_cipher:
	crypto_free_cipher(cipher);
	return err;
}
static int nitrox_rfc4106_dec(struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
	struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
	int ret;

	aead_rctx->cryptlen = areq->cryptlen - aead->authsize;
	aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
	aead_rctx->srclen =
		areq->cryptlen - GCM_RFC4106_IV_SIZE + areq->assoclen;
	aead_rctx->dstlen = aead_rctx->srclen - aead->authsize;
	aead_rctx->iv = areq->iv;
	aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
	aead_rctx->flags = areq->base.flags;
	aead_rctx->ctx_handle = nctx->u.ctx_handle;
	aead_rctx->ctrl_arg = DECRYPT;

	ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
	if (ret)
		return ret;

	ret = nitrox_set_creq(aead_rctx);
	if (ret)
		return ret;

	/* send the crypto request */
	return nitrox_process_se_request(nctx->ndev, creq,
					 nitrox_rfc4106_callback, areq);
}
static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				sizeof(struct aead_request) +
				crypto_aead_reqsize(cipher));

	return 0;
}
static int simd_aead_init(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm;
	struct simd_aead_alg *salg;
	struct aead_alg *alg;
	unsigned reqsize;

	alg = crypto_aead_alg(tfm);
	salg = container_of(alg, struct simd_aead_alg, alg);

	cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct aead_request);

	crypto_aead_set_reqsize(tfm, reqsize);

	return 0;
}
static int simd_aead_setauthsize(struct crypto_aead *tfm,
				 unsigned int authsize)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	return crypto_aead_setauthsize(child, authsize);
}
static int cryptd_aegis256_aesni_setauthsize(struct crypto_aead *aead,
					     unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}
static int cryptd_aegis256_aesni_setkey(struct crypto_aead *aead,
					const u8 *key, unsigned int keylen)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
}
/*
 * This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long.
 */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
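rfc4106_set_key() and rfc4106_set_authsize() above are only reached through the generic AEAD API. The following is a minimal caller-side sketch of that path, not part of the driver: the function name rfc4106_setup_example and the all-zero key are placeholders, and the 20-byte key length assumes the usual rfc4106 layout of a 16-byte AES key followed by a 4-byte nonce.

/*
 * Hypothetical illustration only: the setkey/setauthsize handlers above are
 * invoked indirectly via crypto_aead_setkey()/crypto_aead_setauthsize().
 */
#include <crypto/aead.h>
#include <linux/err.h>

static int rfc4106_setup_example(void)
{
	/* placeholder key: 16-byte AES key followed by a 4-byte nonce */
	static const u8 key[20];
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);	/* 8, 12 or 16 */

	crypto_free_aead(tfm);
	return err;
}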
static int crypto_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen;
	u8 *authtag = pctx->auth_tag;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	if (cryptlen < authsize)
		return -EINVAL;
	cryptlen -= authsize;

	err = crypto_ccm_check_iv(iv);
	if (err)
		return err;

	pctx->flags = aead_request_flags(req);

	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);

	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	sg_init_table(pctx->src, 2);
	sg_set_buf(pctx->src, authtag, 16);
	scatterwalk_sg_chain(pctx->src, 2, req->src);

	dst = pctx->src;
	if (req->src != req->dst) {
		sg_init_table(pctx->dst, 2);
		sg_set_buf(pctx->dst, authtag, 16);
		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
		dst = pctx->dst;
	}

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_decrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_decrypt(abreq);
	if (err)
		return err;

	err = crypto_ccm_auth(req, req->dst, cryptlen);
	if (err)
		return err;

	/* verify */
	if (crypto_memneq(authtag, odata, authsize))
		return -EBADMSG;

	return err;
}
static inline struct generic_gcmaes_ctx *
generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
static int aead_authenc_setauthsize(struct crypto_aead *authenc,
				    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);

	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
static inline struct aesni_rfc4106_gcm_ctx *
aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
				      unsigned int authsize)
{
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);

	if (authsize != 16)
		return -EINVAL;

	return crypto_aead_setauthsize(ctx->child, authsize);
}
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}
static int seqiv_aead_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);

	spin_lock_init(&ctx->lock);

	tfm->crt_aead.reqsize = sizeof(struct aead_request);

	return aead_geniv_init(tfm);
}