Example #1
0
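/*
 * seqiv: initialize the sequence-number IV generator AEAD transform.
 * Sets up the context spinlock, reserves per-request space for one
 * inner aead_request and defers to the generic geniv init helper.
 */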
static int seqiv_aead_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);

	spin_lock_init(&ctx->lock);

	tfm->crt_aead.reqsize = sizeof(struct aead_request);

	return aead_geniv_init(tfm);
}
Example #2
0
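/*
 * Variant of the seqiv AEAD init that also fills ctx->salt with random
 * bytes from the default RNG; the GCC "?:" extension returns the RNG
 * error, if any, before falling through to aead_geniv_init().
 */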
static int seqiv_aead_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);

	spin_lock_init(&ctx->lock);

	tfm->crt_aead.reqsize = sizeof(struct aead_request);

	return crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				    crypto_aead_ivsize(geniv)) ?:
	       aead_geniv_init(tfm);
}
Example #3
0
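/*
 * Wire up the crt_aead operations for a conventional AEAD algorithm:
 * reject IV/auth sizes above PAGE_SIZE / 8, install the algorithm's
 * entry points (setkey goes through a local wrapper unless
 * CRYPTO_ALG_GENIV is set) and fall back to no_givcrypt when the
 * algorithm provides no IV-generating operations.
 */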
static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
	struct aead_tfm *crt = &tfm->crt_aead;

	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt ?: no_givcrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
	crt->base = __crypto_aead_cast(tfm);
	crt->ivsize = alg->ivsize;
	crt->authsize = alg->maxauthsize;

	return 0;
}
Example #4
0
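/*
 * Same setup for AEAD algorithms handled through the IV-generator
 * (nivaead) type: setkey always goes through the local wrapper, and the
 * null givencrypt/givdecrypt helpers are installed when the algorithm
 * takes no IV at all.
 */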
static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
	struct aead_tfm *crt = &tfm->crt_aead;

	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = aead_null_givencrypt;
		crt->givdecrypt = aead_null_givdecrypt;
	}
	crt->base = __crypto_aead_cast(tfm);
	crt->ivsize = alg->ivsize;
	crt->authsize = alg->maxauthsize;

	return 0;
}
Example #5
0
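/*
 * Allocate an AEAD transform by name.  The lookup is retried for as
 * long as it fails with -EAGAIN (e.g. while the algorithm is still
 * being instantiated) and aborts with -EINTR if a signal is pending.
 */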
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_AEAD;
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_MASK;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_aead(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_aead_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
Example #6
0
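/*
 * echainiv encrypt path: copy src to dst through the null cipher when
 * they differ, XOR the caller's IV with the per-tfm salt, store the
 * resulting IV in front of the payload in dst and run the inner AEAD
 * encryption on the remaining data.
 */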
static int echainiv_encrypt(struct aead_request *req)
{
    struct crypto_aead *geniv = crypto_aead_reqtfm(req);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    struct aead_request *subreq = aead_request_ctx(req);
    crypto_completion_t compl;
    void *data;
    u8 *info;
    unsigned int ivsize = crypto_aead_ivsize(geniv);
    int err;

    if (req->cryptlen < ivsize)
        return -EINVAL;

    aead_request_set_tfm(subreq, ctx->geniv.child);

    compl = echainiv_encrypt_complete;
    data = req;
    info = req->iv;

    if (req->src != req->dst) {
        struct blkcipher_desc desc = {
            .tfm = ctx->null,
        };

        err = crypto_blkcipher_encrypt(
                  &desc, req->dst, req->src,
                  req->assoclen + req->cryptlen);
        if (err)
            return err;
    }

    if (unlikely(!IS_ALIGNED((unsigned long)info,
                             crypto_aead_alignmask(geniv) + 1))) {
        info = kmalloc(ivsize, req->base.flags &
                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                       GFP_ATOMIC);
        if (!info)
            return -ENOMEM;

        memcpy(info, req->iv, ivsize);
    }

    aead_request_set_callback(subreq, req->base.flags, compl, data);
    aead_request_set_crypt(subreq, req->dst, req->dst,
                           req->cryptlen - ivsize, info);
    aead_request_set_ad(subreq, req->assoclen + ivsize);

    crypto_xor(info, ctx->salt, ivsize);
    scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
    echainiv_read_iv(info, ivsize);

    err = crypto_aead_encrypt(subreq);
    echainiv_encrypt_complete2(req, err);
    return err;
}

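/*
 * echainiv decrypt path: read the transmitted IV back into req->iv,
 * mirror it into dst when src != dst, and forward the request (minus
 * the IV bytes) to the inner AEAD transform.
 */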
static int echainiv_decrypt(struct aead_request *req)
{
    struct crypto_aead *geniv = crypto_aead_reqtfm(req);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    struct aead_request *subreq = aead_request_ctx(req);
    crypto_completion_t compl;
    void *data;
    unsigned int ivsize = crypto_aead_ivsize(geniv);

    if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
        return -EINVAL;

    aead_request_set_tfm(subreq, ctx->geniv.child);

    compl = req->base.complete;
    data = req->base.data;

    aead_request_set_callback(subreq, req->base.flags, compl, data);
    aead_request_set_crypt(subreq, req->src, req->dst,
                           req->cryptlen - ivsize, req->iv);
    aead_request_set_ad(subreq, req->assoclen + ivsize);

    scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
    if (req->src != req->dst)
        scatterwalk_map_and_copy(req->iv, req->dst,
                                 req->assoclen, ivsize, 1);

    return crypto_aead_decrypt(subreq);
}

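/*
 * Per-tfm init: seed ctx->salt from the default RNG, grab the default
 * null skcipher used for the src-to-dst copy, run the generic geniv
 * init and record the inner (child) AEAD in the context.
 */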
static int echainiv_init(struct crypto_tfm *tfm)
{
    struct crypto_aead *geniv = __crypto_aead_cast(tfm);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    int err;

    spin_lock_init(&ctx->geniv.lock);

    crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

    err = crypto_get_default_rng();
    if (err)
        goto out;

    err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                               crypto_aead_ivsize(geniv));
    crypto_put_default_rng();
    if (err)
        goto out;

    ctx->null = crypto_get_default_null_skcipher();
    err = PTR_ERR(ctx->null);
    if (IS_ERR(ctx->null))
        goto out;

    err = aead_geniv_init(tfm);
    if (err)
        goto drop_null;

    ctx->geniv.child = geniv->child;
    geniv->child = geniv;

out:
    return err;

drop_null:
    crypto_put_default_null_skcipher();
    goto out;
}

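/* Per-tfm cleanup: drop the child AEAD and the default null skcipher. */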
static void echainiv_exit(struct crypto_tfm *tfm)
{
    struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

    crypto_free_aead(ctx->geniv.child);
    crypto_put_default_null_skcipher();
}

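/*
 * Template instantiation: wrap an AEAD algorithm with the echainiv IV
 * generator.  Old-style (cra_aead) algorithms are registered unchanged;
 * otherwise the echainiv handlers, init/exit hooks and context size are
 * filled in after validating the IV size.
 */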
static int echainiv_aead_create(struct crypto_template *tmpl,
                                struct rtattr **tb)
{
    struct aead_instance *inst;
    struct crypto_aead_spawn *spawn;
    struct aead_alg *alg;
    int err;

    inst = aead_geniv_alloc(tmpl, tb, 0, 0);

    if (IS_ERR(inst))
        return PTR_ERR(inst);

    spawn = aead_instance_ctx(inst);
    alg = crypto_spawn_aead_alg(spawn);

    if (alg->base.cra_aead.encrypt)
        goto done;

    err = -EINVAL;
    if (inst->alg.ivsize & (sizeof(u32) - 1) ||
            inst->alg.ivsize > MAX_IV_SIZE)
        goto free_inst;

    inst->alg.encrypt = echainiv_encrypt;
    inst->alg.decrypt = echainiv_decrypt;

    inst->alg.base.cra_init = echainiv_init;
    inst->alg.base.cra_exit = echainiv_exit;

    inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
    inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
    inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
    err = aead_register_instance(tmpl, inst);
    if (err)
        goto free_inst;

out:
    return err;

free_inst:
    aead_geniv_free(inst);
    goto out;
}

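/* Release an echainiv instance allocated by aead_geniv_alloc(). */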
static void echainiv_free(struct crypto_instance *inst)
{
    aead_geniv_free(aead_instance(inst));
}
Example #7
0
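/*
 * Build the QAT decrypt session for an authenc-style AEAD: fill in the
 * AES-CBC decrypt cipher block and the hash setup, precompute the
 * inner/outer hash state from the auth key, and populate the firmware
 * request template (common header flags, content-descriptor address and
 * the cipher/auth control blocks).
 */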
static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}