Example #1
static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
				      const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}
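
Example #1 reads from the global crypto_default_rng, which is only valid while the caller holds a reference obtained via crypto_get_default_rng() (Example #6 below shows the same pairing). A minimal sketch of how a caller might bracket such an init routine; struct demo_ctx, demo_setkey() and demo_init_sessions() are hypothetical placeholders, not part of the QAT driver:

#include <crypto/rng.h>

struct demo_ctx;	/* hypothetical context type */
static int demo_init_sessions(struct demo_ctx *ctx,
			      const u8 *key, unsigned int keylen);

static int demo_setkey(struct demo_ctx *ctx, const u8 *key,
		       unsigned int keylen)
{
	int ret;

	ret = crypto_get_default_rng();	/* pins crypto_default_rng */
	if (ret)
		return ret;

	ret = demo_init_sessions(ctx, key, keylen);

	crypto_put_default_rng();	/* drop the reference */
	return ret;
}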
Example #2
static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto out;

	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    async_chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

	if (err)
		return err;

out:
	return async_chainiv_givencrypt(req);
}
Example #3
static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
				 const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	switch (keys.enckeylen) {
	case AES_KEYSIZE_128:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		goto bad_key;
	}

	if (qat_alg_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}
Example #4
static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return eseqiv_givencrypt(req);
}
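
Examples #2 and #4 implement the same lazy-initialization idiom: the first encryption call generates the salt, swaps the operation pointer to the fast path, and every later call bypasses the RNG entirely (the async chainiv variant uses an atomic bit rather than a spinlock). A condensed sketch of the idiom; all demo_* names are hypothetical:

#include <linux/spinlock.h>
#include <crypto/rng.h>

struct demo_ctx {
	spinlock_t lock;
	u8 salt[16];
	int (*op)(struct demo_ctx *ctx);	/* demo_first, then demo_fast */
};

static int demo_fast(struct demo_ctx *ctx);	/* steady-state path */

static int demo_first(struct demo_ctx *ctx)
{
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (ctx->op != demo_first)	/* another caller already won the race */
		goto unlock;

	ctx->op = demo_fast;		/* retire the slow path */
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   sizeof(ctx->salt));

unlock:
	spin_unlock_bh(&ctx->lock);

	return err ?: demo_fast(ctx);
}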
Example #5
static int eseqiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	unsigned long alignmask;
	unsigned int reqsize;

#ifndef CONFIG_CRYPTO_DRBG
	spin_lock_init(&ctx->lock);
#endif

	alignmask = crypto_tfm_ctx_alignment() - 1;
	reqsize = sizeof(struct eseqiv_request_ctx);

	if (alignmask & reqsize) {
		alignmask &= reqsize;
		alignmask--;
	}

	alignmask = ~alignmask;
	alignmask &= crypto_ablkcipher_alignmask(geniv);

	reqsize += alignmask;
	reqsize += crypto_ablkcipher_ivsize(geniv);
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());

	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);

	tfm->crt_ablkcipher.reqsize = reqsize +
				      sizeof(struct ablkcipher_request);
#ifdef CONFIG_CRYPTO_DRBG
	crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
			     crypto_ablkcipher_ivsize(geniv));
#endif

	return skcipher_geniv_init(tfm);
}
Example #6
static int echainiv_encrypt(struct aead_request *req)
{
    struct crypto_aead *geniv = crypto_aead_reqtfm(req);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    struct aead_request *subreq = aead_request_ctx(req);
    crypto_completion_t compl;
    void *data;
    u8 *info;
    unsigned int ivsize = crypto_aead_ivsize(geniv);
    int err;

    if (req->cryptlen < ivsize)
        return -EINVAL;

    aead_request_set_tfm(subreq, ctx->geniv.child);

    compl = echainiv_encrypt_complete;
    data = req;
    info = req->iv;

    if (req->src != req->dst) {
        struct blkcipher_desc desc = {
            .tfm = ctx->null,
        };

        err = crypto_blkcipher_encrypt(
                  &desc, req->dst, req->src,
                  req->assoclen + req->cryptlen);
        if (err)
            return err;
    }

    if (unlikely(!IS_ALIGNED((unsigned long)info,
                             crypto_aead_alignmask(geniv) + 1))) {
        info = kmalloc(ivsize, req->base.flags &
                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                       GFP_ATOMIC);
        if (!info)
            return -ENOMEM;

        memcpy(info, req->iv, ivsize);
    }

    aead_request_set_callback(subreq, req->base.flags, compl, data);
    aead_request_set_crypt(subreq, req->dst, req->dst,
                           req->cryptlen - ivsize, info);
    aead_request_set_ad(subreq, req->assoclen + ivsize);

    crypto_xor(info, ctx->salt, ivsize);
    scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
    echainiv_read_iv(info, ivsize);

    err = crypto_aead_encrypt(subreq);
    echainiv_encrypt_complete2(req, err);
    return err;
}

static int echainiv_decrypt(struct aead_request *req)
{
    struct crypto_aead *geniv = crypto_aead_reqtfm(req);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    struct aead_request *subreq = aead_request_ctx(req);
    crypto_completion_t compl;
    void *data;
    unsigned int ivsize = crypto_aead_ivsize(geniv);

    if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
        return -EINVAL;

    aead_request_set_tfm(subreq, ctx->geniv.child);

    compl = req->base.complete;
    data = req->base.data;

    aead_request_set_callback(subreq, req->base.flags, compl, data);
    aead_request_set_crypt(subreq, req->src, req->dst,
                           req->cryptlen - ivsize, req->iv);
    aead_request_set_ad(subreq, req->assoclen + ivsize);

    scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
    if (req->src != req->dst)
        scatterwalk_map_and_copy(req->iv, req->dst,
                                 req->assoclen, ivsize, 1);

    return crypto_aead_decrypt(subreq);
}

static int echainiv_init(struct crypto_tfm *tfm)
{
    struct crypto_aead *geniv = __crypto_aead_cast(tfm);
    struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
    int err;

    spin_lock_init(&ctx->geniv.lock);

    crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

    err = crypto_get_default_rng();
    if (err)
        goto out;

    err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                               crypto_aead_ivsize(geniv));
    crypto_put_default_rng();
    if (err)
        goto out;

    ctx->null = crypto_get_default_null_skcipher();
    err = PTR_ERR(ctx->null);
    if (IS_ERR(ctx->null))
        goto out;

    err = aead_geniv_init(tfm);
    if (err)
        goto drop_null;

    ctx->geniv.child = geniv->child;
    geniv->child = geniv;

out:
    return err;

drop_null:
    crypto_put_default_null_skcipher();
    goto out;
}

static void echainiv_exit(struct crypto_tfm *tfm)
{
    struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

    crypto_free_aead(ctx->geniv.child);
    crypto_put_default_null_skcipher();
}

static int echainiv_aead_create(struct crypto_template *tmpl,
                                struct rtattr **tb)
{
    struct aead_instance *inst;
    struct crypto_aead_spawn *spawn;
    struct aead_alg *alg;
    int err;

    inst = aead_geniv_alloc(tmpl, tb, 0, 0);

    if (IS_ERR(inst))
        return PTR_ERR(inst);

    spawn = aead_instance_ctx(inst);
    alg = crypto_spawn_aead_alg(spawn);

    if (alg->base.cra_aead.encrypt)
        goto done;

    err = -EINVAL;
    if (inst->alg.ivsize & (sizeof(u32) - 1) ||
            inst->alg.ivsize > MAX_IV_SIZE)
        goto free_inst;

    inst->alg.encrypt = echainiv_encrypt;
    inst->alg.decrypt = echainiv_decrypt;

    inst->alg.base.cra_init = echainiv_init;
    inst->alg.base.cra_exit = echainiv_exit;

    inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
    inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
    inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
    err = aead_register_instance(tmpl, inst);
    if (err)
        goto free_inst;

out:
    return err;

free_inst:
    aead_geniv_free(inst);
    goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
    aead_geniv_free(aead_instance(inst));
}
Example #7
static long tegra_crypto_dev_ioctl(struct file *filp,
	unsigned int ioctl_num, unsigned long arg)
{
	struct tegra_crypto_ctx *ctx = filp->private_data;
	struct tegra_crypt_req crypt_req;
	struct tegra_rng_req rng_req;
	struct tegra_sha_req sha_req;
	struct tegra_rsa_req rsa_req;
	char *rng;
	int ret = 0;

	switch (ioctl_num) {

	case TEGRA_CRYPTO_IOCTL_NEED_SSK:
		ctx->use_ssk = (int)arg;
		break;

	case TEGRA_CRYPTO_IOCTL_PROCESS_REQ:
		ret = copy_from_user(&crypt_req, (void __user *)arg,
			sizeof(crypt_req));
		if (ret) {
			ret = -EFAULT;
			pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
			break;
		}

		ret = process_crypt_req(ctx, &crypt_req);
		break;

	case TEGRA_CRYPTO_IOCTL_SET_SEED:
		if (copy_from_user(&rng_req, (void __user *)arg,
			sizeof(rng_req))) {
			ret = -EFAULT;
			pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
			return ret;
		}

		memcpy(ctx->seed, rng_req.seed, TEGRA_CRYPTO_RNG_SEED_SIZE);

		if (rng_req.type == RNG_DRBG)
			ret = crypto_rng_reset(ctx->rng_drbg, ctx->seed,
				crypto_rng_seedsize(ctx->rng_drbg));
		else
			ret = crypto_rng_reset(ctx->rng, ctx->seed,
				crypto_rng_seedsize(ctx->rng));
		break;

	case TEGRA_CRYPTO_IOCTL_GET_RANDOM:
		if (copy_from_user(&rng_req, (void __user *)arg,
			sizeof(rng_req))) {
			ret = -EFAULT;
			pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
			return ret;
		}

		rng = kzalloc(rng_req.nbytes, GFP_KERNEL);
		if (!rng) {
			if (rng_req.type == RNG_DRBG)
				pr_err("mem alloc for rng_drbg fail");
			else
				pr_err("mem alloc for rng fail");

			ret = -ENODATA;
			goto rng_out;
		}

		if (rng_req.type == RNG_DRBG)
			ret = crypto_rng_get_bytes(ctx->rng_drbg, rng,
				rng_req.nbytes);
		else
			ret = crypto_rng_get_bytes(ctx->rng, rng,
				rng_req.nbytes);

		if (ret != rng_req.nbytes) {
			if (rng_req.type == RNG_DRBG)
				pr_err("rng_drbg failed");
			else
				pr_err("rng failed");
			ret = -ENODATA;
			goto rng_out;
		}

		ret = copy_to_user((void __user *)rng_req.rdata,
			(const void *)rng, rng_req.nbytes);
		if (ret) {
			ret = -EFAULT;
			pr_err("%s: copy_to_user fail(%d)\n", __func__, ret);
			goto rng_out;	/* don't leak the rng buffer */
		}

rng_out:
		kfree(rng);
		break;

	case TEGRA_CRYPTO_IOCTL_GET_SHA:
		if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) {
			if (copy_from_user(&sha_req, (void __user *)arg,
				sizeof(sha_req))) {
				ret = -EFAULT;
				pr_err("%s: copy_from_user fail(%d)\n",
						__func__, ret);
				return ret;
			}

			ret = tegra_crypto_sha(&sha_req);
		} else {
			ret = -EINVAL;
		}
		break;

	case TEGRA_CRYPTO_IOCTL_RSA_REQ:
		if (copy_from_user(&rsa_req, (void __user *)arg,
			sizeof(rsa_req))) {
			ret = -EFAULT;
			pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
			return ret;
		}

		ret = tegra_crypt_rsa(ctx, &rsa_req);
		break;

	default:
		pr_debug("invalid ioctl code(%d)", ioctl_num);
		ret = -EINVAL;
	}

	return ret;
}
Example #8
int cryptodev_rng_get_bytes(struct rng_data *rdata, void *output, int len)
{
	return crypto_rng_get_bytes(rdata->s, output, len);
}
Example #9
File: big_key.c  Project: mkrufky/linux
/*
 * Generate random key to encrypt big_key data
 */
static inline int big_key_gen_enckey(u8 *key)
{
	return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
}
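
All of the examples above assume an RNG handle that already exists (either the shared crypto_default_rng or a per-context tfm, as in the Tegra driver). For reference, a self-contained sketch of the full allocate/use/free cycle; demo_fill_random() is a hypothetical helper, and "stdrng" selects whichever default RNG implementation the kernel provides:

#include <linux/err.h>
#include <crypto/rng.h>

static int demo_fill_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* returns 0 on success, a negative errno on failure */
	ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}

If the RNG must be explicitly reseeded, as in the TEGRA_CRYPTO_IOCTL_SET_SEED path of Example #7, crypto_rng_reset() takes the same handle plus a seed buffer of crypto_rng_seedsize() bytes.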