int rtl_cipher_init_ctx(struct crypto_tfm *tfm,
	struct rtl_cipher_ctx *ctx)
{
	const char *algname = crypto_tfm_alg_name(tfm);

	memset(ctx, 0, sizeof(*ctx));

	/* Map the generic algorithm name onto the engine's mode code */
	if (strcmp(algname, "cbc(des)") == 0)
		ctx->mode = 0x00;
	else if (strcmp(algname, "cbc(des3_ede)") == 0)
		ctx->mode = 0x01;
	else if (strcmp(algname, "ecb(des)") == 0)
		ctx->mode = 0x02;
	else if (strcmp(algname, "ecb(des3_ede)") == 0)
		ctx->mode = 0x03;
	else if (strcmp(algname, "cbc(aes)") == 0)
		ctx->mode = 0x20;
	else if (strcmp(algname, "ecb(aes)") == 0)
		ctx->mode = 0x22;
	else if (strcmp(algname, "ctr(aes)") == 0)
		ctx->mode = 0x23;
	else
		ctx->mode = -1;

#ifdef CONFIG_RTK_VOIP_DBG
	printk("%s: alg=%s, driver=%s, mode=%x\n", __func__,
		crypto_tfm_alg_name(tfm),
		crypto_tfm_alg_driver_name(tfm),
		ctx->mode
	);
#endif

	ctx->aes_dekey = &ctx->__aes_dekey[32];	/* offset works around a cache-alignment issue */
	return 0;
}
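
In practice a context initializer like this is called from the algorithm's cra_init hook. The wrapper below is only a sketch of that wiring; the rtl_cipher_cra_init name is hypothetical and not taken from the driver.

/* Hypothetical glue: assumes rtl_cipher_init_ctx() above and a private
 * struct rtl_cipher_ctx stored in the tfm context. */
static int rtl_cipher_cra_init(struct crypto_tfm *tfm)
{
	struct rtl_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	/* Resolves crypto_tfm_alg_name(tfm) into the engine's mode code */
	return rtl_cipher_init_ctx(tfm, ctx);
}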
Example #2
static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
{
    const char *alg;
    struct crypto_shash *fallback;
    struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);

    if (!(alg = crypto_tfm_alg_name(tfm))) {
        printk(KERN_ERR "Failed to get algorithm name.\n");
        return -ENOENT;
    }

    /* Allocate another implementation of the same algorithm as a software fallback */
    fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
    if (IS_ERR(fallback)) {
        printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
                alg, PTR_ERR(fallback));
        return PTR_ERR(fallback);
    }
    printk(KERN_INFO "Using '%s' as fallback implementation.\n",
            crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));

    crypto_shash_set_flags(fallback,
            crypto_shash_get_flags((struct crypto_shash *) tfm));
    ctx->fallback = fallback;

    shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx)
        + crypto_shash_descsize(fallback);

    return 0;
}
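
A driver that grabs a fallback reference in its init_tfm hook normally drops it in the matching exit hook. The minimal counterpart below is a sketch based on the ctx layout above; the p8_ghash_exit_tfm name is inferred rather than quoted.

static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
{
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);

    /* Release the software fallback allocated in p8_ghash_init_tfm() */
    if (ctx->fallback) {
        crypto_free_shash(ctx->fallback);
        ctx->fallback = NULL;
    }
}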
Example #3
static void cc_cipher_exit(struct crypto_tfm *tfm)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	unsigned int max_key_buf_size = get_max_keysize(tfm);

	dev_dbg(dev, "Clearing context @%p for %s\n",
		crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* Free hash tfm for essiv */
		crypto_free_shash(ctx_p->shash_tfm);
		ctx_p->shash_tfm = NULL;
	}

	/* Unmap key buffer */
	dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
			 DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
		&ctx_p->user.key_dma_addr);

	/* Free key buffer in context */
	kfree(ctx_p->user.key);
	dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
}
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
	struct rk_crypto_tmp *algt;
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);

	const char *alg_name = crypto_tfm_alg_name(tfm);

	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

	tctx->dev = algt->dev;
	tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
	if (!tctx->dev->addr_vir) {
		dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
		return -ENOMEM;
	}
	tctx->dev->start = rk_ahash_start;
	tctx->dev->update = rk_ahash_crypto_rx;
	tctx->dev->complete = rk_ahash_crypto_complete;

	/* for fallback */
	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback_tfm)) {
		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
		return PTR_ERR(tctx->fallback_tfm);
	}
	/* Reserve room in each request for the fallback's own request context */
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(tctx->fallback_tfm));

	return tctx->dev->enable_clk(tctx->dev);
}
static int p8_aes_xts_init(struct crypto_tfm *tfm)
{
	const char *alg;
	struct crypto_skcipher *fallback;
	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!(alg = crypto_tfm_alg_name(tfm))) {
		printk(KERN_ERR "Failed to get algorithm name.\n");
		return -ENOENT;
	}

	fallback = crypto_alloc_skcipher(alg, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback)) {
		printk(KERN_ERR
			"Failed to allocate transformation for '%s': %ld\n",
			alg, PTR_ERR(fallback));
		return PTR_ERR(fallback);
	}
	printk(KERN_INFO "Using '%s' as fallback implementation.\n",
		crypto_skcipher_driver_name(fallback));

	crypto_skcipher_set_flags(
		fallback,
		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
	ctx->fallback = fallback;

	return 0;
}
Example #6
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

	/* Reuse the algorithm name to allocate a synchronous software fallback */
	ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
						   0, CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}
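
PTR_ERR_OR_ZERO() folds the usual error-pointer check into the return statement; the return above behaves like this open-coded form:

/* Open-coded equivalent of the PTR_ERR_OR_ZERO() return above */
if (IS_ERR(ctx->fallback))
	return PTR_ERR(ctx->fallback);
return 0;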
Example #7
static int cc_cipher_init(struct crypto_tfm *tfm)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct cc_crypto_alg *cc_alg =
			container_of(alg, struct cc_crypto_alg, crypto_alg);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
	int rc = 0;
	unsigned int max_key_buf_size = get_max_keysize(tfm);
	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;

	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
		crypto_tfm_alg_name(tfm));

	ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);

	ctx_p->cipher_mode = cc_alg->cipher_mode;
	ctx_p->flow_mode = cc_alg->flow_mode;
	ctx_p->drvdata = cc_alg->drvdata;

	/* Allocate key buffer, cache line aligned */
	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
	if (!ctx_p->user.key)
		return -ENOMEM;

	dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
		ctx_p->user.key);

	/* Map key buffer */
	ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
						  max_key_buf_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
			max_key_buf_size, ctx_p->user.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* Alloc hash tfm for essiv */
		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
		if (IS_ERR(ctx_p->shash_tfm)) {
			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
			return PTR_ERR(ctx_p->shash_tfm);
		}
	}

	return rc;
}
int rtl_hash_init_ctx(struct crypto_tfm *tfm, struct rtl_hash_ctx *ctx)
{
	const char *algname = crypto_tfm_alg_name(tfm);

	if (strcmp(algname, "md5") == 0)
		ctx->mode = 0x00;
	else if (strcmp(algname, "sha1") == 0)
		ctx->mode = 0x01;
	else
		ctx->mode = -1;
	
	/* Buffer for the data to be hashed */
	ctx->data = kmalloc(MAX_PKTLEN, GFP_KERNEL);
	if (!ctx->data)
		return -ENOMEM;

	ctx->length = 0;
	return 0;
}
Example #9
File: ablkcipher.c  Project: 020gzh/linux
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

	ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
						CRYPTO_ALG_TYPE_ABLKCIPHER,
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	return 0;
}
Example #10
static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}
Example #11
File: sahara.c  Project: GongZiYuan/linux
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
					CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}
Example #12
static int p8_aes_ctr_init(struct crypto_tfm *tfm)
{
	const char *alg = crypto_tfm_alg_name(tfm);
	struct crypto_sync_skcipher *fallback;
	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

	fallback = crypto_alloc_sync_skcipher(alg, 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback)) {
		printk(KERN_ERR
		       "Failed to allocate transformation for '%s': %ld\n",
		       alg, PTR_ERR(fallback));
		return PTR_ERR(fallback);
	}

	crypto_sync_skcipher_set_flags(
		fallback,
		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
	ctx->fallback = fallback;

	return 0;
}
Example #13
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;

	ops->cit_setkey = setkey;

	/* Install encrypt/decrypt handlers for the selected chaining mode */
	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		break;
		
	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;
		
	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;
	
	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}
	
	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
		unsigned long align;
		unsigned long addr;

		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		/* Place the IV buffer directly after the (aligned) algorithm context */
		align = crypto_tfm_alg_alignmask(tfm) + 1;
		addr = (unsigned long)crypto_tfm_ctx(tfm);
		addr = ALIGN(addr, align);
		addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
		ops->cit_iv = (void *)addr;
#ifdef CONFIG_CRYPTO_XCBC
		ret = crypto_alloc_xcbc_block(tfm);
#endif
	}

out:	
	return ret;
}
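
In the CBC branch the IV buffer is not allocated separately; it is carved out of the memory directly after the algorithm context, rounded up to the transform's alignment. A worked example of that address arithmetic, with made-up numbers purely for illustration:

/*
 * Illustration only (hypothetical values):
 *   align  = crypto_tfm_alg_alignmask(tfm) + 1 = 8
 *   ctx    = 0x1004  ->  ALIGN(0x1004, 8) = 0x1008
 *   ctxsz  = 20      ->  ALIGN(20, 8)     = 24
 *   cit_iv = (void *)(0x1008 + 24)        = (void *)0x1020
 */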
Example #14
static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	struct cc_hkey_info hki;

	dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	/* This check the size of the hardware key token */
	if (keylen != sizeof(hki)) {
		dev_err(dev, "Unsupported HW key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (ctx_p->flow_mode != S_DIN_to_AES) {
		dev_err(dev, "HW key not supported for non-AES flows\n");
		return -EINVAL;
	}

	memcpy(&hki, key, keylen);

	/* The real key len for crypto op is the size of the HW key
	 * referenced by the HW key slot, not the hardware key token
	 */
	keylen = hki.keylen;

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
	if (ctx_p->hw.key1_slot == END_OF_KEYS) {
		dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
		return -EINVAL;
	}

	if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
	    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
	    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
		if (hki.hw_key1 == hki.hw_key2) {
			dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
				hki.hw_key1, hki.hw_key2);
			return -EINVAL;
		}
		ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
		if (ctx_p->hw.key2_slot == END_OF_KEYS) {
			dev_err(dev, "Unsupported hw key2 number (%d)\n",
				hki.hw_key2);
			return -EINVAL;
		}
	}

	ctx_p->keylen = keylen;
	ctx_p->hw_key = true;
	dev_dbg(dev, "cc_is_hw_key ret 0");

	return 0;
}
Example #15
static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES3_EDE_EXPKEY_WORDS];
	struct cc_crypto_alg *cc_alg =
			container_of(tfm->__crt_alg, struct cc_crypto_alg,
				     skcipher_alg.base);
	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;

	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx_p->hw_key = false;

	/*
	 * Verify DES weak keys
	 * Note that we're dropping the expanded key since the
	 * HW does the expansion on its own.
	 */
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		if (keylen == DES3_EDE_KEY_SIZE &&
		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
				      DES3_EDE_KEY_SIZE)) {
			dev_dbg(dev, "weak 3DES key");
			return -EINVAL;
		} else if (!des_ekey(tmp, key) &&
			   (crypto_tfm_get_flags(tfm) &
			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			dev_dbg(dev, "weak DES key");
			return -EINVAL;
		}
	}

	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
	    xts_check_key(tfm, key, keylen)) {
		dev_dbg(dev, "weak XTS key");
		return -EINVAL;
	}

	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
				max_key_buf_size, DMA_TO_DEVICE);

	memcpy(ctx_p->user.key, key, keylen);
	if (keylen == 24)
		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* sha256 for key2 - use sw implementation */
		int key_len = keylen >> 1;
		int err;

		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

		desc->tfm = ctx_p->shash_tfm;

		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
					  ctx_p->user.key + key_len);
		if (err) {
			dev_err(dev, "Failed to hash ESSIV key.\n");
			return err;
		}
	}
Example #16
static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
}
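
The helper just unwraps the ahash handle before querying the name. A hypothetical call site (ahash is assumed to be a struct crypto_ahash * the caller already holds):

/* Hypothetical usage: report which hash algorithm this tfm is bound to */
pr_info("hash tfm bound to %s\n", crypto_ahash_alg_name(ahash));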
Example #17
static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES_EXPKEY_WORDS];
	unsigned int max_key_buf_size = get_max_keysize(tfm);

	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (cc_is_hw_key(tfm)) {
		/* setting HW key slots */
		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;

		if (ctx_p->flow_mode != S_DIN_to_AES) {
			dev_err(dev, "HW key not supported for non-AES flows\n");
			return -EINVAL;
		}

		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
			dev_err(dev, "Unsupported hw key1 number (%d)\n",
				hki->hw_key1);
			return -EINVAL;
		}

		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
			if (hki->hw_key1 == hki->hw_key2) {
				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
					hki->hw_key1, hki->hw_key2);
				return -EINVAL;
			}
			ctx_p->hw.key2_slot =
				hw_key_to_cc_hw_key(hki->hw_key2);
			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
				dev_err(dev, "Unsupported hw key2 number (%d)\n",
					hki->hw_key2);
				return -EINVAL;
			}
		}

		ctx_p->keylen = keylen;
		dev_dbg(dev, "cc_is_hw_key ret 0");

		return 0;
	}

	// verify weak keys
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		if (!des_ekey(tmp, key) &&
		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			dev_dbg(dev, "weak DES key");
			return -EINVAL;
		}
	}
	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
	    xts_check_key(tfm, key, keylen)) {
		dev_dbg(dev, "weak XTS key");
		return -EINVAL;
	}
	if (ctx_p->flow_mode == S_DIN_to_DES &&
	    keylen == DES3_EDE_KEY_SIZE &&
	    cc_verify_3des_keys(key, keylen)) {
		dev_dbg(dev, "weak 3DES key");
		return -EINVAL;
	}

	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
				max_key_buf_size, DMA_TO_DEVICE);

	memcpy(ctx_p->user.key, key, keylen);
	if (keylen == 24)
		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* sha256 for key2 - use sw implementation */
		int key_len = keylen >> 1;
		int err;

		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

		desc->tfm = ctx_p->shash_tfm;

		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
					  ctx_p->user.key + key_len);
		if (err) {
			dev_err(dev, "Failed to hash ESSIV key.\n");
			return err;
		}
	}
Example #18
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;

	ops->cit_setkey = setkey;

	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		break;

	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}

	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {

		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
		if (ops->cit_iv == NULL)
			ret = -ENOMEM;
	}

out:
	return ret;
}