/*
* Cipher algorithm self tests
*/
int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index, num_tv;
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *ablkcipher_req;
	struct _fips_completion fips_completion;
	char *k_align_src = NULL;
	struct scatterlist fips_sg;
	struct _fips_test_vector_cipher tv_cipher;

	num_tv = (sizeof(fips_test_vector_cipher)) /
		(sizeof(struct _fips_test_vector_cipher));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {

		memcpy(&tv_cipher, &fips_test_vector_cipher[tv_index],
			(sizeof(struct _fips_test_vector_cipher)));

		/* Single buffer allocation for in place operation */
		k_align_src = kzalloc(tv_cipher.pln_txt_len, GFP_KERNEL);
		if (k_align_src == NULL) {
			pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n",
			PTR_ERR(k_align_src));
			return -ENOMEM;
		}

		memcpy(&k_align_src[0], tv_cipher.pln_txt,
			tv_cipher.pln_txt_len);

		/*
		 * use_sw flags are set in the DTSI file, which makes the
		 * default Linux API calls go to s/w crypto instead of h/w
		 * crypto. This code makes sure that all self-test calls
		 * always go to h/w, independent of the DTSI flags.
		 */
		if ((!strcmp(tv_cipher.mod_alg, "xts(aes)") &&
				selftest_d->prefix_aes_xts_algo) ||
			(strcmp(tv_cipher.mod_alg, "xts(aes)") &&
				selftest_d->prefix_aes_cbc_ecb_ctr_algo)) {
			if (_fips_get_alg_cra_name(tv_cipher.mod_alg,
					selftest_d->algo_prefix,
					strlen(tv_cipher.mod_alg))) {
				rc = -1;
				pr_err("Algo name is too long for tv %d\n",
					tv_index);
				goto clr_buf;
			}
		}

		tfm = crypto_alloc_ablkcipher(tv_cipher.mod_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
			tv_cipher.mod_alg);
			rc = -ENOMEM;
			goto clr_buf;
		}

		ablkcipher_req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!ablkcipher_req) {
			pr_err("qcrypto: ablkcipher_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}
		rc = qcrypto_cipher_set_device(ablkcipher_req,
			selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, rc);
			goto clr_ablkcipher_req;
		}
		ablkcipher_request_set_callback(ablkcipher_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);

		crypto_ablkcipher_clear_flags(tfm, ~0);
		rc = crypto_ablkcipher_setkey(tfm, tv_cipher.key,
			tv_cipher.klen);
		if (rc) {
			pr_err("qcrypto: crypto_ablkcipher_setkey failed\n");
			goto clr_ablkcipher_req;
		}
		sg_set_buf(&fips_sg, k_align_src, tv_cipher.enc_txt_len);
		sg_mark_end(&fips_sg);
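		/* src == dst: encrypt and decrypt run in place on k_align_src. */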
		ablkcipher_request_set_crypt(ablkcipher_req,
			&fips_sg, &fips_sg, tv_cipher.pln_txt_len,
			tv_cipher.iv);

		/**** Encryption Test ****/
		init_completion(&fips_completion.completion);
		rc = crypto_ablkcipher_encrypt(ablkcipher_req);
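		/*
		 * -EINPROGRESS/-EBUSY means the request was queued
		 * asynchronously; wait for _fips_cb() to complete it.
		 */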
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:cipher:ENC, wait_for_completion failed\n");
				goto clr_ablkcipher_req;
			}

		}

		if (memcmp(k_align_src, tv_cipher.enc_txt,
			tv_cipher.enc_txt_len)) {
			rc = -1;
			goto clr_ablkcipher_req;
		}

		/**** Decryption test ****/
		init_completion(&fips_completion.completion);
		rc = crypto_ablkcipher_decrypt(ablkcipher_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:cipher:DEC, wait_for_completion failed\n");
				goto clr_ablkcipher_req;
			}

		}

		if (memcmp(k_align_src, tv_cipher.pln_txt,
			tv_cipher.pln_txt_len))
			rc = -1;

clr_ablkcipher_req:
		ablkcipher_request_free(ablkcipher_req);
clr_tfm:
		crypto_free_ablkcipher(tfm);
clr_buf:
		kzfree(k_align_src);

		if (rc)
			return rc;

	}
	return rc;
}
Example #2
CDF_STATUS cds_decrypt_aes(uint32_t cryptHandle,	/* Handle */
			   uint8_t *pText,	/* pointer to data stream */
			   uint8_t *pDecrypted,
			   uint8_t *pKey)	/* pointer to authentication key */
{
/*    CDF_STATUS uResult = CDF_STATUS_E_FAILURE; */
	struct ecb_aes_result result;
	struct ablkcipher_request *req;
	struct crypto_ablkcipher *tfm;
	int ret = 0;
	char iv[IV_SIZE_AES_128];
	struct scatterlist sg_in;
	struct scatterlist sg_out;

	init_completion(&result.completion);

	tfm = cds_crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "crypto_alloc_ablkcipher failed");
		ret = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "Failed to allocate request for cbc(aes)");
		ret = -ENOMEM;
		goto err_req;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					ecb_aes_complete, &result);

	crypto_ablkcipher_clear_flags(tfm, ~0);

	ret = crypto_ablkcipher_setkey(tfm, pKey, AES_KEYSIZE_128);
	if (ret) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "crypto_cipher_setkey failed");
		goto err_setkey;
	}

	memset(iv, 0, IV_SIZE_AES_128);
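	/* All-zero IV: both sides of the exchange must use the same fixed IV. */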

	sg_init_one(&sg_in, pText, AES_BLOCK_SIZE);

	sg_init_one(&sg_out, pDecrypted, AES_BLOCK_SIZE);

	ablkcipher_request_set_crypt(req, &sg_in, &sg_out, AES_BLOCK_SIZE, iv);

	ret = crypto_ablkcipher_decrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* Async path; assumes ecb_aes_complete() fills in result.err. */
		wait_for_completion(&result.completion);
		ret = result.err;
	}

/* ------------------------------------- */
err_setkey:
	cds_ablkcipher_request_free(req);
err_req:
	cds_crypto_free_ablkcipher(tfm);
err_tfm:
	/* return ret; */
	if (ret != 0) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s() call failed", __func__);
		return CDF_STATUS_E_FAULT;
	}

	return CDF_STATUS_SUCCESS;
}
Example #3
VOS_STATUS vos_decrypt_AES(v_U32_t cryptHandle, /* Handle */
                           v_U8_t *pText, /* pointer to data stream */
                           v_U8_t *pDecrypted,
                           v_U8_t *pKey) /* pointer to authentication key */
{
    struct ecb_aes_result result;
    struct ablkcipher_request *req;
    struct crypto_ablkcipher *tfm;
    int ret = 0;
    char iv[IV_SIZE_AES_128];
    struct scatterlist sg_in;
    struct scatterlist sg_out;

    init_completion(&result.completion);

    tfm = wcnss_wlan_crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm)) {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                  "crypto_alloc_ablkcipher failed");
        ret = PTR_ERR(tfm);
        goto err_tfm;
    }

    req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                  "Failed to allocate request for cbc(aes)");
        ret = -ENOMEM;
        goto err_req;
    }

    ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                    ecb_aes_complete, &result);

    crypto_ablkcipher_clear_flags(tfm, ~0);

    ret = crypto_ablkcipher_setkey(tfm, pKey, KEY_SIZE_AES_128);
    if (ret) {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                  "crypto_ablkcipher_setkey failed");
        goto err_setkey;
    }

    memset(iv, 0, IV_SIZE_AES_128);

    sg_init_one(&sg_in, pText, AES_BLOCK_SIZE);

    sg_init_one(&sg_out, pDecrypted, AES_BLOCK_SIZE);

    ablkcipher_request_set_crypt(req, &sg_in, &sg_out, AES_BLOCK_SIZE, iv);

    ret = crypto_ablkcipher_decrypt(req);
    if (ret == -EINPROGRESS || ret == -EBUSY) {
        /* Async path; assumes ecb_aes_complete() fills in result.err. */
        wait_for_completion(&result.completion);
        ret = result.err;
    }

err_setkey:
    wcnss_wlan_ablkcipher_request_free(req);
err_req:
    wcnss_wlan_crypto_free_ablkcipher(tfm);
err_tfm:
    // return ret;
    if (ret != 0) {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                  "%s() call failed", __func__);
        return VOS_STATUS_E_FAULT;
    }

    return VOS_STATUS_SUCCESS;
}
Example #4
static int process_crypt_req(struct tegra_crypto_ctx *ctx,
			     struct tegra_crypt_req *crypt_req)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req = NULL;
	struct scatterlist in_sg;
	struct scatterlist out_sg;
	unsigned long *xbuf[NBUFS];
	int ret = 0, size = 0;
	unsigned long total = 0;
	const u8 *key = NULL;
	struct tegra_crypto_completion tcrypt_complete;

	if (crypt_req->op & TEGRA_CRYPTO_ECB) {
		req = ablkcipher_request_alloc(ctx->ecb_tfm, GFP_KERNEL);
		tfm = ctx->ecb_tfm;
	} else if (crypt_req->op & TEGRA_CRYPTO_CBC) {
		req = ablkcipher_request_alloc(ctx->cbc_tfm, GFP_KERNEL);
		tfm = ctx->cbc_tfm;
	} else if ((crypt_req->op & TEGRA_CRYPTO_OFB) &&
			(tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)) {

		req = ablkcipher_request_alloc(ctx->ofb_tfm, GFP_KERNEL);
		tfm = ctx->ofb_tfm;
	} else if ((crypt_req->op & TEGRA_CRYPTO_CTR) &&
			(tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)) {

		req = ablkcipher_request_alloc(ctx->ctr_tfm, GFP_KERNEL);
		tfm = ctx->ctr_tfm;
	}
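	/* req is still NULL here if none of the supported modes matched. */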

	if (!req) {
		pr_err("%s: Failed to allocate request\n", __func__);
		return -ENOMEM;
	}

	if ((crypt_req->keylen < 0) || (crypt_req->keylen > AES_MAX_KEY_SIZE)) {
		ret = -EINVAL;
		pr_err("crypt_req keylen invalid");
		goto process_req_out;
	}

	crypto_ablkcipher_clear_flags(tfm, ~0);

	if (!ctx->use_ssk)
		key = crypt_req->key;
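	/* Otherwise key stays NULL; the driver presumably falls back to its
	 * secure storage key (SSK) in that case. */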

	if (!crypt_req->skip_key) {
		ret = crypto_ablkcipher_setkey(tfm, key, crypt_req->keylen);
		if (ret < 0) {
			pr_err("setkey failed");
			goto process_req_out;
		}
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto process_req_out;
	}

	init_completion(&tcrypt_complete.restart);

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
		tegra_crypt_complete, &tcrypt_complete);

	total = crypt_req->plaintext_sz;
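	/* Stream the plaintext through page-sized bounce buffers. */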
	while (total > 0) {
		size = min(total, PAGE_SIZE);
		ret = copy_from_user((void *)xbuf[0],
			(void __user *)crypt_req->plaintext, size);
		if (ret) {
			ret = -EFAULT;
			pr_debug("%s: copy_from_user failed (%d)\n", __func__, ret);
			goto process_req_buf_out;
		}
		sg_init_one(&in_sg, xbuf[0], size);
		sg_init_one(&out_sg, xbuf[1], size);

		if (!crypt_req->skip_iv)
			ablkcipher_request_set_crypt(req, &in_sg,
				&out_sg, size, crypt_req->iv);
		else
			ablkcipher_request_set_crypt(req, &in_sg,
				&out_sg, size, NULL);

		INIT_COMPLETION(tcrypt_complete.restart);

		tcrypt_complete.req_err = 0;

		ret = crypt_req->encrypt ?
			crypto_ablkcipher_encrypt(req) :
			crypto_ablkcipher_decrypt(req);
		if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
			/* crypto driver is asynchronous */
			ret = wait_for_completion_interruptible(&tcrypt_complete.restart);

			if (ret < 0)
				goto process_req_buf_out;

			if (tcrypt_complete.req_err < 0) {
				ret = tcrypt_complete.req_err;
				goto process_req_buf_out;
			}
		} else if (ret < 0) {
			pr_debug("%scrypt failed (%d)\n",
				crypt_req->encrypt ? "en" : "de", ret);
			goto process_req_buf_out;
		}

		ret = copy_to_user((void __user *)crypt_req->result,
			(const void *)xbuf[1], size);
		if (ret) {
			ret = -EFAULT;
			pr_debug("%s: copy_to_user failed (%d)\n", __func__,
					ret);
			goto process_req_buf_out;
		}

		total -= size;
		crypt_req->result += size;
		crypt_req->plaintext += size;
	}

process_req_buf_out:
	free_bufs(xbuf);
process_req_out:
	ablkcipher_request_free(req);

	return ret;
}
Example #5
static int test_acipher(const char *algo, int enc, char *data_in,
		char *data_out, size_t data_len, char *key, int keysize)
{
	struct crypto_ablkcipher *tfm;
	struct tcrypt_result tresult;
	struct ablkcipher_request *req;
	struct scatterlist sg[TVMEMSIZE];
	int ret;
	unsigned int i, j, iv_len;
	char iv[128];

	ret = -EAGAIN;

	init_completion(&tresult.completion);

	tfm = crypto_alloc_ablkcipher(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		return ret;
	}

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "tcrypt: skcipher: Failed to allocate request for %s\n",
			algo);
		ret = -ENOMEM;
		goto out;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					tcrypt_complete, &tresult);

	crypto_ablkcipher_clear_flags(tfm, ~0);

	ret = crypto_ablkcipher_setkey(tfm, key, keysize);
	if (ret) {
		printk(KERN_ERR "setkey() failed flags=%x\n",
			crypto_ablkcipher_get_flags(tfm));
		goto out_free_req;
	}

	printk(KERN_INFO "KEY:\n");
	hexdump(key, keysize);

	sg_init_table(sg, TVMEMSIZE);

	i = 0;
	j = data_len;
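	/* Scatter the input across page-sized tvmem buffers, one SG entry each. */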

	while (j > PAGE_SIZE) {
		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
		memcpy(tvmem[i], data_in + i * PAGE_SIZE, PAGE_SIZE);
		i++;
		j -= PAGE_SIZE;
	}
	sg_set_buf(sg + i, tvmem[i], j);
	memcpy(tvmem[i], data_in + i * PAGE_SIZE, j);

	iv_len = crypto_ablkcipher_ivsize(tfm);
	memcpy(iv, iv16, iv_len);

	printk(KERN_INFO "IV:\n");
	hexdump(iv, iv_len);

	ablkcipher_request_set_crypt(req, sg, sg, data_len, iv);

	printk(KERN_INFO "IN:\n");
	hexdump(data_in, data_len);

	if (enc)
		ret = do_one_acipher_op(req, crypto_ablkcipher_encrypt(req));
	else
		ret = do_one_acipher_op(req, crypto_ablkcipher_decrypt(req));

	if (ret)
		printk(KERN_ERR "failed flags=%x\n",
			crypto_ablkcipher_get_flags(tfm));
	else {
		i = 0;
		j = data_len;
		while (j > PAGE_SIZE) {
			memcpy(data_out + i * PAGE_SIZE, tvmem[i], PAGE_SIZE);
			i++;
			j -= PAGE_SIZE;
		}
		memcpy(data_out + i * PAGE_SIZE, tvmem[i], j);

		printk(KERN_INFO "OUT:\n");
		hexdump(data_out, data_len);
	}

out_free_req:
	ablkcipher_request_free(req);
out:
	crypto_free_ablkcipher(tfm);

	return ret;
}
Example #6
void mmc_decrypt_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct mmc_tcrypt_result result;
	struct scatterlist *in_sg = mrq->data->sg;
	int rc = 0;
	u8 IV[MMC_AES_XTS_IV_LEN];
	sector_t sector = mrq->data->sector;

	tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("%s:%s ablkcipher tfm allocation failed : error = %lu\n",
				mmc_hostname(host), __func__, PTR_ERR(tfm));
		return;
	}

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("%s:%s ablkcipher request allocation failed\n",
				mmc_hostname(host), __func__);
		goto ablkcipher_req_alloc_failure;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					mmc_crypto_cipher_complete, &result);

	init_completion(&result.completion);
	qcrypto_cipher_set_flag(req,
		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
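	/*
	 * Vendor-specific qcrypto flags: the key comes from the hardware
	 * key pipe (hence the NULL key in setkey below) and the XTS
	 * data-unit size is fixed at 512 bytes.
	 */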
	crypto_ablkcipher_clear_flags(tfm, ~0);
	crypto_ablkcipher_setkey(tfm, NULL, MMC_KEY_SIZE_XTS);

	memset(IV, 0, MMC_AES_XTS_IV_LEN);
	memcpy(IV, &sector, sizeof(sector_t));
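	/* XTS tweak: the start sector number, zero-padded to the IV size. */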

	ablkcipher_request_set_crypt(req, in_sg, in_sg,
		mrq->data->blksz * mrq->data->blocks, (void *) IV);

	rc = crypto_ablkcipher_decrypt(req);

	switch (rc) {
	case 0:
		break;

	case -EBUSY:
		/*
		 * Treat -EBUSY like -EINPROGRESS: make the request
		 * synchronous by waiting for completion in both cases.
		 */
	case -EINPROGRESS:
		wait_for_completion_interruptible(&result.completion);
		if (result.err)
			pr_err("%s:%s error = %d decrypting the request\n",
				mmc_hostname(host), __func__, result.err);
		break;

	default:
		goto crypto_operation_failure;
	}

crypto_operation_failure:
	ablkcipher_request_free(req);

ablkcipher_req_alloc_failure:
	crypto_free_ablkcipher(tfm);

	return;
}
int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}
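All of the examples above use the legacy ablkcipher interface, which was removed from mainline Linux in 5.5 in favor of crypto_skcipher. For comparison, here is a minimal sketch of the same one-block AES-128-CBC decryption on the skcipher API. It assumes a kernel with DECLARE_CRYPTO_WAIT/crypto_wait_req (v4.14+); the function name and buffer parameters are illustrative, not taken from any of the drivers above.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hypothetical helper mirroring cds_decrypt_aes()/vos_decrypt_AES().
 * in/out must not live on the stack, since sg_init_one() needs
 * linearly mapped addresses. */
static int example_aes_cbc_decrypt(u8 *in, u8 *out, const u8 *key)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg_in, sg_out;
	u8 iv[AES_BLOCK_SIZE] = { 0 };	/* zero IV, as in the examples above */
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	/* crypto_wait_req() below turns the async callback into a blocking wait. */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto free_req;

	sg_init_one(&sg_in, in, AES_BLOCK_SIZE);
	sg_init_one(&sg_out, out, AES_BLOCK_SIZE);
	skcipher_request_set_crypt(req, &sg_in, &sg_out, AES_BLOCK_SIZE, iv);

	ret = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);

free_req:
	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}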