Example #1
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
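The istate and ostate buffers filled by mv_cesa_ahmac_setkey() are the raw hash states left after hashing the HMAC ipad and opad blocks. A hypothetical caller sketch follows; my_hmac_md5_setkey() and the way the states are consumed are illustrative only (the real driver copies them into its hardware context), and struct md5_state comes from <crypto/md5.h>:

static int my_hmac_md5_setkey(const u8 *key, unsigned int keylen,
			      struct md5_state *istate,
			      struct md5_state *ostate)
{
	int ret;

	/* Derive the inner/outer MD5 states for this HMAC key. */
	ret = mv_cesa_ahmac_setkey("md5", key, keylen, istate, ostate);
	if (ret)
		return ret;

	/* istate/ostate now hold the MD5 state after processing the
	 * key XOR ipad / key XOR opad block; a driver would load them
	 * as the initial hardware IVs for inner and outer hashing. */
	return 0;
}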
Example #2
static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_ahash *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
			       & CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
			       & CRYPTO_TFM_RES_MASK);

	return err;
}
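The context dereferenced by ghash_async_setkey() is not shown above; a minimal sketch of what it looks like in the clmulni/ce glue drivers is given below (the real definition may carry extra fields):

struct ghash_async_ctx {
	/* cryptd-backed handle for the synchronous GHASH implementation;
	 * its ->base member is the struct crypto_ahash used as the child
	 * transform in the setkey above. */
	struct cryptd_ahash *cryptd_tfm;
};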
Example #3
static int hmac_sha_digest(const char *algo, char *data_in, size_t dlen,
			char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_sha_result tresult;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);
	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crypto_alloc_ahash failed\n");
		rc = PTR_ERR(tfm);
		goto err_tfm;
	}
	/* Check the output buffer before allocating the request so the
	 * error path below does not leak the request. */
	if (crypto_ahash_digestsize(tfm) > outlen) {
		printk(KERN_ERR "tfm digest size > result buffer\n");
		rc = -EINVAL;
		goto err_req;
	}
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "failed to allocate request\n");
		rc = -ENOMEM;
		goto err_req;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
			hmac_sha_complete, &tresult);

	sg_init_one(&sg, data_in, dlen);

	crypto_ahash_clear_flags(tfm, ~0);
	ahash_request_set_crypt(req, &sg, hash_out, dlen);
	rc = do_one_ahash_op(req, crypto_ahash_digest(req));

	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
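hmac_sha_digest() depends on a completion context (struct hmac_sha_result), a callback (hmac_sha_complete) and a do_one_ahash_op() helper that are not shown. A minimal sketch modelled on the kernel's tcrypt-style helpers follows; the original file may define them slightly differently:

struct hmac_sha_result {
	struct completion completion;
	int err;
};

static void hmac_sha_complete(struct crypto_async_request *req, int err)
{
	struct hmac_sha_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* Run one async hash operation; if the driver queued it, wait for the
 * completion callback and report its final status. */
static int do_one_ahash_op(struct ahash_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct hmac_sha_result *res = req->base.data;

		wait_for_completion(&res->completion);
		reinit_completion(&res->completion);
		ret = res->err;
	}

	return ret;
}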
Example #4
int hmac_md5(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
                v_U8_t *output, v_U8_t outlen)
{
    int ret = 0;
    struct crypto_ahash *tfm;
    struct scatterlist sg;
    struct ahash_request *req;
    struct hmac_md5_result tresult;
    void *hash_buff = NULL;

    unsigned char hash_result[64];
    int i;

    memset(output, 0, outlen);

    init_completion(&tresult.completion);

    tfm = wcnss_wlan_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
    if (IS_ERR(tfm)) {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "crypto_alloc_ahash failed");
        ret = PTR_ERR(tfm);
        goto err_tfm;
    }

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to allocate request for hmac(md5)");
        ret = -ENOMEM;
        goto err_req;
    }

    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        hmac_md5_complete, &tresult);

    hash_buff = kzalloc(psize, GFP_KERNEL);
    if (!hash_buff) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to kzalloc hash_buff");
        ret = -ENOMEM;
        goto err_hash_buf;
    }

    memset(hash_result, 0, 64);
    memcpy(hash_buff, plaintext, psize);
    sg_init_one(&sg, hash_buff, psize);

    if (ksize) {
        crypto_ahash_clear_flags(tfm, ~0);
        ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);

        if (ret) {
            VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_ahash_setkey failed");
            goto err_setkey;
        }
    }

    ahash_request_set_crypt(req, &sg, hash_result, psize);
    ret = wcnss_wlan_crypto_ahash_digest(req);

    VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "ret 0x%x");

    switch (ret) {
        case 0:
            for (i = 0; i < outlen; i++)
                output[i] = hash_result[i];
            break;
        case -EINPROGRESS:
        case -EBUSY:
             ret = wait_for_completion_interruptible(&tresult.completion);
             if (!ret && !tresult.err) {
                 for (i = 0; i < outlen; i++)
                     output[i] = hash_result[i];
                 INIT_COMPLETION(tresult.completion);
                 break;
             } else {
                 VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "wait_for_completion_interruptible failed");
                 if (!ret)
                     ret = tresult.err;
                 goto out;
             }
        default:
              goto out;
        }

out:
err_setkey:
        kfree(hash_buff);
err_hash_buf:
        ahash_request_free(req);
err_req:
        wcnss_wlan_crypto_free_ahash(tfm);
err_tfm:
        return ret;
}
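The completion context and callback assumed by hmac_md5() (here and in the variants further down) are not shown; a minimal sketch under that assumption:

struct hmac_md5_result {
	struct completion completion;
	int err;
};

static void hmac_md5_complete(struct crypto_async_request *req, int err)
{
	struct hmac_md5_result *r = req->data;

	if (err == -EINPROGRESS)
		return;
	r->err = err;
	complete(&r->completion);
}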
Example #5
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
                             unsigned int keylen)
{
    struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
    struct crypto_ahash *ghash = ctx->ghash;
    struct crypto_ablkcipher *ctr = ctx->ctr;
    struct {
        be128 hash;
        u8 iv[8];

        struct crypto_gcm_setkey_result result;

        struct scatterlist sg[1];
        struct ablkcipher_request req;
    } *data;
    int err;

    crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
    crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
                                CRYPTO_TFM_REQ_MASK);

    err = crypto_ablkcipher_setkey(ctr, key, keylen);
    if (err)
        return err;

    crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
                          CRYPTO_TFM_RES_MASK);

    data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
                   GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    init_completion(&data->result.completion);
    sg_init_one(data->sg, &data->hash, sizeof(data->hash));
    ablkcipher_request_set_tfm(&data->req, ctr);
    ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                    CRYPTO_TFM_REQ_MAY_BACKLOG,
                                    crypto_gcm_setkey_done,
                                    &data->result);
    ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
                                 sizeof(data->hash), data->iv);

    err = crypto_ablkcipher_encrypt(&data->req);
    if (err == -EINPROGRESS || err == -EBUSY) {
        err = wait_for_completion_interruptible(
                  &data->result.completion);
        if (!err)
            err = data->result.err;
    }

    if (err)
        goto out;

    crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
    crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
                           CRYPTO_TFM_REQ_MASK);
    err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
    crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
                          CRYPTO_TFM_RES_MASK);

out:
    kfree(data);
    return err;
}
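crypto_gcm_setkey() derives the GHASH hash key H by encrypting an all-zero block with the CTR cipher, then hands it to the ghash child transform. The completion helper it pairs with in crypto/gcm.c looks roughly like this (sketch; the field layout matches the struct embedded in the kzalloc'd data block above):

struct crypto_gcm_setkey_result {
	int err;
	struct completion completion;
};

static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
{
	struct crypto_gcm_setkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}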
Example #6
int hmac_md5(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
                v_U8_t *output, v_U8_t outlen)
{
    int ret = 0;
    struct crypto_ahash *tfm;
    struct scatterlist sg;
    struct ahash_request *req;
    struct hmac_md5_result tresult = {.err = 0};
    void *hash_buff = NULL;

    unsigned char hash_result[64];
    int i;

    memset(output, 0, outlen);

    init_completion(&tresult.completion);

#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    tfm = crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
#else
    tfm = wcnss_wlan_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
#endif
    if (IS_ERR(tfm)) {
        VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "crypto_alloc_ahash failed");
        ret = PTR_ERR(tfm);
        goto err_tfm;
    }

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to allocate request for hmac(md5)");
        ret = -ENOMEM;
        goto err_req;
    }

    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        hmac_md5_complete, &tresult);

    hash_buff = kzalloc(psize, GFP_KERNEL);
    if (!hash_buff) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to kzalloc hash_buff");
        ret = -ENOMEM;
        goto err_hash_buf;
    }

    memset(hash_result, 0, 64);
    vos_mem_copy(hash_buff, plaintext, psize);
    sg_init_one(&sg, hash_buff, psize);

    if (ksize) {
        crypto_ahash_clear_flags(tfm, ~0);
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
        ret = crypto_ahash_setkey(tfm, key, ksize);
#else
        ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);
#endif
        if (ret) {
            VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_ahash_setkey failed");
            goto err_setkey;
        }
    }

    ahash_request_set_crypt(req, &sg, hash_result, psize);
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    ret = crypto_ahash_digest(req);
#else
    ret = wcnss_wlan_crypto_ahash_digest(req);
#endif

    VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

    switch (ret) {
        case 0:
            for (i = 0; i < outlen; i++)
                output[i] = hash_result[i];
            break;
        case -EINPROGRESS:
        case -EBUSY:
             ret = wait_for_completion_interruptible(&tresult.completion);
             if (!ret && !tresult.err) {
                 for (i = 0; i < outlen; i++)
                    output[i] = hash_result[i];
                  INIT_COMPLETION(tresult.completion);
                  break;
             } else {
                 VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "wait_for_completion_interruptible failed");
                 if (!ret)
                     ret = tresult.err;
                 goto out;
             }
        default:
              goto out;
        }

out:
err_setkey:
        kfree(hash_buff);
err_hash_buf:
        ahash_request_free(req);
err_req:
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
        crypto_free_ahash(tfm);
#else
        wcnss_wlan_crypto_free_ahash(tfm);
#endif
err_tfm:
        return ret;
}

VOS_STATUS vos_md5_hmac_str(v_U32_t cryptHandle, /* Handle */
           v_U8_t *pText, /* pointer to data stream */
           v_U32_t textLen, /* length of data stream */
           v_U8_t *pKey, /* pointer to authentication key */
           v_U32_t keyLen, /* length of authentication key */
           v_U8_t digest[VOS_DIGEST_MD5_SIZE])/* caller digest to be filled in */
{
    int ret = 0;

    ret = hmac_md5(
            pKey,                   //v_U8_t *key,
            (v_U8_t) keyLen,        //v_U8_t ksize,
            (char *)pText,          //char *plaintext,
            (v_U8_t) textLen,       //v_U8_t psize,
            digest,                 //v_U8_t *output,
            VOS_DIGEST_MD5_SIZE     //v_U8_t outlen
            );

    if (ret != 0) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,"hmac_md5() call failed");
        return VOS_STATUS_E_FAULT;
    }

    return VOS_STATUS_SUCCESS;
}
Example #7
int
hmac_md5(uint8_t *key, uint8_t ksize, char *plaintext, uint8_t psize,
	 uint8_t *output, uint8_t outlen)
{
	int ret = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_md5_result tresult = {.err = 0 };
	void *hash_buff = NULL;

	unsigned char hash_result[64];
	int i;

	memset(output, 0, outlen);

	init_completion(&tresult.completion);

	tfm = cds_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "crypto_alloc_ahash failed");
		ret = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "failed to allocate request for hmac(md5)");
		ret = -ENOMEM;
		goto err_req;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_md5_complete, &tresult);

	hash_buff = kzalloc(psize, GFP_KERNEL);
	if (!hash_buff) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "failed to kzalloc hash_buff");
		ret = -ENOMEM;
		goto err_hash_buf;
	}

	memset(hash_result, 0, 64);
	memcpy(hash_buff, plaintext, psize);
	sg_init_one(&sg, hash_buff, psize);

	if (ksize) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = cds_crypto_ahash_setkey(tfm, key, ksize);
		if (ret) {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "crypto_ahash_setkey failed");
			goto err_setkey;
		}
	}

	ahash_request_set_crypt(req, &sg, hash_result, psize);
	ret = cds_crypto_ahash_digest(req);

	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

	switch (ret) {
	case 0:
		for (i = 0; i < outlen; i++)
			output[i] = hash_result[i];
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&tresult.completion);
		if (!ret && !tresult.err) {
			INIT_COMPLETION(tresult.completion);
			break;
		} else {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "wait_for_completion_interruptible failed");
			if (!ret)
				ret = tresult.err;
			goto out;
		}
	default:
		goto out;
	}

out:
err_setkey:
	kfree(hash_buff);
err_hash_buf:
	ahash_request_free(req);
err_req:
	cds_crypto_free_ahash(tfm);
err_tfm:
	return ret;
}

CDF_STATUS cds_md5_hmac_str(uint32_t cryptHandle,       /* Handle */
			    uint8_t *pText,     /* pointer to data stream */
			    uint32_t textLen,   /* length of data stream */
			    uint8_t *pKey,      /* pointer to authentication key */
			    uint32_t keyLen,    /* length of authentication key */
			    uint8_t digest[CDS_DIGEST_MD5_SIZE])
{                               /* caller digest to be filled in */
	int ret = 0;

	ret = hmac_md5(pKey,    /* uint8_t *key, */
		       (uint8_t) keyLen,        /* uint8_t ksize, */
		       (char *)pText,   /* char *plaintext, */
		       (uint8_t) textLen,       /* uint8_t psize, */
		       digest,  /* uint8_t *output, */
		       CDS_DIGEST_MD5_SIZE      /* uint8_t outlen */
		       );

	if (ret != 0) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "hmac_md5() call failed");
		return CDF_STATUS_E_FAULT;
	}

	return CDF_STATUS_SUCCESS;
}
Example #8
/*
 * Sha/HMAC self tests
 */
int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index = 0, num_tv;
	char *k_out_buf = NULL;
	struct scatterlist fips_sg;
	struct crypto_ahash *tfm;
	struct ahash_request *ahash_req;
	struct _fips_completion fips_completion;
	struct _fips_test_vector_sha_hmac tv_sha_hmac;

	num_tv = (sizeof(fips_test_vector_sha_hmac)) /
		 (sizeof(struct _fips_test_vector_sha_hmac));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {
		memcpy(&tv_sha_hmac, &fips_test_vector_sha_hmac[tv_index],
			(sizeof(struct _fips_test_vector_sha_hmac)));
		k_out_buf = kzalloc(tv_sha_hmac.diglen, GFP_KERNEL);
		if (k_out_buf == NULL) {
			pr_err("qcrypto: Failed to allocate memory for k_out_buf %ld\n",
				PTR_ERR(k_out_buf));
			return -ENOMEM;
		}

		memset(k_out_buf, 0, tv_sha_hmac.diglen);
		init_completion(&fips_completion.completion);

		/*
		 * use_sw flags are set in the DTSI file, which makes the
		 * default Linux API calls go to s/w crypto instead of h/w
		 * crypto. This code makes sure that all selftest calls
		 * always go to h/w, independent of the DTSI flags.
		 */
		if (tv_sha_hmac.klen == 0) {
			if (selftest_d->prefix_ahash_algo)
				if (_fips_get_alg_cra_name(tv_sha_hmac
					.hash_alg, selftest_d->algo_prefix,
					strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					tv_index);
					goto clr_buf;
				}
		} else {
			if (selftest_d->prefix_hmac_algo)
				if (_fips_get_alg_cra_name(tv_sha_hmac
					.hash_alg, selftest_d->algo_prefix,
					strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					tv_index);
					goto clr_buf;
				}
		}

		tfm = crypto_alloc_ahash(tv_sha_hmac.hash_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
			tv_sha_hmac.hash_alg);
			rc = PTR_ERR(tfm);
			goto clr_buf;
		}

		ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!ahash_req) {
			pr_err("qcrypto: ahash_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}
		rc = qcrypto_ahash_set_device(ahash_req, selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, rc);
			goto clr_ahash_req;
		}
		ahash_request_set_callback(ahash_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);

		sg_init_one(&fips_sg, &tv_sha_hmac.input[0], tv_sha_hmac.ilen);

		crypto_ahash_clear_flags(tfm, ~0);
		if (tv_sha_hmac.klen != 0) {
			rc = crypto_ahash_setkey(tfm, tv_sha_hmac.key,
				tv_sha_hmac.klen);
			if (rc) {
				pr_err("qcrypto: crypto_ahash_setkey failed\n");
				goto clr_ahash_req;
			}
		}

		ahash_request_set_crypt(ahash_req, &fips_sg, k_out_buf,
			tv_sha_hmac.ilen);
		rc = crypto_ahash_digest(ahash_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:SHA: wait_for_completion failed\n");
				goto clr_ahash_req;
			}

		}

		if (memcmp(k_out_buf, tv_sha_hmac.digest,
			tv_sha_hmac.diglen))
			rc = -1;

clr_ahash_req:
		ahash_request_free(ahash_req);
clr_tfm:
		crypto_free_ahash(tfm);
clr_buf:
		kzfree(k_out_buf);

	/* For any failure, return error */
		if (rc)
			return rc;

	}
	return rc;
}
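The _fips_completion context and the _fips_cb callback used by the selftest are not shown; a minimal sketch, assuming they follow the usual completion pattern seen in the other examples:

struct _fips_completion {
	struct completion completion;
	int err;
};

static void _fips_cb(struct crypto_async_request *req, int err)
{
	struct _fips_completion *fc = req->data;

	if (err == -EINPROGRESS)
		return;
	fc->err = err;
	complete(&fc->completion);
}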
Example #9
static int tegra_crypto_sha(struct tegra_sha_req *sha_req)
{

	struct crypto_ahash *tfm;
	struct scatterlist sg[1];
	char result[64];
	struct ahash_request *req;
	struct tegra_crypto_completion sha_complete;
	void *hash_buff;
	unsigned long *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	tfm = crypto_alloc_ahash(sha_req->algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
		       "%ld\n", sha_req->algo, PTR_ERR(tfm));
		ret = PTR_ERR(tfm);
		goto out_alloc;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", sha_req->algo);
		goto out_noreq;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto out_buf;
	}

	init_completion(&sha_complete.restart);

	memset(result, 0, 64);

	hash_buff = xbuf[0];

	memcpy(hash_buff, sha_req->plaintext, sha_req->plaintext_sz);
	sg_init_one(&sg[0], hash_buff, sha_req->plaintext_sz);

	if (sha_req->keylen) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = crypto_ahash_setkey(tfm, sha_req->key,
					  sha_req->keylen);
		if (ret) {
			printk(KERN_ERR "alg: hash: setkey failed on "
			       " %s: ret=%d\n", sha_req->algo,
			       -ret);
			goto out;
		}
	}

	ahash_request_set_crypt(req, sg, result, sha_req->plaintext_sz);

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_init(req));
	if (ret) {
		pr_err("alg: hash: init failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_update(req));
	if (ret) {
		pr_err("alg: hash: update failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_final(req));
	if (ret) {
		pr_err("alg: hash: final failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = copy_to_user((void __user *)sha_req->result,
		(const void *)result, crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d) for %s\n",
				ret, sha_req->algo);
	}

out:
	free_bufs(xbuf);

out_buf:
	ahash_request_free(req);

out_noreq:
	crypto_free_ahash(tfm);

out_alloc:
	return ret;
}
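tegra_crypto_sha() drives init/update/final through a sha_async_hash_op() helper and a tegra_crypto_completion context that are not shown; a sketch of what they typically look like (the field name req_err is an assumption, only .restart appears above):

struct tegra_crypto_completion {
	struct completion restart;
	int req_err;
};

static int sha_async_hash_op(struct ahash_request *req,
			     struct tegra_crypto_completion *tr, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* Wait for the async completion callback, then report the
		 * status it recorded. */
		ret = wait_for_completion_interruptible(&tr->restart);
		if (!ret)
			ret = tr->req_err;
		reinit_completion(&tr->restart);
	}

	return ret;
}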
Example #10
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct qce_ahash_result result;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
				       CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.error;
	}

	if (ret)
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}
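The qce_ahash_result context (fields completion and error) and the qce_digest_complete callback referenced above are not shown; a minimal sketch consistent with how they are used in the setkey path:

struct qce_ahash_result {
	struct completion completion;
	int error;
};

static void qce_digest_complete(struct crypto_async_request *req, int error)
{
	struct qce_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;
	result->error = error;
	complete(&result->completion);
}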
Example #11
static int hmac_sha_update(const char *algo, char *data_in, size_t dlen,
			char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg[TVMEMSIZE];
	struct ahash_request *req;
	struct hmac_sha_result tresult;
	int i, j;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);
	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crypto_alloc_ahash failed\n");
		rc = PTR_ERR(tfm);
		goto err_tfm;
	}
	/* Check the output buffer before allocating the request so the
	 * error path below does not leak the request. */
	if (crypto_ahash_digestsize(tfm) > outlen) {
		printk(KERN_ERR "tfm digest size > result buffer\n");
		rc = -EINVAL;
		goto err_req;
	}
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "failed to allocate request\n");
		rc = -ENOMEM;
		goto err_req;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
			hmac_sha_complete, &tresult);

	sg_init_table(sg, TVMEMSIZE);

	i = 0;
	j = dlen;

	while (j > PAGE_SIZE) {
		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
		memcpy(tvmem[i], data_in + i * PAGE_SIZE, PAGE_SIZE);
		i++;
		j -= PAGE_SIZE;
	}
	sg_set_buf(sg + i, tvmem[i], j);
	memcpy(tvmem[i], data_in + i * PAGE_SIZE, j);

	crypto_ahash_clear_flags(tfm, ~0);
	ahash_request_set_crypt(req, sg, hash_out, dlen);
	rc = do_one_ahash_op(req, crypto_ahash_init(req));
	if (rc)
		goto out;

	rc = do_one_ahash_op(req, crypto_ahash_update(req));
	if (rc)
		goto out;

	rc = do_one_ahash_op(req, crypto_ahash_final(req));

out:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
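hmac_sha_update() scatters the input over tvmem[], an array of TVMEMSIZE page-sized buffers in the style of tcrypt.c. Below is a sketch of the backing allocation it assumes; the value 4 and the helper name alloc_tvmem() are illustrative:

#define TVMEMSIZE	4

static char *tvmem[TVMEMSIZE];

/* Allocate one page per scatterlist slot; hmac_sha_update() can then
 * hash up to TVMEMSIZE * PAGE_SIZE bytes per call. */
static int alloc_tvmem(void)
{
	int i;

	for (i = 0; i < TVMEMSIZE; i++) {
		tvmem[i] = (void *)__get_free_page(GFP_KERNEL);
		if (!tvmem[i])
			return -ENOMEM;
	}

	return 0;
}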