int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
			int hmac_mode, void *mackey, size_t mackeylen)
{
	int ret;

	hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
	if (unlikely(IS_ERR(hdata->async.s))) {
		ddebug(1, "Failed to load transform for %s", alg_name);
		return -EINVAL;
	}

	/* Set the MAC key on the transform; the caller has already copied it in from user space. */
	if (hmac_mode != 0) {
		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
		if (unlikely(ret)) {
			ddebug(1, "Setting hmac key failed for %s-%zu.",
					alg_name, mackeylen*8);
			ret = -EINVAL;
			goto error;
		}
	}

	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
	hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

	hdata->async.result = kzalloc(sizeof(*hdata->async.result), GFP_KERNEL);
	if (unlikely(!hdata->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&hdata->async.result->completion);

	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
	if (unlikely(!hdata->async.request)) {
		derr(0, "error allocating async crypto request");
		ret = -ENOMEM;
		goto error;
	}

	ahash_request_set_callback(hdata->async.request,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			cryptodev_complete, hdata->async.result);

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		derr(0, "error in crypto_hash_init()");
		goto error_request;
	}

	hdata->init = 1;
	return 0;

error_request:
	ahash_request_free(hdata->async.request);
error:
	kfree(hdata->async.result);
	crypto_free_ahash(hdata->async.s);
	return ret;
}
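
The snippet above relies on a cryptodev_complete() callback and a result
structure that are not shown. A minimal sketch of the completion pattern it
implies (the real cryptodev-linux definitions may differ slightly):

struct cryptodev_result {
	struct completion completion;
	int err;
};

/* Sketch: standard async-crypto completion callback. A backlogged
 * request signals -EINPROGRESS first; only the final invocation
 * carries the real status. */
static void cryptodev_complete(struct crypto_async_request *req, int err)
{
	struct cryptodev_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}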
Example #2
/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a req for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks.  The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in]  hash_alg	hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type	pointer to the hash description in hash_types[] array
 * \param[in,out] req	ahash request to be initialized
 * \param[in]  key	initial hash value/state, NULL to use default value
 * \param[in]  key_len	length of \a key
 *
 * \retval		0 on success
 * \retval		negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct ahash_request **req,
				 unsigned char *key,
				 unsigned int key_len)
{
	struct crypto_ahash *tfm;
	int err = 0;

	*type = cfs_crypto_hash_type(hash_alg);

	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}
	tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(tfm);
	}

	*req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!*req) {
		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
		       (*type)->cht_name);
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(*req, 0, NULL, NULL);

	if (key)
		err = crypto_ahash_setkey(tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_ahash_setkey(tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);

	if (err != 0) {
		ahash_request_free(*req);
		crypto_free_ahash(tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
	       cfs_crypto_hash_speeds[hash_alg]);

	err = crypto_ahash_init(*req);
	if (err) {
		ahash_request_free(*req);
		crypto_free_ahash(tfm);
	}
	return err;
}
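
For context, a hypothetical caller might drive the request returned by
cfs_crypto_hash_alloc() like this. example_cfs_digest() is illustrative only
(not from the Lustre sources); it assumes CFS_HASH_ALG_SHA256 from libcfs and
a buffer that sg_init_one() can map:

static int example_cfs_digest(const void *buf, unsigned int len,
			      unsigned char *digest)
{
	const struct cfs_crypto_hash_type *type;
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int rc;

	rc = cfs_crypto_hash_alloc(CFS_HASH_ALG_SHA256, &type, &req, NULL, 0);
	if (rc)
		return rc;

	/* The tfm was allocated with CRYPTO_ALG_ASYNC masked out, so
	 * update/final complete synchronously. */
	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);
	rc = crypto_ahash_update(req);
	if (rc == 0) {
		ahash_request_set_crypt(req, NULL, digest, 0);
		rc = crypto_ahash_final(req);
	}

	tfm = crypto_ahash_reqtfm(req);
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return rc;
}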
Example #3
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
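
mv_cesa_ahmac_pad_init() is not shown above. Per RFC 2104, it has to fill
ipad/opad with the key padded to the block size and XOR-ed with 0x36/0x5c
(after hashing the key down first if it is longer than one block). A sketch
of that derivation, assuming the key has already been shortened to at most
one block:

/* Sketch of the RFC 2104 pad derivation: ipad and opad each hold
 * 'blocksize' bytes on entry, keylen <= blocksize. */
static void hmac_derive_pads(const u8 *key, unsigned int keylen,
			     u8 *ipad, u8 *opad, unsigned int blocksize)
{
	unsigned int i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;	/* HMAC inner pad byte */
		opad[i] ^= 0x5c;	/* HMAC outer pad byte */
	}
}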
Example #4
static int hmac_sha_digest(const char *algo, char *data_in, size_t dlen,
			char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_sha_result tresult;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);
	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crypto_alloc_ahash failed\n");
		rc = PTR_ERR(tfm);
		goto err_tfm;
	}
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "failed to allocate request\n");
		rc = -ENOMEM;
		goto err_req;
	}
	if (crypto_ahash_digestsize(tfm) > outlen) {
		printk(KERN_ERR "tfm size > result buffer\n");
		rc = -EINVAL;
		goto out;	/* must free the request as well */
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
			hmac_sha_complete, &tresult);

	sg_init_one(&sg, data_in, dlen);

	crypto_ahash_clear_flags(tfm, ~0);
	ahash_request_set_crypt(req, &sg, hash_out, dlen);
	rc = do_one_ahash_op(req, crypto_ahash_digest(req));

out:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
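
do_one_ahash_op() and hmac_sha_complete() are not part of this snippet.
hmac_sha_complete() presumably follows the same shape as the
cryptodev_complete() sketch after Example #1; a plausible sketch of the
result struct and the wait helper, with the names taken from the call sites
above:

struct hmac_sha_result {
	struct completion completion;
	int err;
};

/* Sketch: turn an async -EINPROGRESS/-EBUSY return into the final
 * status by waiting on the completion that the callback signals. */
static int do_one_ahash_op(struct ahash_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct hmac_sha_result *res = req->base.data;

		wait_for_completion(&res->completion);
		reinit_completion(&res->completion);
		ret = res->err;
	}
	return ret;
}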
Example #5
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct ahash_completion res;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   ahash_complete, &res);

	rc = ahash_wait(crypto_ahash_init(req), &res);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &res);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &res);
	}
out:
	ahash_request_free(req);
	return rc;
}
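
ahash_wait() is IMA's helper for normalizing synchronous and asynchronous
return codes (ahash_complete() itself follows the usual completion-callback
shape sketched earlier). Before IMA moved to crypto_wait_req() it looked
roughly like this, with struct ahash_completion inferred from the call sites:

struct ahash_completion {
	struct completion completion;
	int err;
};

static int ahash_wait(int err, struct ahash_completion *res)
{
	switch (err) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		/* async path: wait for ahash_complete() to fire */
		wait_for_completion(&res->completion);
		reinit_completion(&res->completion);
		err = res->err;
		break;
	default:
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
	}
	return err;
}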
Example #6
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct ahash_completion res;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   ahash_complete, &res);

	rc = ahash_wait(crypto_ahash_init(req), &res);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails fallback to
		 * using single buffering. Use previous memory allocation size
		 * as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &res);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;	/* short read: don't return a byte count */
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &res);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &res);
out3:
	if (read)
		file->f_mode &= ~FMODE_READ;
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &res);
	}
out1:
	ahash_request_free(req);
	return rc;
}
Example #7
int hmac_md5(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
                v_U8_t *output, v_U8_t outlen)
{
    int ret = 0;
    struct crypto_ahash *tfm;
    struct scatterlist sg;
    struct ahash_request *req;
    struct hmac_md5_result tresult = {.err = 0};
    void *hash_buff = NULL;

    unsigned char hash_result[64];
    int i;

    memset(output, 0, outlen);

    init_completion(&tresult.completion);

#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    tfm = crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
#else
    tfm = wcnss_wlan_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
#endif
    if (IS_ERR(tfm)) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_alloc_ahash failed");
        ret = PTR_ERR(tfm);
        goto err_tfm;
    }

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to allocate request for hmac(md5)");
        ret = -ENOMEM;
        goto err_req;
    }

    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        hmac_md5_complete, &tresult);

    hash_buff = kzalloc(psize, GFP_KERNEL);
    if (!hash_buff) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to kzalloc hash_buff");
        ret = -ENOMEM;
        goto err_hash_buf;
    }

    memset(hash_result, 0, 64);
    vos_mem_copy(hash_buff, plaintext, psize);
    sg_init_one(&sg, hash_buff, psize);

    if (ksize) {
        crypto_ahash_clear_flags(tfm, ~0);
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
        ret = crypto_ahash_setkey(tfm, key, ksize);
#else
        ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);
#endif
        if (ret) {
            VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_ahash_setkey failed");
            goto err_setkey;
        }
    }

    ahash_request_set_crypt(req, &sg, hash_result, psize);
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    ret = crypto_ahash_digest(req);
#else
    ret = wcnss_wlan_crypto_ahash_digest(req);
#endif

    VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

    switch (ret) {
        case 0:
            for (i=0; i< outlen; i++)
                    output[i] = hash_result[i];
            break;
        case -EINPROGRESS:
        case -EBUSY:
             ret = wait_for_completion_interruptible(&tresult.completion);
             if (!ret && !tresult.err) {
                 for (i = 0; i < outlen; i++)
                    output[i] = hash_result[i];
                  INIT_COMPLETION(tresult.completion);
                  break;
             } else {
                 VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "wait_for_completion_interruptible failed");
                 if (!ret)
                     ret = tresult.err;
                 goto out;
             }
        default:
              goto out;
        }

out:
err_setkey:
        kfree(hash_buff);
err_hash_buf:
        ahash_request_free(req);
err_req:
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
        crypto_free_ahash(tfm);
#else
        wcnss_wlan_crypto_free_ahash(tfm);
#endif
err_tfm:
        return ret;
}

VOS_STATUS vos_md5_hmac_str(v_U32_t cryptHandle, /* Handle */
           v_U8_t *pText, /* pointer to data stream */
           v_U32_t textLen, /* length of data stream */
           v_U8_t *pKey, /* pointer to authentication key */
           v_U32_t keyLen, /* length of authentication key */
           v_U8_t digest[VOS_DIGEST_MD5_SIZE])/* caller digest to be filled in */
{
    int ret = 0;

    ret = hmac_md5(
            pKey,                   //v_U8_t *key,
            (v_U8_t) keyLen,        //v_U8_t ksize,
            (char *)pText,          //char *plaintext,
            (v_U8_t) textLen,       //v_U8_t psize,
            digest,                 //v_U8_t *output,
            VOS_DIGEST_MD5_SIZE     //v_U8_t outlen
            );

    if (ret != 0) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,"hmac_md5() call failed");
        return VOS_STATUS_E_FAULT;
    }

    return VOS_STATUS_SUCCESS;
}
Example #8
int
hmac_md5(uint8_t *key, uint8_t ksize, char *plaintext, uint8_t psize,
	 uint8_t *output, uint8_t outlen)
{
	int ret = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_md5_result tresult = {.err = 0 };
	void *hash_buff = NULL;

	unsigned char hash_result[64];
	int i;

	memset(output, 0, outlen);

	init_completion(&tresult.completion);

	tfm = cds_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "crypto_alloc_ahash failed");
		ret = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "failed to allocate request for hmac(md5)");
		ret = -ENOMEM;
		goto err_req;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_md5_complete, &tresult);

	hash_buff = kzalloc(psize, GFP_KERNEL);
	if (!hash_buff) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "failed to kzalloc hash_buff");
		ret = -ENOMEM;
		goto err_hash_buf;
	}

	memset(hash_result, 0, 64);
	memcpy(hash_buff, plaintext, psize);
	sg_init_one(&sg, hash_buff, psize);

	if (ksize) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = cds_crypto_ahash_setkey(tfm, key, ksize);
		if (ret) {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "crypto_ahash_setkey failed");
			goto err_setkey;
		}
	}

	ahash_request_set_crypt(req, &sg, hash_result, psize);
	ret = cds_crypto_ahash_digest(req);

	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

	switch (ret) {
	case 0:
		for (i = 0; i < outlen; i++)
			output[i] = hash_result[i];
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&tresult.completion);
		if (!ret && !tresult.err) {
			/* as in the synchronous path, copy the digest out */
			for (i = 0; i < outlen; i++)
				output[i] = hash_result[i];
			INIT_COMPLETION(tresult.completion);
			break;
		} else {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "wait_for_completion_interruptible failed");
			if (!ret)
				ret = tresult.err;
			goto out;
		}
	default:
		goto out;
	}

out:
err_setkey:
	kfree(hash_buff);
err_hash_buf:
	ahash_request_free(req);
err_req:
	cds_crypto_free_ahash(tfm);
err_tfm:
	return ret;
}

CDF_STATUS cds_md5_hmac_str(uint32_t cryptHandle,       /* Handle */
			    uint8_t *pText,     /* pointer to data stream */
			    uint32_t textLen,   /* length of data stream */
			    uint8_t *pKey,      /* pointer to authentication key */
			    uint32_t keyLen,    /* length of authentication key */
			    uint8_t digest[CDS_DIGEST_MD5_SIZE])
{                               /* caller digest to be filled in */
	int ret = 0;

	ret = hmac_md5(pKey,    /* uint8_t *key, */
		       (uint8_t) keyLen,        /* uint8_t ksize, */
		       (char *)pText,   /* char *plaintext, */
		       (uint8_t) textLen,       /* uint8_t psize, */
		       digest,  /* uint8_t *output, */
		       CDS_DIGEST_MD5_SIZE      /* uint8_t outlen */
		       );

	if (ret != 0) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "hmac_md5() call failed");
		return CDF_STATUS_E_FAULT;
	}

	return CDF_STATUS_SUCCESS;
}
Example #9
/*
 * Sha/HMAC self tests
 */
int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index = 0, num_tv;
	char *k_out_buf = NULL;
	struct scatterlist fips_sg;
	struct crypto_ahash *tfm;
	struct ahash_request *ahash_req;
	struct _fips_completion fips_completion;
	struct _fips_test_vector_sha_hmac tv_sha_hmac;

	num_tv = (sizeof(fips_test_vector_sha_hmac)) /
	(sizeof(struct _fips_test_vector_sha_hmac));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {
		memcpy(&tv_sha_hmac, &fips_test_vector_sha_hmac[tv_index],
			(sizeof(struct _fips_test_vector_sha_hmac)));
		k_out_buf = kzalloc(tv_sha_hmac.diglen, GFP_KERNEL);
		if (k_out_buf == NULL) {
			pr_err("qcrypto: Failed to allocate memory for k_out_buf\n");
			return -ENOMEM;
		}

		memset(k_out_buf, 0, tv_sha_hmac.diglen);
		init_completion(&fips_completion.completion);

		/* use_sw flags are set in dtsi file which makes
		default Linux API calls to go to s/w crypto instead
		of h/w crypto. This code makes sure that all selftests
		calls always go to h/w, independent of DTSI flags. */
		if (tv_sha_hmac.klen == 0) {
			if (selftest_d->prefix_ahash_algo)
				if (_fips_get_alg_cra_name(tv_sha_hmac
					.hash_alg, selftest_d->algo_prefix,
					strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					tv_index);
					goto clr_buf;
				}
		} else {
			if (selftest_d->prefix_hmac_algo)
				if (_fips_get_alg_cra_name(tv_sha_hmac
					.hash_alg, selftest_d->algo_prefix,
					strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					tv_index);
					goto clr_buf;
				}
		}

		tfm = crypto_alloc_ahash(tv_sha_hmac.hash_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
			tv_sha_hmac.hash_alg);
			rc = PTR_ERR(tfm);
			goto clr_buf;
		}

		ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!ahash_req) {
			pr_err("qcrypto: ahash_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}
		rc = qcrypto_ahash_set_device(ahash_req, selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, rc);
			goto clr_ahash_req;
		}
		ahash_request_set_callback(ahash_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);

		sg_init_one(&fips_sg, &tv_sha_hmac.input[0], tv_sha_hmac.ilen);

		crypto_ahash_clear_flags(tfm, ~0);
		if (tv_sha_hmac.klen != 0) {
			rc = crypto_ahash_setkey(tfm, tv_sha_hmac.key,
				tv_sha_hmac.klen);
			if (rc) {
				pr_err("qcrypto: crypto_ahash_setkey failed\n");
				goto clr_ahash_req;
			}
		}

		ahash_request_set_crypt(ahash_req, &fips_sg, k_out_buf,
			tv_sha_hmac.ilen);
		rc = crypto_ahash_digest(ahash_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:SHA: wait_for_completion failed\n");
				goto clr_ahash_req;
			}

		}

		if (memcmp(k_out_buf, tv_sha_hmac.digest,
			tv_sha_hmac.diglen))
			rc = -1;

clr_ahash_req:
		ahash_request_free(ahash_req);
clr_tfm:
		crypto_free_ahash(tfm);
clr_buf:
		kzfree(k_out_buf);

	/* For any failure, return error */
		if (rc)
			return rc;

	}
	return rc;
}
Example #10
/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a req for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks.  The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in]  hash_alg	hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type	pointer to the hash description in hash_types[] array
 * \param[in,out] req	ahash request to be initialized
 * \param[in]  key	initial hash value/state, NULL to use default value
 * \param[in]  key_len	length of \a key
 *
 * \retval		0 on success
 * \retval		negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct ahash_request **req,
				 unsigned char *key,
				 unsigned int key_len)
{
	struct crypto_ahash *tfm;
	int err = 0;

	*type = cfs_crypto_hash_type(hash_alg);
	if (!*type) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}

	/* Keys are only supported for the hmac version */
	if (key && key_len > 0) {
		char *algo_name;

		algo_name = kasprintf(GFP_KERNEL, "hmac(%s)",
				      (*type)->cht_name);
		if (!algo_name)
			return -ENOMEM;

		tfm = crypto_alloc_ahash(algo_name, 0, CRYPTO_ALG_ASYNC);
		kfree(algo_name);
	} else {
		tfm = crypto_alloc_ahash((*type)->cht_name, 0,
					 CRYPTO_ALG_ASYNC);
	}
	if (IS_ERR(tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(tfm);
	}

	*req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!*req) {
		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
		       (*type)->cht_name);
		GOTO(out_free_tfm, err = -ENOMEM);
	}

	ahash_request_set_callback(*req, 0, NULL, NULL);

	if (key)
		err = crypto_ahash_setkey(tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_ahash_setkey(tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);
	if (err)
		GOTO(out_free_req, err);

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
	       cfs_crypto_hash_speeds[hash_alg]);

	err = crypto_ahash_init(*req);
	if (err) {
out_free_req:
		ahash_request_free(*req);
out_free_tfm:
		crypto_free_ahash(tfm);
	}
	return err;
}
Example #11
static int tegra_crypt_rsa(struct tegra_crypto_ctx *ctx,
				struct tegra_rsa_req *rsa_req)
{
	struct crypto_ahash *tfm = NULL;
	struct ahash_request *req = NULL;
	struct scatterlist sg[1];
	char *result = NULL;
	void *hash_buff;
	int ret = 0;
	unsigned long *xbuf[XBUFSIZE];
	struct tegra_crypto_completion rsa_complete;

	switch (rsa_req->algo) {
	case TEGRA_RSA512:
		req = ahash_request_alloc(ctx->rsa512_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa512\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa512_tfm;
		break;
	case TEGRA_RSA1024:
		req = ahash_request_alloc(ctx->rsa1024_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa1024\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa1024_tfm;
		break;

	case TEGRA_RSA1536:
		req = ahash_request_alloc(ctx->rsa1536_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa1536\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa1536_tfm;
		break;

	case TEGRA_RSA2048:
		req = ahash_request_alloc(ctx->rsa2048_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa2048\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa2048_tfm;
		break;

	default:
		ret = -EINVAL;
		goto req_fail;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto buf_fail;
	}

	init_completion(&rsa_complete.restart);
	/* Assumed fix: hook up the driver's completion callback (see the
	 * tegra_crypto_completion sketch after this example); without it
	 * nothing ever signals rsa_complete.restart for async requests. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tegra_crypt_complete, &rsa_complete);

	result = kzalloc(rsa_req->keylen >> 16, GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		pr_err("\nresult alloc fail\n");
		goto result_fail;
	}

	hash_buff = xbuf[0];

	memcpy(hash_buff, rsa_req->message, rsa_req->msg_len);

	sg_init_one(&sg[0], hash_buff, rsa_req->msg_len);

	if (!(rsa_req->keylen))
		goto rsa_fail;

	if (!rsa_req->skip_key) {
		ret = crypto_ahash_setkey(tfm, rsa_req->key, rsa_req->keylen);
		if (ret) {
			pr_err("alg: hash: setkey failed\n");
			goto rsa_fail;
		}
	}

	ahash_request_set_crypt(req, sg, result, rsa_req->msg_len);

	ret = crypto_ahash_digest(req);

	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&rsa_complete.restart);
		if (!ret)
			ret = rsa_complete.req_err;
		INIT_COMPLETION(rsa_complete.restart);
	}

	if (ret) {
		pr_err("alg: hash: digest failed\n");
		goto rsa_fail;
	}

	ret = copy_to_user((void __user *)rsa_req->result, (const void *)result,
		crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d)\n", ret);
	}

rsa_fail:
	kfree(result);
result_fail:
	free_bufs(xbuf);
buf_fail:
	ahash_request_free(req);
req_fail:
	return ret;
}
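
The wait on rsa_complete.restart above only works if a callback records the
status in req_err and signals the completion; a sketch of the helper assumed
by the ahash_request_set_callback() call added above (the actual
tegra-cryptodev definitions may differ):

struct tegra_crypto_completion {
	struct completion restart;
	int req_err;
};

/* Sketch: same shape as the earlier completion-callback sketches. */
static void tegra_crypt_complete(struct crypto_async_request *req, int err)
{
	struct tegra_crypto_completion *done = req->data;

	if (err == -EINPROGRESS)
		return;

	done->req_err = err;
	complete(&done->restart);
}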
Example #12
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct qce_ahash_result result;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
				       CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.error;
	}

	if (ret)
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}
Example #13
static int hmac_sha_update(const char *algo, char *data_in, size_t dlen,
			char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg[TVMEMSIZE];
	struct ahash_request *req;
	struct hmac_sha_result tresult;
	int i, j;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);
	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crypto_alloc_ahash failed\n");
		rc = PTR_ERR(tfm);
		goto err_tfm;
	}
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "failed to allocate request\n");
		rc = -ENOMEM;
		goto err_req;
	}
	if (crypto_ahash_digestsize(tfm) > outlen) {
		printk(KERN_ERR "tfm size > result buffer\n");
		rc = -EINVAL;
		goto out;	/* must free the request as well */
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
			hmac_sha_complete, &tresult);

	sg_init_table(sg, TVMEMSIZE);

	i = 0;
	j = dlen;

	while (j > PAGE_SIZE) {
		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
		memcpy(tvmem[i], data_in + i * PAGE_SIZE, PAGE_SIZE);
		i++;
		j -= PAGE_SIZE;
	}
	sg_set_buf(sg + i, tvmem[i], j);
	memcpy(tvmem[i], data_in + i * PAGE_SIZE, j);

	crypto_ahash_clear_flags(tfm, ~0);
	ahash_request_set_crypt(req, sg, hash_out, dlen);
	rc = do_one_ahash_op(req, crypto_ahash_init(req));
	if (rc)
		goto out;
	rc = do_one_ahash_op(req, crypto_ahash_update(req));
	if (rc)
		goto out;

	rc = do_one_ahash_op(req, crypto_ahash_final(req));

out:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
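
tvmem[] and TVMEMSIZE come from the surrounding tcrypt-style harness; the
scatterlist above is backed by TVMEMSIZE single pages. A sketch of how such
buffers are typically set up (example_alloc_tvmem() is illustrative, not
from the original file):

static char *tvmem[TVMEMSIZE];

static int example_alloc_tvmem(void)
{
	int i;

	for (i = 0; i < TVMEMSIZE; i++) {
		tvmem[i] = (void *)__get_free_page(GFP_KERNEL);
		if (!tvmem[i]) {
			/* unwind the pages allocated so far */
			while (i--)
				free_page((unsigned long)tvmem[i]);
			return -ENOMEM;
		}
	}
	return 0;
}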
Example #14
/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		unsigned int offset, copy;
		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
			flags |= MSG_MORE;

		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
						  copy, flags);
		} else {
			struct msghdr msg = { .msg_flags = flags };
			struct kvec iov = {
				.iov_base = segment->data + offset,
				.iov_len = copy
			};

			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
		}

		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}

/**
 * iscsi_sw_tcp_xmit - TCP transmit
 **/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);

	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery
	 * here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}

/**
 * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
 */
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;

	return segment->total_copied - segment->total_size;
}

static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	unsigned int noreclaim_flag;
	int rc = 0;

	noreclaim_flag = memalloc_noreclaim_save();

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0) {
			rc = -EAGAIN;
			break;
		}
		if (rc < 0)
			break;
		rc = 0;
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return rc;
}

/*
 * This is called when we're done sending the header.
 * Simply copy the data_segment to the send segment, and return.
 */
static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
				      struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
			 "Header done. Next segment size %u total_size %u\n",
			 tcp_sw_conn->out.segment.size,
			 tcp_sw_conn->out.segment.total_size);
	return 0;
}

static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
				      hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}
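
iscsi_tcp_dgst_header() is provided by libiscsi_tcp. Since the tfm here is
allocated with the CRYPTO_ALG_ASYNC bit masked out, the digest completes
inline, and the helper is essentially a one-shot crc32c over the header
bytes, roughly:

/* Rough shape of libiscsi_tcp's helper: one-shot crc32c over the PDU
 * header, digest written into the trailing ISCSI_DIGEST_SIZE bytes. */
static void iscsi_tcp_dgst_header(struct ahash_request *hash,
				  const void *hdr, size_t hdrlen,
				  unsigned char digest[ISCSI_DIGEST_SIZE])
{
	struct scatterlist sg;

	sg_init_one(&sg, hdr, hdrlen);
	ahash_request_set_crypt(hash, &sg, digest, hdrlen);
	crypto_ahash_digest(hash);
}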

/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct ahash_request *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = tcp_sw_conn->tx_hash;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len,
				     NULL, tx_hash);
}

static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct ahash_request *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = tcp_sw_conn->tx_hash;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				data, len, NULL, tx_hash);
}

static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
				 unsigned int offset, unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	int err = 0;

	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);

	if (!count)
		return 0;

	if (!task->sc)
		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
	else {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);

		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
						  sdb->table.nents, offset,
						  count);
	}

	if (err) {
		/* got invalid offset/len */
		return -EIO;
	}
	return 0;
}

static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}

static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct crypto_ahash *tfm;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto free_conn;

	tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!tcp_sw_conn->tx_hash)
		goto free_tfm;
	ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);

	tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!tcp_sw_conn->rx_hash)
		goto free_tx_hash;
	ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);

	tcp_conn->rx_hash = tcp_sw_conn->rx_hash;

	return cls_conn;

free_tx_hash:
	ahash_request_free(tcp_sw_conn->tx_hash);
free_tfm:
	crypto_free_ahash(tfm);
free_conn:
	iscsi_conn_printk(KERN_ERR, conn,
			  "Could not create connection due to crc32c "
			  "loading error. Make sure the crc32c "
			  "module is built as a module or into the "
			  "kernel\n");
	iscsi_tcp_conn_teardown(cls_conn);
	return NULL;
}

static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	if (!sock)
		return;

	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(conn);
	sock_put(sock->sk);

	spin_lock_bh(&session->frwd_lock);
	tcp_sw_conn->sock = NULL;
	spin_unlock_bh(&session->frwd_lock);
	sockfd_put(sock);
}

static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);

	ahash_request_free(tcp_sw_conn->rx_hash);
	if (tcp_sw_conn->tx_hash) {
		struct crypto_ahash *tfm;

		tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
		ahash_request_free(tcp_sw_conn->tx_hash);
		crypto_free_ahash(tfm);
	}

	iscsi_tcp_conn_teardown(cls_conn);
}

static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;

	sock->sk->sk_err = EIO;
	wake_up_interruptible(sk_sleep(sock->sk));

	/* stop xmit side */
	iscsi_suspend_tx(conn);

	/* stop recv side and release socket */
	iscsi_sw_tcp_release_conn(conn);

	iscsi_conn_stop(cls_conn, flag);
}

static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	spin_lock_bh(&session->frwd_lock);
	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;
	spin_unlock_bh(&session->frwd_lock);

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = SK_CAN_REUSE;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;
	sk_set_memalloc(sk);

	iscsi_sw_tcp_conn_set_callbacks(conn);
	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_sw_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sockaddr_in6 addr;
	int rc, len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
	case ISCSI_PARAM_LOCAL_PORT:
		spin_lock_bh(&conn->session->frwd_lock);
		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
			spin_unlock_bh(&conn->session->frwd_lock);
			return -ENOTCONN;
		}
		if (param == ISCSI_PARAM_LOCAL_PORT)
			rc = kernel_getsockname(tcp_sw_conn->sock,
						(struct sockaddr *)&addr, &len);
		else
			rc = kernel_getpeername(tcp_sw_conn->sock,
						(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&conn->session->frwd_lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return 0;
}

static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
				       enum iscsi_host_param param, char *buf)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
	struct iscsi_session *session = tcp_sw_host->session;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct sockaddr_in6 addr;
	int rc, len;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		if (!session)
			return -ENOTCONN;

		spin_lock_bh(&session->frwd_lock);
		conn = session->leadconn;
		if (!conn) {
			spin_unlock_bh(&session->frwd_lock);
			return -ENOTCONN;
		}
		tcp_conn = conn->dd_data;

		tcp_sw_conn = tcp_conn->dd_data;
		if (!tcp_sw_conn->sock) {
			spin_unlock_bh(&session->frwd_lock);
			return -ENOTCONN;
		}

		rc = kernel_getsockname(tcp_sw_conn->sock,
					(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&session->frwd_lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return 0;
}

static void
iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			    struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;

	iscsi_tcp_conn_get_stats(cls_conn, stats);
}

static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_sw_tcp_host *tcp_sw_host;
	struct Scsi_Host *shost;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
				 sizeof(struct iscsi_sw_tcp_host), 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;
	tcp_sw_host = iscsi_host_priv(shost);
	tcp_sw_host->session = session;

	shost->can_queue = session->scsi_cmds_max;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}

static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);

	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}

static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
{
	set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
	return 0;
}
Example #15
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
		     unsigned int tcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
	unsigned int i, j, k, temp;
	struct scatterlist sg[8];
	char result[64];
	struct ahash_request *req;
	struct tcrypt_result tresult;
	int ret;
	void *hash_buff;

	init_completion(&tresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", algo);
		ret = -ENOMEM;
		goto out_noreq;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tcrypt_complete, &tresult);

	for (i = 0; i < tcount; i++) {
		memset(result, 0, 64);

		hash_buff = xbuf[0];

		memcpy(hash_buff, template[i].plaintext, template[i].psize);
Example #16
int hmac_md5(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
                v_U8_t *output, v_U8_t outlen)
{
    int ret = 0;
    struct crypto_ahash *tfm;
    struct scatterlist sg;
    struct ahash_request *req;
    struct hmac_md5_result tresult;
    void *hash_buff = NULL;

    unsigned char hash_result[64];
    int i;

    memset(output, 0, outlen);

    init_completion(&tresult.completion);

    tfm = wcnss_wlan_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
    if (IS_ERR(tfm)) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_alloc_ahash failed");
        ret = PTR_ERR(tfm);
        goto err_tfm;
    }

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to allocate request for hmac(md5)");
        ret = -ENOMEM;
        goto err_req;
    }

    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        hmac_md5_complete, &tresult);

    hash_buff = kzalloc(psize, GFP_KERNEL);
    if (!hash_buff) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to kzalloc hash_buff");
        ret = -ENOMEM;
        goto err_hash_buf;
    }

    memset(hash_result, 0, 64);
    memcpy(hash_buff, plaintext, psize);
    sg_init_one(&sg, hash_buff, psize);

    if (ksize) {
        crypto_ahash_clear_flags(tfm, ~0);
        ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);

        if (ret) {
            VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_ahash_setkey failed");
            goto err_setkey;
        }
    }

    ahash_request_set_crypt(req, &sg, hash_result, psize);
    ret = wcnss_wlan_crypto_ahash_digest(req);

    VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "ret 0x%x");

    switch (ret) {
        case 0:
            for (i=0; i< outlen; i++)
                    output[i] = hash_result[i];
            break;
        case -EINPROGRESS:
        case -EBUSY:
             ret = wait_for_completion_interruptible(&tresult.completion);
             if (!ret && !tresult.err) {
                  /* as in the synchronous path, copy the digest out */
                  for (i = 0; i < outlen; i++)
                      output[i] = hash_result[i];
                  INIT_COMPLETION(tresult.completion);
                  break;
             } else {
                 VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "wait_for_completion_interruptible failed");
                 if (!ret)
                     ret = tresult.err;
                 goto out;
             }
        default:
              goto out;
        }

out:
err_setkey:
        kfree(hash_buff);
err_hash_buf:
        ahash_request_free(req);
err_req:
        wcnss_wlan_crypto_free_ahash(tfm);
err_tfm:
        return ret;
}
Example #17
static int tegra_crypto_sha(struct tegra_sha_req *sha_req)
{

	struct crypto_ahash *tfm;
	struct scatterlist sg[1];
	char result[64];
	struct ahash_request *req;
	struct tegra_crypto_completion sha_complete;
	void *hash_buff;
	unsigned long *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	tfm = crypto_alloc_ahash(sha_req->algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
		       "%ld\n", sha_req->algo, PTR_ERR(tfm));
		goto out_alloc;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", sha_req->algo);
		goto out_noreq;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto out_buf;
	}

	init_completion(&sha_complete.restart);
	/* Assumed fix, as in the RSA example above: without a callback
	 * nothing signals sha_complete.restart for async requests. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tegra_crypt_complete, &sha_complete);

	memset(result, 0, 64);

	hash_buff = xbuf[0];

	memcpy(hash_buff, sha_req->plaintext, sha_req->plaintext_sz);
	sg_init_one(&sg[0], hash_buff, sha_req->plaintext_sz);

	if (sha_req->keylen) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = crypto_ahash_setkey(tfm, sha_req->key,
					  sha_req->keylen);
		if (ret) {
			printk(KERN_ERR "alg: hash: setkey failed on "
			       " %s: ret=%d\n", sha_req->algo,
			       -ret);
			goto out;
		}
	}

	ahash_request_set_crypt(req, sg, result, sha_req->plaintext_sz);

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_init(req));
	if (ret) {
		pr_err("alg: hash: init failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_update(req));
	if (ret) {
		pr_err("alg: hash: update failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_final(req));
	if (ret) {
		pr_err("alg: hash: final failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = copy_to_user((void __user *)sha_req->result,
		(const void *)result, crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d) for %s\n",
				ret, sha_req->algo);
	}

out:
	free_bufs(xbuf);

out_buf:
	ahash_request_free(req);

out_noreq:
	crypto_free_ahash(tfm);

out_alloc:
	return ret;
}
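
All of these examples hand-roll the same completion-and-wait plumbing. Since
Linux 4.14 the stock helpers crypto_req_done() and crypto_wait_req() express
it directly; a minimal one-shot digest sketch (one_shot_digest() is
illustrative, and data must not live on the stack since it is mapped through
a scatterlist):

#include <crypto/hash.h>

static int one_shot_digest(const char *alg, const void *data,
			   unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* crypto_req_done() completes 'wait'; crypto_wait_req() folds the
	 * -EINPROGRESS/-EBUSY dance into a single call. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}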