Example #1
static int tegra_crypto_dev_release(struct inode *inode, struct file *filp)
{
	struct tegra_crypto_ctx *ctx = filp->private_data;

	crypto_free_ablkcipher(ctx->ecb_tfm);
	crypto_free_ablkcipher(ctx->cbc_tfm);

	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) {
		crypto_free_ablkcipher(ctx->ofb_tfm);
		crypto_free_ablkcipher(ctx->ctr_tfm);
	}

	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2 &&
			tegra_get_chipid() != TEGRA_CHIPID_TEGRA3)
		crypto_free_rng(ctx->rng_drbg);
	else
		crypto_free_rng(ctx->rng);

	crypto_free_ahash(ctx->rsa2048_tfm);
	crypto_free_ahash(ctx->rsa1536_tfm);
	crypto_free_ahash(ctx->rsa1024_tfm);
	crypto_free_ahash(ctx->rsa512_tfm);

	kfree(ctx);
	filp->private_data = NULL;
	return 0;
}
Example #2
/**
 * Calculate hash digest for the passed buffer.
 *
 * This should be used when computing the hash on a single contiguous buffer.
 * It combines the hash initialization, computation, and cleanup.
 *
 * \param[in] hash_alg	id of hash algorithm (CFS_HASH_ALG_*)
 * \param[in] buf	data buffer on which to compute hash
 * \param[in] buf_len	length of \a buf in bytes
 * \param[in] key	initial value/state for algorithm, if \a key = NULL
 *			use default initial value
 * \param[in] key_len	length of \a key in bytes
 * \param[out] hash	pointer to computed hash value, if \a hash = NULL then
 *			\a hash_len is set to the digest size in bytes and
 *			-ENOSPC is returned
 * \param[in,out] hash_len size of \a hash buffer
 *
 * \retval -EINVAL       \a buf, \a buf_len, \a hash_len, \a hash_alg invalid
 * \retval -ENOENT       \a hash_alg is unsupported
 * \retval -ENOSPC       \a hash is NULL, or \a hash_len less than digest size
 * \retval		0 for success
 * \retval		negative errno for other errors from lower layers.
 */
int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
			   const void *buf, unsigned int buf_len,
			   unsigned char *key, unsigned int key_len,
			   unsigned char *hash, unsigned int *hash_len)
{
	struct scatterlist	sl;
	struct ahash_request *req;
	int			err;
	const struct cfs_crypto_hash_type	*type;

	if (!buf || buf_len == 0 || !hash_len)
		return -EINVAL;

	err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
	if (err != 0)
		return err;

	if (!hash || *hash_len < type->cht_size) {
		*hash_len = type->cht_size;
		crypto_free_ahash(crypto_ahash_reqtfm(req));
		ahash_request_free(req);
		return -ENOSPC;
	}
	sg_init_one(&sl, (void *)buf, buf_len);

	ahash_request_set_crypt(req, &sl, hash, sl.length);
	err = crypto_ahash_digest(req);
	crypto_free_ahash(crypto_ahash_reqtfm(req));
	ahash_request_free(req);

	return err;
}
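
A minimal caller sketch for the helper above, assuming the libcfs headers and the CFS_HASH_ALG_SHA256 id; the wrapper name and buffer are hypothetical. It shows the documented two-call pattern: probe the digest size with hash == NULL (which returns -ENOSPC), then compute the digest for real.

static int example_one_shot_digest(const void *buf, unsigned int buf_len)
{
	unsigned char digest[64];
	unsigned int digest_len = 0;
	int rc;

	/* First call: hash == NULL, so only digest_len is filled in. */
	rc = cfs_crypto_hash_digest(CFS_HASH_ALG_SHA256, buf, buf_len,
				    NULL, 0, NULL, &digest_len);
	if (rc != -ENOSPC)
		return rc;
	if (digest_len > sizeof(digest))
		return -EOVERFLOW;

	/* Second call: buffer is large enough, compute the digest. */
	return cfs_crypto_hash_digest(CFS_HASH_ALG_SHA256, buf, buf_len,
				      NULL, 0, digest, &digest_len);
}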
Example #3
/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a req for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks.  The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in]  hash_alg	hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type	pointer to the hash description in hash_types[] array
 * \param[in,out] req	ahash request to be initialized
 * \param[in]  key	initial hash value/state, NULL to use default value
 * \param[in]  key_len	length of \a key
 *
 * \retval		0 on success
 * \retval		negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct ahash_request **req,
				 unsigned char *key,
				 unsigned int key_len)
{
	struct crypto_ahash *tfm;
	int err = 0;

	*type = cfs_crypto_hash_type(hash_alg);

	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}
	tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(tfm);
	}

	*req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!*req) {
		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
		       (*type)->cht_name);
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(*req, 0, NULL, NULL);

	if (key)
		err = crypto_ahash_setkey(tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_ahash_setkey(tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);

	if (err != 0) {
		ahash_request_free(*req);
		crypto_free_ahash(tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
	       cfs_crypto_hash_speeds[hash_alg]);

	err = crypto_ahash_init(*req);
	if (err) {
		ahash_request_free(*req);
		crypto_free_ahash(tfm);
	}
	return err;
}
Example #4
static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
{
    struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);

    crypto_free_ahash(ctx->ghash);
    crypto_free_ablkcipher(ctx->ctr);
}
Example #5
int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
			int hmac_mode, void *mackey, size_t mackeylen)
{
	int ret;

	hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
	if (unlikely(IS_ERR(hdata->async.s))) {
		ddebug(1, "Failed to load transform for %s", alg_name);
		return -EINVAL;
	}

	/* Copy the key from user and set to TFM. */
	if (hmac_mode != 0) {
		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
		if (unlikely(ret)) {
			ddebug(1, "Setting hmac key failed for %s-%zu.",
					alg_name, mackeylen*8);
			ret = -EINVAL;
			goto error;
		}
	}

	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
	hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

	hdata->async.result = kzalloc(sizeof(*hdata->async.result), GFP_KERNEL);
	if (unlikely(!hdata->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&hdata->async.result->completion);

	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
	if (unlikely(!hdata->async.request)) {
		derr(0, "error allocating async crypto request");
		ret = -ENOMEM;
		goto error;
	}

	ahash_request_set_callback(hdata->async.request,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			cryptodev_complete, hdata->async.result);

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		derr(0, "error in crypto_hash_init()");
		goto error_request;
	}

	hdata->init = 1;
	return 0;

error_request:
	ahash_request_free(hdata->async.request);
error:
	kfree(hdata->async.result);
	crypto_free_ahash(hdata->async.s);
	return ret;
}
Example #6
static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}
Example #7
/**
 * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
 *
 * \param[in]	req		ahash request
 * \param[out]	hash		pointer to hash buffer to store hash digest
 * \param[in,out] hash_len	pointer to hash buffer size, if \a hash == NULL
 *				or \a hash_len == NULL only free \a req instead
 *				of computing the hash
 *
 * \retval		0 for success
 * \retval		-EOVERFLOW if hash_len is too small for the hash digest
 * \retval		negative errno for other errors from lower layers
 */
int cfs_crypto_hash_final(struct ahash_request *req,
			  unsigned char *hash, unsigned int *hash_len)
{
	int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	int err;

	if (!hash || !hash_len) {
		err = 0;
		goto free;
	}
	if (*hash_len < size) {
		err = -EOVERFLOW;
		goto free;
	}

	ahash_request_set_crypt(req, NULL, hash, 0);
	err = crypto_ahash_final(req);
	if (err == 0)
		*hash_len = size;
free:
	crypto_free_ahash(crypto_ahash_reqtfm(req));
	ahash_request_free(req);

	return err;
}
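
A sketch of the chunked flow this helper completes, assuming the companion cfs_crypto_hash_init()/cfs_crypto_hash_update() helpers from the same libcfs file (not shown in these examples); the wrapper name is hypothetical. Note how passing hash == NULL to cfs_crypto_hash_final() is used purely to free the request on the error path.

static int example_chunked_hash(const void *a, unsigned int a_len,
				const void *b, unsigned int b_len,
				unsigned char *digest, unsigned int *dlen)
{
	struct ahash_request *req;
	int rc;

	req = cfs_crypto_hash_init(CFS_HASH_ALG_CRC32C, NULL, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	rc = cfs_crypto_hash_update(req, a, a_len);
	if (!rc)
		rc = cfs_crypto_hash_update(req, b, b_len);
	if (rc) {
		/* hash == NULL: just free the request and its tfm */
		cfs_crypto_hash_final(req, NULL, NULL);
		return rc;
	}
	return cfs_crypto_hash_final(req, digest, dlen);
}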
Example #8
void cryptodev_hash_deinit(struct hash_data *hdata)
{
	if (hdata->init) {
		ahash_request_free(hdata->async.request);
		crypto_free_ahash(hdata->async.s);
		hdata->init = 0;
	}
}
Example #9
void fmpdev_hash_deinit(struct hash_data *hdata)
{
	if (hdata->init) {
		if (hdata->async.request)
			ahash_request_free(hdata->async.request);
		kfree(hdata->async.result);
		if (hdata->async.s)
			crypto_free_ahash(hdata->async.s);
		hdata->init = 0;
	}
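
A usage sketch pairing cryptodev_hash_init() from Example #5 with the deinit above. cryptodev_hash_update() and cryptodev_hash_final() are assumed from the same cryptodev-linux cryptlib, with the signatures shown here; the wrapper itself is hypothetical.

static int example_hmac_once(const char *alg_name, void *key, size_t keylen,
			     void *data, size_t len, void *out)
{
	struct hash_data hdata = {};
	struct scatterlist sg;
	int rc;

	/* hmac_mode = 1: alg_name is an hmac(...) transform, key is set */
	rc = cryptodev_hash_init(&hdata, alg_name, 1, key, keylen);
	if (rc)
		return rc;

	sg_init_one(&sg, data, len);
	rc = cryptodev_hash_update(&hdata, &sg, len);
	if (!rc)
		rc = cryptodev_hash_final(&hdata, out);

	cryptodev_hash_deinit(&hdata);
	return rc;
}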
}
Example #10
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
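
mv_cesa_ahmac_pad_init() is not shown in this example; what it computes is the standard HMAC (RFC 2104) pad derivation that precedes the two partial hashes. A hedged sketch of that derivation with a hypothetical helper name (a key longer than the block size must already have been hashed down by the caller):

static void example_hmac_derive_pads(const u8 *key, unsigned int keylen,
				     u8 *ipad, u8 *opad,
				     unsigned int blocksize)
{
	unsigned int i;

	/* Zero-pad the key to one block, then XOR in the constants. */
	memset(ipad, 0, blocksize);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}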
Example #11
static int hmac_sha_digest(const char *algo, char *data_in, size_t dlen,
			char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_sha_result tresult;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);
	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
			printk(KERN_ERR "crypto_alloc_ahash failed\n");
			rc = PTR_ERR(tfm);
			goto err_tfm;
	}
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
			printk(KERN_ERR "failed to allocate request\n");
			rc = -ENOMEM;
			goto err_req;
	}
	if (crypto_ahash_digestsize(tfm) > outlen) {
			printk(KERN_ERR "tfm size > result buffer\n");
			rc = -EINVAL;
			goto out;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
			hmac_sha_complete, &tresult);

	sg_init_one(&sg, data_in, dlen);

	crypto_ahash_clear_flags(tfm, ~0);
	ahash_request_set_crypt(req, &sg, hash_out, dlen);
	rc = do_one_ahash_op(req, crypto_ahash_digest(req));

out:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
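
The snippet above leans on helpers that are not shown. A minimal sketch of what they plausibly look like, modeled on the kernel's tcrypt test code; struct hmac_sha_result and both function bodies are assumptions, not the original source.

struct hmac_sha_result {
	struct completion completion;
	int err;
};

/* async completion callback registered via ahash_request_set_callback() */
static void hmac_sha_complete(struct crypto_async_request *req, int err)
{
	struct hmac_sha_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* run one ahash op, waiting for completion if it went asynchronous */
static int do_one_ahash_op(struct ahash_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct hmac_sha_result *res = req->base.data;

		wait_for_completion(&res->completion);
		reinit_completion(&res->completion);
		ret = res->err;
	}
	return ret;
}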
Example #12
static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
{
    struct crypto_instance *inst = (void *)tfm->__crt_alg;
    struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
    struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
    struct crypto_ablkcipher *ctr;
    struct crypto_ahash *ghash;
    unsigned long align;
    int err;

    ghash = crypto_spawn_ahash(&ictx->ghash);
    if (IS_ERR(ghash))
        return PTR_ERR(ghash);

    ctr = crypto_spawn_skcipher(&ictx->ctr);
    err = PTR_ERR(ctr);
    if (IS_ERR(ctr))
        goto err_free_hash;

    ctx->ctr = ctr;
    ctx->ghash = ghash;

    align = crypto_tfm_alg_alignmask(tfm);
    align &= ~(crypto_tfm_ctx_alignment() - 1);
    tfm->crt_aead.reqsize = align +
                            offsetof(struct crypto_gcm_req_priv_ctx, u) +
                            max(sizeof(struct ablkcipher_request) +
                                crypto_ablkcipher_reqsize(ctr),
                                sizeof(struct ahash_request) +
                                crypto_ahash_reqsize(ghash));

    return 0;

err_free_hash:
    crypto_free_ahash(ghash);
    return err;
}
Example #13
static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}
Example #14
static int tegra_crypto_sha(struct tegra_sha_req *sha_req)
{

	struct crypto_ahash *tfm;
	struct scatterlist sg[1];
	char result[64];
	struct ahash_request *req;
	struct tegra_crypto_completion sha_complete;
	void *hash_buff;
	unsigned long *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	tfm = crypto_alloc_ahash(sha_req->algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
		       "%ld\n", sha_req->algo, PTR_ERR(tfm));
		goto out_alloc;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", sha_req->algo);
		goto out_noreq;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto out_buf;
	}

	init_completion(&sha_complete.restart);

	memset(result, 0, 64);

	hash_buff = xbuf[0];

	memcpy(hash_buff, sha_req->plaintext, sha_req->plaintext_sz);
	sg_init_one(&sg[0], hash_buff, sha_req->plaintext_sz);

	if (sha_req->keylen) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = crypto_ahash_setkey(tfm, sha_req->key,
					  sha_req->keylen);
		if (ret) {
			printk(KERN_ERR "alg: hash: setkey failed on "
			       " %s: ret=%d\n", sha_req->algo,
			       -ret);
			goto out;
		}
	}

	ahash_request_set_crypt(req, sg, result, sha_req->plaintext_sz);

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_init(req));
	if (ret) {
		pr_err("alg: hash: init failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_update(req));
	if (ret) {
		pr_err("alg: hash: update failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_final(req));
	if (ret) {
		pr_err("alg: hash: final failed on "
		       "for %s: ret=%d\n", sha_req->algo, -ret);
		goto out;
	}

	ret = copy_to_user((void __user *)sha_req->result,
		(const void *)result, crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d) for %s\n",
				ret, sha_req->algo);
	}

out:
	free_bufs(xbuf);

out_buf:
	ahash_request_free(req);

out_noreq:
	crypto_free_ahash(tfm);

out_alloc:
	return ret;
}
Example #15
int hmac_sha1(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
              v_U8_t *output, v_U8_t outlen)
{
    int ret = 0;
    struct crypto_ahash *tfm;
    struct scatterlist sg;
    struct ahash_request *req;
    struct hmac_sha1_result tresult;
    void *hash_buff = NULL;

    unsigned char hash_result[64];
    int i;

    memset(output, 0, outlen);

    init_completion(&tresult.completion);

#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    tfm = crypto_alloc_ahash("hmac(sha1)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
#else
    tfm = wcnss_wlan_crypto_alloc_ahash("hmac(sha1)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
#endif
    if (IS_ERR(tfm)) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_alloc_ahash failed");
        ret = PTR_ERR(tfm);
        goto err_tfm;
    }

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to allocate request for hmac(sha1)");
        ret = -ENOMEM;
        goto err_req;
    }

    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                               hmac_sha1_complete, &tresult);

    hash_buff = kzalloc(psize, GFP_KERNEL);
    if (!hash_buff) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to kzalloc hash_buff");
        ret = -ENOMEM;
        goto err_hash_buf;
    }

    memset(hash_result, 0, 64);
    vos_mem_copy(hash_buff, plaintext, psize);
    sg_init_one(&sg, hash_buff, psize);

    if (ksize) {
        crypto_ahash_clear_flags(tfm, ~0);
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
        ret = crypto_ahash_setkey(tfm, key, ksize);
#else
        ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);
#endif

        if (ret) {
            VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_ahash_setkey failed");
            goto err_setkey;
        }
    }

    ahash_request_set_crypt(req, &sg, hash_result, psize);
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    ret = crypto_ahash_digest(req);
#else
    ret = wcnss_wlan_crypto_ahash_digest(req);
#endif
    VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

    switch (ret) {
    case 0:
        for (i=0; i< outlen; i++)
            output[i] = hash_result[i];
        break;
    case -EINPROGRESS:
    case -EBUSY:
        ret = wait_for_completion_interruptible(&tresult.completion);
        if (!ret && !tresult.err) {
            for (i = 0; i < outlen; i++)
               output[i] = hash_result[i];
            INIT_COMPLETION(tresult.completion);
            break;
        } else {
            VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "wait_for_completion_interruptible failed");
            if (!ret)
                ret = tresult.err;
            goto out;
        }
    default:
        goto out;
    }

out:
err_setkey:
    kfree(hash_buff);
err_hash_buf:
    ahash_request_free(req);
err_req:
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    crypto_free_ahash(tfm);
#else
    wcnss_wlan_crypto_free_ahash(tfm);
#endif
err_tfm:
    return ret;
}
Example #16
int hmac_md5(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
                v_U8_t *output, v_U8_t outlen)
{
    int ret = 0;
    struct crypto_ahash *tfm;
    struct scatterlist sg;
    struct ahash_request *req;
    struct hmac_md5_result tresult = {.err = 0};
    void *hash_buff = NULL;

    unsigned char hash_result[64];
    int i;

    memset(output, 0, outlen);

    init_completion(&tresult.completion);

#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    tfm = crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
#else
    tfm = wcnss_wlan_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
                                        CRYPTO_ALG_TYPE_AHASH_MASK);
#endif
    if (IS_ERR(tfm)) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_alloc_ahash failed");
                ret = PTR_ERR(tfm);
                goto err_tfm;
    }

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to allocate request for hmac(md5)");
        ret = -ENOMEM;
        goto err_req;
    }

    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        hmac_md5_complete, &tresult);

    hash_buff = kzalloc(psize, GFP_KERNEL);
    if (!hash_buff) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to kzalloc hash_buff");
        ret = -ENOMEM;
        goto err_hash_buf;
    }

    memset(hash_result, 0, 64);
    vos_mem_copy(hash_buff, plaintext, psize);
    sg_init_one(&sg, hash_buff, psize);

    if (ksize) {
        crypto_ahash_clear_flags(tfm, ~0);
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
        ret = crypto_ahash_setkey(tfm, key, ksize);
#else
        ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);
#endif
        if (ret) {
            VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_ahash_setkey failed");
            goto err_setkey;
        }
    }

    ahash_request_set_crypt(req, &sg, hash_result, psize);
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
    ret = crypto_ahash_digest(req);
#else
    ret = wcnss_wlan_crypto_ahash_digest(req);
#endif

    VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

    switch (ret) {
        case 0:
            for (i=0; i< outlen; i++)
                    output[i] = hash_result[i];
            break;
        case -EINPROGRESS:
        case -EBUSY:
             ret = wait_for_completion_interruptible(&tresult.completion);
             if (!ret && !tresult.err) {
                 for (i = 0; i < outlen; i++)
                    output[i] = hash_result[i];
                  INIT_COMPLETION(tresult.completion);
                  break;
             } else {
                 VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "wait_for_completion_interruptible failed");
                 if (!ret)
                     ret = tresult.err;
                 goto out;
             }
        default:
              goto out;
        }

out:
err_setkey:
        kfree(hash_buff);
err_hash_buf:
        ahash_request_free(req);
err_req:
#if  !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
        crypto_free_ahash(tfm);
#else
        wcnss_wlan_crypto_free_ahash(tfm);
#endif
err_tfm:
        return ret;
}

VOS_STATUS vos_md5_hmac_str(v_U32_t cryptHandle, /* Handle */
           v_U8_t *pText, /* pointer to data stream */
           v_U32_t textLen, /* length of data stream */
           v_U8_t *pKey, /* pointer to authentication key */
           v_U32_t keyLen, /* length of authentication key */
           v_U8_t digest[VOS_DIGEST_MD5_SIZE])/* caller digest to be filled in */
{
    int ret = 0;

    ret = hmac_md5(
            pKey,                   //v_U8_t *key,
            (v_U8_t) keyLen,        //v_U8_t ksize,
            (char *)pText,          //char *plaintext,
            (v_U8_t) textLen,       //v_U8_t psize,
            digest,                 //v_U8_t *output,
            VOS_DIGEST_MD5_SIZE     //v_U8_t outlen
            );

    if (ret != 0) {
        VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,"hmac_md5() call failed");
        return VOS_STATUS_E_FAULT;
    }

    return VOS_STATUS_SUCCESS;
}
Example #17
/*
 * Sha/HMAC self tests
 */
int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index = 0, num_tv;
	char *k_out_buf = NULL;
	struct scatterlist fips_sg;
	struct crypto_ahash *tfm;
	struct ahash_request *ahash_req;
	struct _fips_completion fips_completion;
	struct _fips_test_vector_sha_hmac tv_sha_hmac;

	num_tv = (sizeof(fips_test_vector_sha_hmac)) /
	(sizeof(struct _fips_test_vector_sha_hmac));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {
		memcpy(&tv_sha_hmac, &fips_test_vector_sha_hmac[tv_index],
			(sizeof(struct _fips_test_vector_sha_hmac)));
		k_out_buf = kzalloc(tv_sha_hmac.diglen, GFP_KERNEL);
		if (k_out_buf == NULL) {
			pr_err("qcrypto: Failed to allocate memory for k_out_buf %ld\n",
				PTR_ERR(k_out_buf));
			return -ENOMEM;
		}

		memset(k_out_buf, 0, tv_sha_hmac.diglen);
		init_completion(&fips_completion.completion);

		/* use_sw flags are set in dtsi file which makes
		default Linux API calls to go to s/w crypto instead
		of h/w crypto. This code makes sure that all selftests
		calls always go to h/w, independent of DTSI flags. */
		if (tv_sha_hmac.klen == 0) {
			if (selftest_d->prefix_ahash_algo)
				if (_fips_get_alg_cra_name(tv_sha_hmac
					.hash_alg, selftest_d->algo_prefix,
					strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					tv_index);
					goto clr_buf;
				}
		} else {
			if (selftest_d->prefix_hmac_algo)
				if (_fips_get_alg_cra_name(tv_sha_hmac
					.hash_alg, selftest_d->algo_prefix,
					strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					tv_index);
					goto clr_buf;
				}
		}

		tfm = crypto_alloc_ahash(tv_sha_hmac.hash_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
			tv_sha_hmac.hash_alg);
			rc = PTR_ERR(tfm);
			goto clr_buf;
		}

		ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!ahash_req) {
			pr_err("qcrypto: ahash_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}
		rc = qcrypto_ahash_set_device(ahash_req, selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
				__func__, rc);
			goto clr_ahash_req;
		}
		ahash_request_set_callback(ahash_req,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			_fips_cb, &fips_completion);

		sg_init_one(&fips_sg, &tv_sha_hmac.input[0], tv_sha_hmac.ilen);

		crypto_ahash_clear_flags(tfm, ~0);
		if (tv_sha_hmac.klen != 0) {
			rc = crypto_ahash_setkey(tfm, tv_sha_hmac.key,
				tv_sha_hmac.klen);
			if (rc) {
				pr_err("qcrypto: crypto_ahash_setkey failed\n");
				goto clr_ahash_req;
			}
		}

		ahash_request_set_crypt(ahash_req, &fips_sg, k_out_buf,
			tv_sha_hmac.ilen);
		rc = crypto_ahash_digest(ahash_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:SHA: wait_for_completion failed\n");
				goto clr_ahash_req;
			}

		}

		if (memcmp(k_out_buf, tv_sha_hmac.digest,
			tv_sha_hmac.diglen))
			rc = -1;

clr_ahash_req:
		ahash_request_free(ahash_req);
clr_tfm:
		crypto_free_ahash(tfm);
clr_buf:
		kzfree(k_out_buf);

	/* For any failure, return error */
		if (rc)
			return rc;

	}
	return rc;
}
Example #18
/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a req for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks.  The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in]  hash_alg	hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type	pointer to the hash description in hash_types[] array
 * \param[in,out] req	ahash request to be initialized
 * \param[in]  key	initial hash value/state, NULL to use default value
 * \param[in]  key_len	length of \a key
 *
 * \retval		0 on success
 * \retval		negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct ahash_request **req,
				 unsigned char *key,
				 unsigned int key_len)
{
	struct crypto_ahash *tfm;
	int err = 0;

	*type = cfs_crypto_hash_type(hash_alg);
	if (!*type) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}

	/* Keys are only supported for the hmac version */
	if (key && key_len > 0) {
		char *algo_name;

		algo_name = kasprintf(GFP_KERNEL, "hmac(%s)",
				      (*type)->cht_name);
		if (!algo_name)
			return -ENOMEM;

		tfm = crypto_alloc_ahash(algo_name, 0, CRYPTO_ALG_ASYNC);
		kfree(algo_name);
	} else {
		tfm = crypto_alloc_ahash((*type)->cht_name, 0,
					 CRYPTO_ALG_ASYNC);
	}
	if (IS_ERR(tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(tfm);
	}

	*req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!*req) {
		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
		       (*type)->cht_name);
		GOTO(out_free_tfm, err = -ENOMEM);
	}

	ahash_request_set_callback(*req, 0, NULL, NULL);

	if (key)
		err = crypto_ahash_setkey(tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_ahash_setkey(tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);
	if (err)
		GOTO(out_free_req, err);

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
	       cfs_crypto_hash_speeds[hash_alg]);

	err = crypto_ahash_init(*req);
	if (err) {
out_free_req:
		ahash_request_free(*req);
out_free_tfm:
		crypto_free_ahash(tfm);
	}
	return err;
}
Example #19
/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		unsigned int offset, copy;
		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
			flags |= MSG_MORE;

		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
						  copy, flags);
		} else {
			struct msghdr msg = { .msg_flags = flags };
			struct kvec iov = {
				.iov_base = segment->data + offset,
				.iov_len = copy
			};

			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
		}

		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}

/**
 * iscsi_sw_tcp_xmit - TCP transmit
 **/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);

	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery
	 * here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}

/**
 * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
 */
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;

	return segment->total_copied - segment->total_size;
}

static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	unsigned int noreclaim_flag;
	int rc = 0;

	noreclaim_flag = memalloc_noreclaim_save();

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0) {
			rc = -EAGAIN;
			break;
		}
		if (rc < 0)
			break;
		rc = 0;
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return rc;
}

/*
 * This is called when we're done sending the header.
 * Simply copy the data_segment to the send segment, and return.
 */
static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
				      struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
			 "Header done. Next segment size %u total_size %u\n",
			 tcp_sw_conn->out.segment.size,
			 tcp_sw_conn->out.segment.total_size);
	return 0;
}

static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
				      hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}
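
iscsi_tcp_dgst_header() lives in libiscsi_tcp and is not shown here; a hedged sketch of such a one-shot digest over the tx_hash request prepared in iscsi_sw_tcp_conn_create() below (the function name is hypothetical):

static void example_dgst_header(struct ahash_request *hash, const void *hdr,
				size_t hdrlen,
				unsigned char digest[ISCSI_DIGEST_SIZE])
{
	struct scatterlist sg;

	/* crc32c over the PDU header, result into the trailing buffer */
	sg_init_one(&sg, hdr, hdrlen);
	ahash_request_set_crypt(hash, &sg, digest, hdrlen);
	crypto_ahash_digest(hash);
}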

/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct ahash_request *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = tcp_sw_conn->tx_hash;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len,
				     NULL, tx_hash);
}

static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct ahash_request *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = tcp_sw_conn->tx_hash;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				data, len, NULL, tx_hash);
}

static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
				 unsigned int offset, unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	int err = 0;

	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);

	if (!count)
		return 0;

	if (!task->sc)
		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
	else {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);

		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
						  sdb->table.nents, offset,
						  count);
	}

	if (err) {
		/* got invalid offset/len */
		return -EIO;
	}
	return 0;
}

static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}

static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct crypto_ahash *tfm;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto free_conn;

	tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!tcp_sw_conn->tx_hash)
		goto free_tfm;
	ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);

	tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!tcp_sw_conn->rx_hash)
		goto free_tx_hash;
	ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);

	tcp_conn->rx_hash = tcp_sw_conn->rx_hash;

	return cls_conn;

free_tx_hash:
	ahash_request_free(tcp_sw_conn->tx_hash);
free_tfm:
	crypto_free_ahash(tfm);
free_conn:
	iscsi_conn_printk(KERN_ERR, conn,
			  "Could not create connection due to crc32c "
			  "loading error. Make sure the crc32c "
			  "module is built as a module or into the "
			  "kernel\n");
	iscsi_tcp_conn_teardown(cls_conn);
	return NULL;
}

static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	if (!sock)
		return;

	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(conn);
	sock_put(sock->sk);

	spin_lock_bh(&session->frwd_lock);
	tcp_sw_conn->sock = NULL;
	spin_unlock_bh(&session->frwd_lock);
	sockfd_put(sock);
}

static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);

	ahash_request_free(tcp_sw_conn->rx_hash);
	if (tcp_sw_conn->tx_hash) {
		struct crypto_ahash *tfm;

		tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
		ahash_request_free(tcp_sw_conn->tx_hash);
		crypto_free_ahash(tfm);
	}

	iscsi_tcp_conn_teardown(cls_conn);
}

static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;

	sock->sk->sk_err = EIO;
	wake_up_interruptible(sk_sleep(sock->sk));

	/* stop xmit side */
	iscsi_suspend_tx(conn);

	/* stop recv side and release socket */
	iscsi_sw_tcp_release_conn(conn);

	iscsi_conn_stop(cls_conn, flag);
}

static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	spin_lock_bh(&session->frwd_lock);
	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;
	spin_unlock_bh(&session->frwd_lock);

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = SK_CAN_REUSE;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;
	sk_set_memalloc(sk);

	iscsi_sw_tcp_conn_set_callbacks(conn);
	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_sw_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sockaddr_in6 addr;
	int rc, len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
	case ISCSI_PARAM_LOCAL_PORT:
		spin_lock_bh(&conn->session->frwd_lock);
		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
			spin_unlock_bh(&conn->session->frwd_lock);
			return -ENOTCONN;
		}
		if (param == ISCSI_PARAM_LOCAL_PORT)
			rc = kernel_getsockname(tcp_sw_conn->sock,
						(struct sockaddr *)&addr, &len);
		else
			rc = kernel_getpeername(tcp_sw_conn->sock,
						(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&conn->session->frwd_lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return 0;
}

static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
				       enum iscsi_host_param param, char *buf)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
	struct iscsi_session *session = tcp_sw_host->session;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct sockaddr_in6 addr;
	int rc, len;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		if (!session)
			return -ENOTCONN;

		spin_lock_bh(&session->frwd_lock);
		conn = session->leadconn;
		if (!conn) {
			spin_unlock_bh(&session->frwd_lock);
			return -ENOTCONN;
		}
		tcp_conn = conn->dd_data;

		tcp_sw_conn = tcp_conn->dd_data;
		if (!tcp_sw_conn->sock) {
			spin_unlock_bh(&session->frwd_lock);
			return -ENOTCONN;
		}

		rc = kernel_getsockname(tcp_sw_conn->sock,
					(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&session->frwd_lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return 0;
}

static void
iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			    struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;

	iscsi_tcp_conn_get_stats(cls_conn, stats);
}

static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_sw_tcp_host *tcp_sw_host;
	struct Scsi_Host *shost;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
				 sizeof(struct iscsi_sw_tcp_host), 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;
	tcp_sw_host = iscsi_host_priv(shost);
	tcp_sw_host->session = session;

	shost->can_queue = session->scsi_cmds_max;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}

static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);

	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}

static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_LOCAL_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
{
	set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
	return 0;
}
Example #20
static int hmac_sha_update(const char *algo, char *data_in, size_t dlen,
			char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg[TVMEMSIZE];
	struct ahash_request *req;
	struct hmac_sha_result tresult;
	int i, j;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);
	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
			printk(KERN_ERR "crypto_alloc_ahash failed\n");
			rc = PTR_ERR(tfm);
			goto err_tfm;
	}
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
			printk(KERN_ERR "failed to allocate request\n");
			rc = -ENOMEM;
			goto err_req;
	}
	if (crypto_ahash_digestsize(tfm) > outlen) {
			printk(KERN_ERR "tfm size > result buffer\n");
			rc = -EINVAL;
			goto out;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
			hmac_sha_complete, &tresult);

	sg_init_table(sg, TVMEMSIZE);

	i = 0;
	j = dlen;

	while (j > PAGE_SIZE) {
		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
		memcpy(tvmem[i], data_in + i * PAGE_SIZE, PAGE_SIZE);
		i++;
		j -= PAGE_SIZE;
	}
	sg_set_buf(sg + i, tvmem[i], j);
	memcpy(tvmem[i], data_in + i * PAGE_SIZE, j);

	crypto_ahash_clear_flags(tfm, ~0);
	ahash_request_set_crypt(req, sg, hash_out, dlen);
	rc = do_one_ahash_op(req, crypto_ahash_init(req));
	if (rc)
		goto out;
	rc = do_one_ahash_op(req, crypto_ahash_update(req));
	if (rc)
		goto out;

	rc = do_one_ahash_op(req, crypto_ahash_final(req));

out:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
Example #21
File: sha.c Project: DenisLug/mptcp
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct qce_ahash_result result;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
				       CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.error;
	}

	if (ret)
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}
Example #22
void wcnss_wlan_crypto_free_ahash(struct crypto_ahash *tfm)
{
	crypto_free_ahash(tfm);
}
Example #23
static int tegra_crypto_dev_open(struct inode *inode, struct file *filp)
{
	struct tegra_crypto_ctx *ctx;
	int ret = 0;

	ctx = kzalloc(sizeof(struct tegra_crypto_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("no memory for context\n");
		return -ENOMEM;
	}

	ctx->ecb_tfm = crypto_alloc_ablkcipher("ecb-aes-tegra",
		CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
	if (IS_ERR(ctx->ecb_tfm)) {
		pr_err("Failed to load transform for ecb-aes-tegra: %ld\n",
			PTR_ERR(ctx->ecb_tfm));
		ret = PTR_ERR(ctx->ecb_tfm);
		goto fail_ecb;
	}

	ctx->cbc_tfm = crypto_alloc_ablkcipher("cbc-aes-tegra",
		CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
	if (IS_ERR(ctx->cbc_tfm)) {
		pr_err("Failed to load transform for cbc-aes-tegra: %ld\n",
			PTR_ERR(ctx->cbc_tfm));
		ret = PTR_ERR(ctx->cbc_tfm);
		goto fail_cbc;
	}

	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) {
		ctx->ofb_tfm = crypto_alloc_ablkcipher("ofb-aes-tegra",
			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
		if (IS_ERR(ctx->ofb_tfm)) {
			pr_err("Failed to load transform for ofb-aes-tegra: %ld\n",
				PTR_ERR(ctx->ofb_tfm));
			ret = PTR_ERR(ctx->ofb_tfm);
			goto fail_ofb;
		}

		ctx->ctr_tfm = crypto_alloc_ablkcipher("ctr-aes-tegra",
			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
		if (IS_ERR(ctx->ctr_tfm)) {
			pr_err("Failed to load transform for ctr-aes-tegra: %ld\n",
				PTR_ERR(ctx->ctr_tfm));
			ret = PTR_ERR(ctx->ctr_tfm);
			goto fail_ctr;
		}
	}

	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2 &&
			tegra_get_chipid() != TEGRA_CHIPID_TEGRA3) {
		ctx->rng_drbg = crypto_alloc_rng("rng_drbg-aes-tegra",
			CRYPTO_ALG_TYPE_RNG, 0);
		if (IS_ERR(ctx->rng_drbg)) {
			pr_err("Failed to load transform for rng_drbg tegra: %ld\n",
				PTR_ERR(ctx->rng_drbg));
			ret = PTR_ERR(ctx->rng_drbg);
			goto fail_rng;
		}
	} else {
		ctx->rng = crypto_alloc_rng("rng-aes-tegra",
			CRYPTO_ALG_TYPE_RNG, 0);
		if (IS_ERR(ctx->rng)) {
			pr_err("Failed to load transform for tegra rng: %ld\n",
				PTR_ERR(ctx->rng));
			ret = PTR_ERR(ctx->rng);
			goto fail_rng;
		}
	}

	ctx->rsa512_tfm = crypto_alloc_ahash("tegra-se-rsa512",
					CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(ctx->rsa512_tfm)) {
		pr_err("Failed to load transform for rsa512: %ld\n",
			PTR_ERR(ctx->rsa512_tfm));
		ret = PTR_ERR(ctx->rsa512_tfm);
		goto fail_rsa512;
	}

	ctx->rsa1024_tfm = crypto_alloc_ahash("tegra-se-rsa1024",
					CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(ctx->rsa1024_tfm)) {
		pr_err("Failed to load transform for rsa1024: %ld\n",
			PTR_ERR(ctx->rsa1024_tfm));
		ret = PTR_ERR(ctx->rsa1024_tfm);
		goto fail_rsa1024;
	}

	ctx->rsa1536_tfm = crypto_alloc_ahash("tegra-se-rsa1536",
					CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(ctx->rsa1536_tfm)) {
		pr_err("Failed to load transform for rsa1536: %ld\n",
			PTR_ERR(ctx->rsa1536_tfm));
		ret = PTR_ERR(ctx->rsa1536_tfm);
		goto fail_rsa1536;
	}

	ctx->rsa2048_tfm = crypto_alloc_ahash("tegra-se-rsa2048",
					CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(ctx->rsa2048_tfm)) {
		pr_err("Failed to load transform for rsa2048: %ld\n",
			PTR_ERR(ctx->rsa2048_tfm));
		ret = PTR_ERR(ctx->rsa2048_tfm);
		goto fail_rsa2048;
	}

	filp->private_data = ctx;
	return ret;

fail_rsa2048:
	crypto_free_ahash(ctx->rsa1536_tfm);
fail_rsa1536:
	crypto_free_ahash(ctx->rsa1024_tfm);
fail_rsa1024:
	crypto_free_ahash(ctx->rsa512_tfm);
fail_rsa512:
	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2 &&
			tegra_get_chipid() != TEGRA_CHIPID_TEGRA3)
		crypto_free_rng(ctx->rng_drbg);
	else
		crypto_free_rng(ctx->rng);
fail_rng:
	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)
		crypto_free_ablkcipher(ctx->ctr_tfm);
fail_ctr:
	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)
		crypto_free_ablkcipher(ctx->ofb_tfm);
fail_ofb:
	crypto_free_ablkcipher(ctx->cbc_tfm);

fail_cbc:
	crypto_free_ablkcipher(ctx->ecb_tfm);

fail_ecb:
	kfree(ctx);
	return ret;
}