Example 1
/**
 * Calculate hash digest for the passed buffer.
 *
 * This should be used when computing the hash on a single contiguous buffer.
 * It combines the hash initialization, computation, and cleanup.
 *
 * \param[in] hash_alg	id of hash algorithm (CFS_HASH_ALG_*)
 * \param[in] buf	data buffer on which to compute hash
 * \param[in] buf_len	length of \a buf in bytes
 * \param[in] key	initial value/state for algorithm; if \a key is NULL
 *			the default initial value is used
 * \param[in] key_len	length of \a key in bytes
 * \param[out] hash	pointer to computed hash value; if \a hash is NULL,
 *			\a hash_len is set to the digest size in bytes and
 *			-ENOSPC is returned
 * \param[in,out] hash_len size of \a hash buffer
 *
 * \retval -EINVAL       \a buf, \a buf_len, \a hash_len, \a hash_alg invalid
 * \retval -ENOENT       \a hash_alg is unsupported
 * \retval -ENOSPC       \a hash is NULL, or \a hash_len less than digest size
 * \retval		0 for success
 * \retval		negative errno for other errors from lower layers.
 */
int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
			   const void *buf, unsigned int buf_len,
			   unsigned char *key, unsigned int key_len,
			   unsigned char *hash, unsigned int *hash_len)
{
	struct scatterlist	sl;
	struct ahash_request *req;
	int			err;
	const struct cfs_crypto_hash_type	*type;

	if (!buf || buf_len == 0 || !hash_len)
		return -EINVAL;

	err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
	if (err != 0)
		return err;

	if (!hash || *hash_len < type->cht_size) {
		*hash_len = type->cht_size;
		crypto_free_ahash(crypto_ahash_reqtfm(req));
		ahash_request_free(req);
		return -ENOSPC;
	}
	sg_init_one(&sl, (void *)buf, buf_len);

	ahash_request_set_crypt(req, &sl, hash, sl.length);
	err = crypto_ahash_digest(req);
	crypto_free_ahash(crypto_ahash_reqtfm(req));
	ahash_request_free(req);

	return err;
}
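A minimal caller-side sketch of the two-call pattern described in the comment block above: probe the digest size by passing hash == NULL (the function then writes the size through hash_len and returns -ENOSPC), then call again with a suitable buffer. The helper name is hypothetical, not from the source.

static int example_digest_buffer(enum cfs_crypto_hash_alg alg,
				 const void *buf, unsigned int len,
				 unsigned char *hash, unsigned int hash_size)
{
	unsigned int hash_len = 0;
	int rc;

	/* hash == NULL: hash_len receives the digest size, rc is -ENOSPC */
	rc = cfs_crypto_hash_digest(alg, buf, len, NULL, 0, NULL, &hash_len);
	if (rc != -ENOSPC)
		return rc;
	if (hash_len > hash_size)
		return -EOVERFLOW;

	return cfs_crypto_hash_digest(alg, buf, len, NULL, 0, hash, &hash_len);
}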
Example 2
/**
 * Finish hash calculation, copy hash digest to buffer, clean up hash request
 *
 * \param[in]	req		ahash request
 * \param[out]	hash		pointer to hash buffer to store hash digest
 * \param[in,out] hash_len	pointer to hash buffer size; if \a hash == NULL
 *				or \a hash_len == NULL, only free \a req instead
 *				of computing the hash
 *
 * \retval		0 for success
 * \retval		-EOVERFLOW if hash_len is too small for the hash digest
 * \retval		negative errno for other errors from lower layers
 */
int cfs_crypto_hash_final(struct ahash_request *req,
			  unsigned char *hash, unsigned int *hash_len)
{
	int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	int err;

	if (!hash || !hash_len) {
		err = 0;
		goto free;
	}
	if (*hash_len < size) {
		err = -EOVERFLOW;
		goto free;
	}

	ahash_request_set_crypt(req, NULL, hash, 0);
	err = crypto_ahash_final(req);
	if (err == 0)
		*hash_len = size;
free:
	crypto_free_ahash(crypto_ahash_reqtfm(req));
	ahash_request_free(req);

	return err;
}
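cfs_crypto_hash_final() is the tail of an init/update/final sequence used to hash non-contiguous data. A minimal sketch of that flow; the companion helpers cfs_crypto_hash_init() (returning an ahash request or an ERR_PTR) and cfs_crypto_hash_update(), and the CFS_HASH_ALG_SHA256 constant, are assumed from the surrounding Lustre API and are not shown in this section.

static int example_hash_two_chunks(const void *a, unsigned int a_len,
				   const void *b, unsigned int b_len,
				   unsigned char *hash, unsigned int *hash_len)
{
	struct ahash_request *req;
	int rc;

	req = cfs_crypto_hash_init(CFS_HASH_ALG_SHA256, NULL, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	rc = cfs_crypto_hash_update(req, a, a_len);
	if (rc == 0)
		rc = cfs_crypto_hash_update(req, b, b_len);
	if (rc != 0) {
		/* hash == NULL: final() only frees the request */
		cfs_crypto_hash_final(req, NULL, NULL);
		return rc;
	}

	return cfs_crypto_hash_final(req, hash, hash_len);
}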
Example 3
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->last_req) {
		/*
		 * The hardware produces the MD5 digest in little-endian
		 * format, but SHA digests in big-endian format.
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
Example 4
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
Example 5
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
Example 6
File: sha.c Project: DenisLug/mptcp
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}
Example 7
static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}
Example 8
File: sha.c Project: DenisLug/mptcp
static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, hardware padding is applied when the first-block
		 * flag is set.  Therefore the byte_count must be incremented
		 * by 64 after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}
Example 9
File: crc32c.c Project: E-LLP/n900
static int crc32c_init(struct ahash_request *req)
{
	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	u32 *crcp = ahash_request_ctx(req);

	*crcp = *mctx;
	return 0;
}
Example 10
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int error_status)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	struct cpl_fw6_pld *fw6_pld;
	unsigned int digestsize, updated_digestsize;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
		ctx_req.ctx.ablk_ctx =
			ablkcipher_request_ctx(ctx_req.req.ablk_req);
		if (!error_status) {
			fw6_pld = (struct cpl_fw6_pld *)input;
			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
			       AES_BLOCK_SIZE);
		}
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.ablk_ctx->skb) {
			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
			ctx_req.ctx.ablk_ctx->skb = NULL;
		}
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = (struct ahash_request *)req;
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb)
			ctx_req.ctx.ahash_ctx->skb = NULL;
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
		break;
	}
	return 0;
}
Example 11
static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
Example 12
static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}
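The rk_ahash_* handlers above forward every operation to ctx->fallback_tfm. A minimal sketch of where such a fallback typically comes from, following the common cra_init pattern; the context layout is inferred from the snippets above and the reqsize arithmetic is an assumption.

static int rk_cra_hash_init_sketch(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* software implementation driven by init/update/final/export above */
	ctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	/* reserve room for the fallback's request context behind ours */
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(ctx->fallback_tfm));
	return 0;
}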
Example 13
static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_ctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}
Example 14
static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_ctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}
Example 15
static int rk_ahash_final(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}
Example 16
int crypto4xx_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct scatterlist dst;
	unsigned int ds = crypto_ahash_digestsize(ahash);

	sg_init_one(&dst, req->result, ds);

	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
				  req->nbytes, NULL, 0, ctx->sa_in,
				  ctx->sa_len, 0);
}
Example 17
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}
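The ipad/opad blocks prepared above are the two halves of the standard HMAC construction, HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)), where K' is the key zero-padded (or first digested, as in the keylen > blocksize branch) to one block. A synchronous shash sketch of that construction for illustration only, assuming a kernel that provides SHASH_DESC_ON_STACK and HASH_MAX_DIGESTSIZE; the driver itself programs the padded blocks into hardware instead.

static int hmac_from_pads_sketch(struct crypto_shash *tfm,
				 const u8 *ipad, const u8 *opad,
				 unsigned int blocksize,
				 const u8 *msg, unsigned int msglen, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 inner[HASH_MAX_DIGESTSIZE];
	int ret;

	desc->tfm = tfm;

	/* inner = H((K' ^ ipad) || msg) */
	ret = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, ipad, blocksize) ?:
	      crypto_shash_update(desc, msg, msglen) ?:
	      crypto_shash_final(desc, inner);
	if (ret)
		return ret;

	/* out = H((K' ^ opad) || inner) */
	return crypto_shash_init(desc) ?:
	       crypto_shash_update(desc, opad, blocksize) ?:
	       crypto_shash_update(desc, inner,
				   crypto_shash_digestsize(tfm)) ?:
	       crypto_shash_final(desc, out);
}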
Example 18
File: crc32c.c Project: E-LLP/n900
static int crc32c_digest(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	u32 crc = *mctx;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
	     nbytes = crypto_hash_walk_done(&walk, 0))
		crc = crc32c(crc, walk.data, nbytes);

	*(__le32 *)req->result = ~cpu_to_le32(crc);
	return 0;
}
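The seed that crc32c_init() (Example 9) copies out of the tfm context defaults to ~0 and can be overridden through setkey, and the final bit inversion above is the CRC32C output complement. A sketch of the matching setkey, patterned on the generic crc32c driver; treat the exact error handling as an assumption.

static int crc32c_setkey_sketch(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	u32 *mctx = crypto_ahash_ctx(tfm);

	if (keylen != sizeof(u32))
		return -EINVAL;

	/* seed used by every subsequent crc32c_init() on this tfm */
	*mctx = le32_to_cpup((__le32 *)key);
	return 0;
}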
Example 19
static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}
Example 20
static int rk_ahash_update(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}
Example 21
static int ss_hash_init(struct ahash_request *req, int type, int size, char *iv)
{
	ss_aes_req_ctx_t *req_ctx = ahash_request_ctx(req);
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));

	SS_DBG("Method: %d \n", type);

	memset(req_ctx, 0, sizeof(ss_aes_req_ctx_t));
	req_ctx->type = type;

	ctx->md_size = size;
	memcpy(ctx->md, iv, size);

	ctx->cnt = 0;
	memset(ctx->pad, 0, SS_HASH_PAD_SIZE);
	return 0;
}
Example 22
static int ghash_async_final(struct ahash_request *req)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);

	if (!irq_fpu_usable()) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
		struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
		struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

		memcpy(cryptd_req, req, sizeof(*req));
		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
		return crypto_ahash_final(cryptd_req);
	} else {
		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
		return crypto_shash_final(desc, req->result);
	}
}
Example 23
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}
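Callers never see the driver-specific (hash, len, cache) triple above; it sits behind the generic crypto_ahash_export()/crypto_ahash_import() pair, which lets a partial hash be snapshotted and resumed. A minimal caller sketch, assuming a kernel with the DECLARE_CRYPTO_WAIT/crypto_wait_req helpers; the fixed-size state buffer is illustrative.

static int split_hash_sketch(struct crypto_ahash *tfm,
			     struct scatterlist *part1, unsigned int len1,
			     struct scatterlist *part2, unsigned int len2,
			     u8 *digest)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct ahash_request *req;
	u8 state[512];
	int ret;

	if (crypto_ahash_statesize(tfm) > sizeof(state))
		return -EOVERFLOW;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* hash the first part, then snapshot the partial state */
	ahash_request_set_crypt(req, part1, NULL, len1);
	ret = crypto_wait_req(crypto_ahash_init(req), &wait) ?:
	      crypto_wait_req(crypto_ahash_update(req), &wait) ?:
	      crypto_ahash_export(req, state);

	/* ... later, possibly on a fresh request: resume and finish */
	if (!ret) {
		ret = crypto_ahash_import(req, state);
		if (!ret) {
			ahash_request_set_crypt(req, part2, digest, len2);
			ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
		}
	}

	ahash_request_free(req);
	return ret;
}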
Example 24
int ss_hash_final(struct ahash_request *req)
{
	int pad_len = 0;
	ss_aes_req_ctx_t *req_ctx = ahash_request_ctx(req);
	ss_hash_ctx_t *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct scatterlist last = {0}; /* make a sg struct for padding data. */

	if (req->result == NULL) {
		SS_ERR("Invalid result porinter. \n");
		return -EINVAL;
	}
	SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt);
	if (ss_dev->suspend) {
		SS_ERR("SS has already suspend. \n");
		return -EAGAIN;
	}

	/* Process the padding data. */
	pad_len = ss_hash_padding(ctx, req_ctx->type == SS_METHOD_MD5 ? 0 : 1);
	SS_DBG("Pad len: %d \n", pad_len);
	req_ctx->dma_src.sg = &last;
	sg_init_table(&last, 1);
	sg_set_buf(&last, ctx->pad, pad_len);
	SS_DBG("Padding data: \n");
	print_hex(ctx->pad, 128, (int)ctx->pad);

	ss_dev_lock();
	ss_hash_start(ctx, req_ctx, pad_len);

	ss_sha_final();

	SS_DBG("Method: %d, cnt: %d\n", req_ctx->type, ctx->cnt);

	ss_check_sha_end();
	memcpy(req->result, ctx->md, ctx->md_size);
	ss_ctrl_stop();
	ss_dev_unlock();

#ifdef SS_SHA_SWAP_FINAL_ENABLE
	if (req_ctx->type != SS_METHOD_MD5)
		ss_hash_swap(req->result, ctx->md_size);
#endif

	return 0;
}
Example 25
static int ghash_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

	if (!irq_fpu_usable()) {
		memcpy(cryptd_req, req, sizeof(*req));
		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
		return crypto_ahash_digest(cryptd_req);
	} else {
		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

		desc->tfm = child;
		desc->flags = req->base.flags;
		return shash_ahash_digest(req, desc);
	}
}
Example 26
static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
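The *_zero_message_hash constants exported by the kernel's md5/sha headers are the precomputed digests of the empty message; drivers whose hardware cannot process a zero-length request answer it directly from them. A hypothetical digest entry point showing the usual short-circuit; the non-empty path is elided.

static int rk_ahash_digest_sketch(struct ahash_request *req)
{
	/* hardware cannot hash zero bytes: return the precomputed digest */
	if (!req->nbytes)
		return zero_message_process(req);

	/* normal hardware path elided in this sketch */
	return -EOPNOTSUPP;
}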
Example 27
File: sha.c Project: DenisLug/mptcp
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
		    rctx->src_chained);
	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}
Example 28
static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else
			mv_hash_final_fallback(req);
	}
}
Example 29
static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
			      unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg, *cmac_key_sg = NULL;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int need_pad, sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (nbytes)
		rctx->null_msg = 0;
Example 30
static int n2_hash_async_digest(struct ahash_request *req,
				unsigned int auth_type, unsigned int digest_size,
				unsigned int result_size, void *hash_loc)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	n2_base_ctx_init(&ctx->base);

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, 0, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}