/**
 * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
 *
 * \param[in]	req		ahash request
 * \param[out]	hash		pointer to hash buffer to store hash digest
 * \param[in,out] hash_len	pointer to hash buffer size; if \a hash == NULL
 *				or \a hash_len == NULL only free \a req instead
 *				of computing the hash
 *
 * \retval	0 for success
 * \retval	-EOVERFLOW if hash_len is too small for the hash digest
 * \retval	negative errno for other errors from lower layers
 */
int cfs_crypto_hash_final(struct ahash_request *req,
			  unsigned char *hash, unsigned int *hash_len)
{
	int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	int err;

	if (!hash || !hash_len) {
		err = 0;
		goto free;
	}
	if (*hash_len < size) {
		err = -EOVERFLOW;
		goto free;
	}

	ahash_request_set_crypt(req, NULL, hash, 0);
	err = crypto_ahash_final(req);
	if (err == 0)
		*hash_len = size;
free:
	crypto_free_ahash(crypto_ahash_reqtfm(req));
	ahash_request_free(req);

	return err;
}
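/*
 * Minimal usage sketch for the helpers above: hash a contiguous buffer with
 * the init/update/final sequence.  It assumes a cfs_crypto_hash_init() helper
 * that allocates the ahash transform and request (as in the Lustre libcfs
 * API, not shown in this section); the algorithm choice and function name are
 * illustrative only.
 */
static int example_hash_buffer(const void *buf, unsigned int len,
			       unsigned char *digest, unsigned int *digest_len)
{
	struct ahash_request *req;
	int rc;

	/* assumed helper: allocates tfm + request, optionally sets a key */
	req = cfs_crypto_hash_init(CFS_HASH_ALG_SHA256, NULL, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	rc = cfs_crypto_hash_update(req, buf, len);
	if (rc) {
		/* a NULL hash tells cfs_crypto_hash_final() to only free the request */
		cfs_crypto_hash_final(req, NULL, NULL);
		return rc;
	}

	/* copies the digest and releases the tfm and request */
	return cfs_crypto_hash_final(req, digest, digest_len);
}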
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad, void *state, unsigned int blocksize) { struct mv_cesa_ahash_result result; struct scatterlist sg; int ret; ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, mv_cesa_hmac_ahash_complete, &result); sg_init_one(&sg, pad, blocksize); ahash_request_set_crypt(req, &sg, pad, blocksize); init_completion(&result.completion); ret = crypto_ahash_init(req); if (ret) return ret; ret = crypto_ahash_update(req); if (ret && ret != -EINPROGRESS) return ret; wait_for_completion_interruptible(&result.completion); if (result.error) return result.error; ret = crypto_ahash_export(req, state); if (ret) return ret; return 0; }
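/*
 * mv_cesa_ahmac_iv_state_init() above (and mv_cesa_ahmac_pad_init() further
 * below) rely on a small completion context and an async callback that are
 * not reproduced in this section.  A sketch of the usual pattern, assuming
 * the conventional crypto_async_request callback signature and a result
 * struct holding a completion plus an error field:
 */
struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	/* -EINPROGRESS just signals a backlogged request was started */
	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}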
/**
 * Calculate hash digest for the passed buffer.
 *
 * This should be used when computing the hash on a single contiguous buffer.
 * It combines the hash initialization, computation, and cleanup.
 *
 * \param[in]	hash_alg	id of hash algorithm (CFS_HASH_ALG_*)
 * \param[in]	buf		data buffer on which to compute hash
 * \param[in]	buf_len		length of \a buf in bytes
 * \param[in]	key		initial value/state for algorithm; if
 *				\a key == NULL use default initial value
 * \param[in]	key_len		length of \a key in bytes
 * \param[out]	hash		pointer to computed hash value; if \a hash ==
 *				NULL then \a hash_len is set to the digest
 *				size in bytes and -ENOSPC is returned
 * \param[in,out] hash_len	size of \a hash buffer
 *
 * \retval	-EINVAL		\a buf, \a buf_len, \a hash_len, \a hash_alg invalid
 * \retval	-ENOENT		\a hash_alg is unsupported
 * \retval	-ENOSPC		\a hash is NULL, or \a hash_len less than digest size
 * \retval	0 for success
 * \retval	negative errno for other errors from lower layers
 */
int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
			   const void *buf, unsigned int buf_len,
			   unsigned char *key, unsigned int key_len,
			   unsigned char *hash, unsigned int *hash_len)
{
	struct scatterlist sl;
	struct ahash_request *req;
	int err;
	const struct cfs_crypto_hash_type *type;

	if (!buf || buf_len == 0 || !hash_len)
		return -EINVAL;

	err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
	if (err != 0)
		return err;

	if (!hash || *hash_len < type->cht_size) {
		*hash_len = type->cht_size;
		crypto_free_ahash(crypto_ahash_reqtfm(req));
		ahash_request_free(req);
		return -ENOSPC;
	}

	sg_init_one(&sl, (void *)buf, buf_len);

	ahash_request_set_crypt(req, &sl, hash, sl.length);
	err = crypto_ahash_digest(req);
	crypto_free_ahash(crypto_ahash_reqtfm(req));
	ahash_request_free(req);

	return err;
}
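/*
 * Illustrative caller for cfs_crypto_hash_digest(): query the required digest
 * size first by passing a NULL hash buffer, then compute the digest for real.
 * The algorithm and the stack buffer size are assumptions for the sketch.
 */
static int example_digest_buffer(const void *buf, unsigned int buf_len)
{
	unsigned char digest[64];	/* assumed big enough for the chosen algo */
	unsigned int digest_len = 0;
	int rc;

	/* first call: hash == NULL, so only the required size is reported */
	rc = cfs_crypto_hash_digest(CFS_HASH_ALG_SHA256, buf, buf_len,
				    NULL, 0, NULL, &digest_len);
	if (rc != -ENOSPC)
		return rc;
	if (digest_len > sizeof(digest))
		return -EOVERFLOW;

	/* second call: compute the digest into the correctly sized buffer */
	return cfs_crypto_hash_digest(CFS_HASH_ALG_SHA256, buf, buf_len,
				      NULL, 0, digest, &digest_len);
}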
/**
 * Update hash digest computed on the specified data
 *
 * \param[in]	req	ahash request
 * \param[in]	buf	data buffer on which to compute the hash
 * \param[in]	buf_len	length of \a buf on which to compute hash
 *
 * \retval	0 for success
 * \retval	negative errno on failure
 */
int cfs_crypto_hash_update(struct ahash_request *req,
			   const void *buf, unsigned int buf_len)
{
	struct scatterlist sl;

	sg_init_one(&sl, (void *)buf, buf_len);

	ahash_request_set_crypt(req, &sl, NULL, sl.length);
	return crypto_ahash_update(req);
}
inline void iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
				  size_t hdrlen,
				  unsigned char digest[ISCSI_DIGEST_SIZE])
{
	struct scatterlist sg;

	sg_init_one(&sg, hdr, hdrlen);
	ahash_request_set_crypt(hash, &sg, digest, hdrlen);
	crypto_ahash_digest(hash);
}
static int gcm_hash_final(struct aead_request *req,
			  struct crypto_gcm_req_priv_ctx *pctx)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;

	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   gcm_hash_final_done, req);
	ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0);

	return crypto_ahash_final(ahreq);
}
int fmpdev_hash_final(struct fmp_info *info, struct hash_data *hdata,
		      void *output)
{
	int ret;

	reinit_completion(&hdata->async.result->completion);
	ahash_request_set_crypt(hdata->async.request, NULL, output, 0);

	ret = crypto_ahash_final(hdata->async.request);

	return waitfor(info, hdata->async.result, ret);
}
/**
 * Update hash digest computed on data within the given \a page
 *
 * \param[in]	req	ahash request
 * \param[in]	page	data page on which to compute the hash
 * \param[in]	offset	offset within \a page at which to start hash
 * \param[in]	len	length of data on which to compute hash
 *
 * \retval	0 for success
 * \retval	negative errno on failure
 */
int cfs_crypto_hash_update_page(struct ahash_request *req,
				struct page *page, unsigned int offset,
				unsigned int len)
{
	struct scatterlist sl;

	sg_init_table(&sl, 1);
	sg_set_page(&sl, page, len, offset & ~PAGE_MASK);

	ahash_request_set_crypt(req, &sl, NULL, sl.length);
	return crypto_ahash_update(req);
}
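/*
 * Sketch of a caller feeding a range of pages through
 * cfs_crypto_hash_update_page().  The page array, the zero page offset, and
 * the length handling are assumptions for the example, not part of the API
 * shown above.
 */
static int example_hash_pages(struct ahash_request *req,
			      struct page **pages, unsigned int npages,
			      unsigned int total_len)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < npages && total_len > 0; i++) {
		unsigned int len = min_t(unsigned int, total_len, PAGE_SIZE);

		rc = cfs_crypto_hash_update_page(req, pages[i], 0, len);
		if (rc)
			break;
		total_len -= len;
	}
	return rc;
}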
ssize_t fmpdev_hash_update(struct fmp_info *info, struct hash_data *hdata,
			   struct scatterlist *sg, size_t len)
{
	int ret;

	reinit_completion(&hdata->async.result->completion);
	ahash_request_set_crypt(hdata->async.request, sg, NULL, len);

	ret = crypto_ahash_update(hdata->async.request);

	return waitfor(info, hdata->async.result, ret);
}
static int calc_buffer_ahash_atfm(const void *buf, loff_t len, struct ima_digest_data *hash, struct crypto_ahash *tfm) { struct ahash_request *req; struct scatterlist sg; struct ahash_completion res; int rc, ahash_rc = 0; hash->length = crypto_ahash_digestsize(tfm); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) return -ENOMEM; init_completion(&res.completion); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, ahash_complete, &res); rc = ahash_wait(crypto_ahash_init(req), &res); if (rc) goto out; sg_init_one(&sg, buf, len); ahash_request_set_crypt(req, &sg, NULL, len); ahash_rc = crypto_ahash_update(req); /* wait for the update request to complete */ rc = ahash_wait(ahash_rc, &res); if (!rc) { ahash_request_set_crypt(req, NULL, hash->digest, 0); rc = ahash_wait(crypto_ahash_final(req), &res); } out: ahash_request_free(req); return rc; }
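/*
 * calc_buffer_ahash_atfm() above and ima_calc_file_hash_atfm() further below
 * lean on two small helpers that are not reproduced in this section: an async
 * completion callback and a wait wrapper.  A sketch of what they typically
 * look like; struct ahash_completion is assumed to hold just a completion and
 * an error field.
 */
struct ahash_completion {
	struct completion completion;
	int err;
};

static void ahash_complete(struct crypto_async_request *req, int err)
{
	struct ahash_completion *res = req->data;

	/* -EINPROGRESS only means the backlogged request was started */
	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int ahash_wait(int err, struct ahash_completion *res)
{
	switch (err) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		/* request went async: wait for the callback, pick up its result */
		wait_for_completion(&res->completion);
		reinit_completion(&res->completion);
		err = res->err;
		break;
	default:
		pr_err("ahash calculation failed: err %d\n", err);
	}

	return err;
}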
static int mv_cesa_ahmac_pad_init(struct ahash_request *req, const u8 *key, unsigned int keylen, u8 *ipad, u8 *opad, unsigned int blocksize) { struct mv_cesa_ahash_result result; struct scatterlist sg; int ret; int i; if (keylen <= blocksize) { memcpy(ipad, key, keylen); } else { u8 *keydup = kmemdup(key, keylen, GFP_KERNEL); if (!keydup) return -ENOMEM; ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, mv_cesa_hmac_ahash_complete, &result); sg_init_one(&sg, keydup, keylen); ahash_request_set_crypt(req, &sg, ipad, keylen); init_completion(&result.completion); ret = crypto_ahash_digest(req); if (ret == -EINPROGRESS) { wait_for_completion_interruptible(&result.completion); ret = result.error; } /* Set the memory region to 0 to avoid any leak. */ memset(keydup, 0, keylen); kfree(keydup); if (ret) return ret; keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); } memset(ipad + keylen, 0, blocksize - keylen); memcpy(opad, ipad, blocksize); for (i = 0; i < blocksize; i++) { ipad[i] ^= 0x36; opad[i] ^= 0x5c; } return 0; }
static int gcm_hash_update(struct aead_request *req,
			   struct crypto_gcm_req_priv_ctx *pctx,
			   crypto_completion_t compl,
			   struct scatterlist *src,
			   unsigned int len)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;

	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   compl, req);
	ahash_request_set_crypt(ahreq, src, NULL, len);

	return crypto_ahash_update(ahreq);
}
static int gcm_hash_remain(struct aead_request *req,
			   struct crypto_gcm_req_priv_ctx *pctx,
			   unsigned int remain,
			   crypto_completion_t compl)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;

	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   compl, req);
	sg_init_one(pctx->src, gcm_zeroes, remain);
	ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);

	return crypto_ahash_update(ahreq);
}
static int hmac_sha_digest(const char *algo, char *data_in, size_t dlen,
			   char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_sha_result tresult;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crypto_alloc_ahash failed\n");
		rc = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "failed to allocate request\n");
		rc = -ENOMEM;
		goto err_req;
	}

	if (crypto_ahash_digestsize(tfm) > outlen) {
		printk(KERN_ERR "tfm size > result buffer\n");
		rc = -EINVAL;
		/* free the request here; err_req only frees the tfm */
		ahash_request_free(req);
		goto err_req;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_sha_complete, &tresult);

	sg_init_one(&sg, data_in, dlen);

	crypto_ahash_clear_flags(tfm, ~0);

	ahash_request_set_crypt(req, &sg, hash_out, dlen);

	rc = do_one_ahash_op(req, crypto_ahash_digest(req));

	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
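/*
 * hmac_sha_digest() above and hmac_sha_update() further below use a
 * completion context plus two helpers that are not shown.  A sketch of the
 * usual pattern; the names follow the snippets, the exact field layout is an
 * assumption.
 */
struct hmac_sha_result {
	struct completion completion;
	int err;
};

static void hmac_sha_complete(struct crypto_async_request *req, int err)
{
	struct hmac_sha_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* run one ahash op and, if it went async, wait for its completion callback */
static int do_one_ahash_op(struct ahash_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct hmac_sha_result *res = req->base.data;

		wait_for_completion(&res->completion);
		reinit_completion(&res->completion);
		ret = res->err;
	}
	return ret;
}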
static int gcm_hash_len(struct aead_request *req, struct crypto_gcm_req_priv_ctx *pctx) { struct ahash_request *ahreq = &pctx->u.ahreq; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; u128 lengths; lengths.a = cpu_to_be64(req->assoclen * 8); lengths.b = cpu_to_be64(gctx->cryptlen * 8); memcpy(pctx->iauth_tag, &lengths, 16); sg_init_one(pctx->src, pctx->iauth_tag, 16); ahash_request_set_callback(ahreq, aead_request_flags(req), gcm_hash_len_done, req); ahash_request_set_crypt(ahreq, pctx->src, NULL, sizeof(lengths)); return crypto_ahash_update(ahreq); }
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned int digestsize = crypto_ahash_digestsize(tfm); struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); struct qce_ahash_result result; struct ahash_request *req; struct scatterlist sg; unsigned int blocksize; struct crypto_ahash *ahash_tfm; u8 *buf; int ret; const char *alg_name; blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); memset(ctx->authkey, 0, sizeof(ctx->authkey)); if (keylen <= blocksize) { memcpy(ctx->authkey, key, keylen); return 0; } if (digestsize == SHA1_DIGEST_SIZE) alg_name = "sha1-qce"; else if (digestsize == SHA256_DIGEST_SIZE) alg_name = "sha256-qce"; else return -EINVAL; ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH, CRYPTO_ALG_TYPE_AHASH_MASK); if (IS_ERR(ahash_tfm)) return PTR_ERR(ahash_tfm); req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); if (!req) { ret = -ENOMEM; goto err_free_ahash; } init_completion(&result.completion); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, qce_digest_complete, &result); crypto_ahash_clear_flags(ahash_tfm, ~0); buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_free_req; } memcpy(buf, key, keylen); sg_init_one(&sg, buf, keylen); ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); ret = crypto_ahash_digest(req); if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible(&result.completion); if (!ret) ret = result.error; } if (ret) crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); kfree(buf); err_free_req: ahash_request_free(req); err_free_ahash: crypto_free_ahash(ahash_tfm); return ret; }
static int tegra_crypto_sha(struct tegra_sha_req *sha_req) { struct crypto_ahash *tfm; struct scatterlist sg[1]; char result[64]; struct ahash_request *req; struct tegra_crypto_completion sha_complete; void *hash_buff; unsigned long *xbuf[XBUFSIZE]; int ret = -ENOMEM; tfm = crypto_alloc_ahash(sha_req->algo, 0, 0); if (IS_ERR(tfm)) { printk(KERN_ERR "alg: hash: Failed to load transform for %s: " "%ld\n", sha_req->algo, PTR_ERR(tfm)); goto out_alloc; } req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) { printk(KERN_ERR "alg: hash: Failed to allocate request for " "%s\n", sha_req->algo); goto out_noreq; } ret = alloc_bufs(xbuf); if (ret < 0) { pr_err("alloc_bufs failed"); goto out_buf; } init_completion(&sha_complete.restart); memset(result, 0, 64); hash_buff = xbuf[0]; memcpy(hash_buff, sha_req->plaintext, sha_req->plaintext_sz); sg_init_one(&sg[0], hash_buff, sha_req->plaintext_sz); if (sha_req->keylen) { crypto_ahash_clear_flags(tfm, ~0); ret = crypto_ahash_setkey(tfm, sha_req->key, sha_req->keylen); if (ret) { printk(KERN_ERR "alg: hash: setkey failed on " " %s: ret=%d\n", sha_req->algo, -ret); goto out; } } ahash_request_set_crypt(req, sg, result, sha_req->plaintext_sz); ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_init(req)); if (ret) { pr_err("alg: hash: init failed on " "for %s: ret=%d\n", sha_req->algo, -ret); goto out; } ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_update(req)); if (ret) { pr_err("alg: hash: update failed on " "for %s: ret=%d\n", sha_req->algo, -ret); goto out; } ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_final(req)); if (ret) { pr_err("alg: hash: final failed on " "for %s: ret=%d\n", sha_req->algo, -ret); goto out; } ret = copy_to_user((void __user *)sha_req->result, (const void *)result, crypto_ahash_digestsize(tfm)); if (ret) { ret = -EFAULT; pr_err("alg: hash: copy_to_user failed (%d) for %s\n", ret, sha_req->algo); } out: free_bufs(xbuf); out_buf: ahash_request_free(req); out_noreq: crypto_free_ahash(tfm); out_alloc: return ret; }
static int tegra_crypt_rsa(struct tegra_crypto_ctx *ctx, struct tegra_rsa_req *rsa_req) { struct crypto_ahash *tfm = NULL; struct ahash_request *req = NULL; struct scatterlist sg[1]; char *result = NULL; void *hash_buff; int ret = 0; unsigned long *xbuf[XBUFSIZE]; struct tegra_crypto_completion rsa_complete; switch (rsa_req->algo) { case TEGRA_RSA512: req = ahash_request_alloc(ctx->rsa512_tfm, GFP_KERNEL); if (!req) { pr_err("alg: hash: Failed to allocate request for rsa512\n"); goto req_fail; } tfm = ctx->rsa512_tfm; break; case TEGRA_RSA1024: req = ahash_request_alloc(ctx->rsa1024_tfm, GFP_KERNEL); if (!req) { pr_err("alg: hash: Failed to allocate request for rsa1024\n"); goto req_fail; } tfm = ctx->rsa1024_tfm; break; case TEGRA_RSA1536: req = ahash_request_alloc(ctx->rsa1536_tfm, GFP_KERNEL); if (!req) { pr_err("alg: hash: Failed to allocate request for rsa1536\n"); goto req_fail; } tfm = ctx->rsa1536_tfm; break; case TEGRA_RSA2048: req = ahash_request_alloc(ctx->rsa2048_tfm, GFP_KERNEL); if (!req) { pr_err("alg: hash: Failed to allocate request for rsa2048\n"); goto req_fail; } tfm = ctx->rsa2048_tfm; break; default: goto req_fail; } ret = alloc_bufs(xbuf); if (ret < 0) { pr_err("alloc_bufs failed"); goto buf_fail; } init_completion(&rsa_complete.restart); result = kzalloc(rsa_req->keylen >> 16, GFP_KERNEL); if (!result) { pr_err("\nresult alloc fail\n"); goto result_fail; } hash_buff = xbuf[0]; memcpy(hash_buff, rsa_req->message, rsa_req->msg_len); sg_init_one(&sg[0], hash_buff, rsa_req->msg_len); if (!(rsa_req->keylen)) goto rsa_fail; if (!rsa_req->skip_key) { ret = crypto_ahash_setkey(tfm, rsa_req->key, rsa_req->keylen); if (ret) { pr_err("alg: hash: setkey failed\n"); goto rsa_fail; } } ahash_request_set_crypt(req, sg, result, rsa_req->msg_len); ret = crypto_ahash_digest(req); if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible(&rsa_complete.restart); if (!ret) ret = rsa_complete.req_err; INIT_COMPLETION(rsa_complete.restart); } if (ret) { pr_err("alg: hash: digest failed\n"); goto rsa_fail; } ret = copy_to_user((void __user *)rsa_req->result, (const void *)result, crypto_ahash_digestsize(tfm)); if (ret) { ret = -EFAULT; pr_err("alg: hash: copy_to_user failed (%d)\n", ret); } rsa_fail: kfree(result); result_fail: free_bufs(xbuf); buf_fail: ahash_request_free(req); req_fail: return ret; }
static int ima_calc_file_hash_atfm(struct file *file, struct ima_digest_data *hash, struct crypto_ahash *tfm) { loff_t i_size, offset; char *rbuf[2] = { NULL, }; int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0; struct ahash_request *req; struct scatterlist sg[1]; struct ahash_completion res; size_t rbuf_size[2]; hash->length = crypto_ahash_digestsize(tfm); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) return -ENOMEM; init_completion(&res.completion); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, ahash_complete, &res); rc = ahash_wait(crypto_ahash_init(req), &res); if (rc) goto out1; i_size = i_size_read(file_inode(file)); if (i_size == 0) goto out2; /* * Try to allocate maximum size of memory. * Fail if even a single page cannot be allocated. */ rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1); if (!rbuf[0]) { rc = -ENOMEM; goto out1; } /* Only allocate one buffer if that is enough. */ if (i_size > rbuf_size[0]) { /* * Try to allocate secondary buffer. If that fails fallback to * using single buffering. Use previous memory allocation size * as baseline for possible allocation size. */ rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0], &rbuf_size[1], 0); } if (!(file->f_mode & FMODE_READ)) { file->f_mode |= FMODE_READ; read = 1; } for (offset = 0; offset < i_size; offset += rbuf_len) { if (!rbuf[1] && offset) { /* Not using two buffers, and it is not the first * read/request, wait for the completion of the * previous ahash_update() request. */ rc = ahash_wait(ahash_rc, &res); if (rc) goto out3; } /* read buffer */ rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]); rc = integrity_kernel_read(file, offset, rbuf[active], rbuf_len); if (rc != rbuf_len) goto out3; if (rbuf[1] && offset) { /* Using two buffers, and it is not the first * read/request, wait for the completion of the * previous ahash_update() request. */ rc = ahash_wait(ahash_rc, &res); if (rc) goto out3; } sg_init_one(&sg[0], rbuf[active], rbuf_len); ahash_request_set_crypt(req, sg, NULL, rbuf_len); ahash_rc = crypto_ahash_update(req); if (rbuf[1]) active = !active; /* swap buffers, if we use two */ } /* wait for the last update request to complete */ rc = ahash_wait(ahash_rc, &res); out3: if (read) file->f_mode &= ~FMODE_READ; ima_free_pages(rbuf[0], rbuf_size[0]); ima_free_pages(rbuf[1], rbuf_size[1]); out2: if (!rc) { ahash_request_set_crypt(req, NULL, hash->digest, 0); rc = ahash_wait(crypto_ahash_final(req), &res); } out1: ahash_request_free(req); return rc; }
int hmac_md5(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize, v_U8_t *output, v_U8_t outlen) { int ret = 0; struct crypto_ahash *tfm; struct scatterlist sg; struct ahash_request *req; struct hmac_md5_result tresult = {.err = 0}; void *hash_buff = NULL; unsigned char hash_result[64]; int i; memset(output, 0, outlen); init_completion(&tresult.completion); #if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO)) tfm = crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH, CRYPTO_ALG_TYPE_AHASH_MASK); #else tfm = wcnss_wlan_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH, CRYPTO_ALG_TYPE_AHASH_MASK); #endif if (IS_ERR(tfm)) { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_alloc_ahash failed"); ret = PTR_ERR(tfm); goto err_tfm; } req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to allocate request for hmac(md5)"); ret = -ENOMEM; goto err_req; } ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, hmac_md5_complete, &tresult); hash_buff = kzalloc(psize, GFP_KERNEL); if (!hash_buff) { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "failed to kzalloc hash_buff"); ret = -ENOMEM; goto err_hash_buf; } memset(hash_result, 0, 64); vos_mem_copy(hash_buff, plaintext, psize); sg_init_one(&sg, hash_buff, psize); if (ksize) { crypto_ahash_clear_flags(tfm, ~0); #if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO)) ret = crypto_ahash_setkey(tfm, key, ksize); #else ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize); #endif if (ret) { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "crypto_ahash_setkey failed"); goto err_setkey; } } ahash_request_set_crypt(req, &sg, hash_result, psize); #if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO)) ret = crypto_ahash_digest(req); #else ret = wcnss_wlan_crypto_ahash_digest(req); #endif VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "ret 0x%x", ret); switch (ret) { case 0: for (i=0; i< outlen; i++) output[i] = hash_result[i]; break; case -EINPROGRESS: case -EBUSY: ret = wait_for_completion_interruptible(&tresult.completion); if (!ret && !tresult.err) { for (i = 0; i < outlen; i++) output[i] = hash_result[i]; INIT_COMPLETION(tresult.completion); break; } else { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR, "wait_for_completion_interruptible failed"); if (!ret) ret = tresult.err; goto out; } default: goto out; } out: err_setkey: kfree(hash_buff); err_hash_buf: ahash_request_free(req); err_req: #if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO)) crypto_free_ahash(tfm); #else wcnss_wlan_crypto_free_ahash(tfm); #endif err_tfm: return ret; } VOS_STATUS vos_md5_hmac_str(v_U32_t cryptHandle, /* Handle */ v_U8_t *pText, /* pointer to data stream */ v_U32_t textLen, /* length of data stream */ v_U8_t *pKey, /* pointer to authentication key */ v_U32_t keyLen, /* length of authentication key */ v_U8_t digest[VOS_DIGEST_MD5_SIZE])/* caller digest to be filled in */ { int ret = 0; ret = hmac_md5( pKey, //v_U8_t *key, (v_U8_t) keyLen, //v_U8_t ksize, (char *)pText, //char *plaintext, (v_U8_t) textLen, //v_U8_t psize, digest, //v_U8_t *output, VOS_DIGEST_MD5_SIZE //v_U8_t outlen ); if (ret != 0) { VOS_TRACE(VOS_MODULE_ID_VOSS,VOS_TRACE_LEVEL_ERROR,"hmac_md5() call failed"); return VOS_STATUS_E_FAULT; } return VOS_STATUS_SUCCESS; }
static int hmac_sha_update(const char *algo, char *data_in, size_t dlen,
			   char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg[TVMEMSIZE];
	struct ahash_request *req;
	struct hmac_sha_result tresult;
	int i, j;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crypto_alloc_ahash failed\n");
		rc = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "failed to allocate request\n");
		rc = -ENOMEM;
		goto err_req;
	}

	if (crypto_ahash_digestsize(tfm) > outlen) {
		printk(KERN_ERR "tfm size > result buffer\n");
		rc = -EINVAL;
		/* free the request here; err_req only frees the tfm */
		ahash_request_free(req);
		goto err_req;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_sha_complete, &tresult);

	sg_init_table(sg, TVMEMSIZE);
	i = 0;
	j = dlen;
	while (j > PAGE_SIZE) {
		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
		memcpy(tvmem[i], data_in + i * PAGE_SIZE, PAGE_SIZE);
		i++;
		j -= PAGE_SIZE;
	}
	sg_set_buf(sg + i, tvmem[i], j);
	memcpy(tvmem[i], data_in + i * PAGE_SIZE, j);

	crypto_ahash_clear_flags(tfm, ~0);

	ahash_request_set_crypt(req, sg, hash_out, dlen);

	rc = do_one_ahash_op(req, crypto_ahash_init(req));
	if (rc)
		goto out;
	rc = do_one_ahash_op(req, crypto_ahash_update(req));
	if (rc)
		goto out;
	rc = do_one_ahash_op(req, crypto_ahash_final(req));
out:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
/** * iscsi_tcp_segment_done - check whether the segment is complete * @tcp_conn: iscsi tcp connection * @segment: iscsi segment to check * @recv: set to one of this is called from the recv path * @copied: number of bytes copied * * Check if we're done receiving this segment. If the receive * buffer is full but we expect more data, move on to the * next entry in the scatterlist. * * If the amount of data we received isn't a multiple of 4, * we will transparently receive the pad bytes, too. * * This function must be re-entrant. */ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, struct iscsi_segment *segment, int recv, unsigned copied) { struct scatterlist sg; unsigned int pad; ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n", segment->copied, copied, segment->size, recv ? "recv" : "xmit"); if (segment->hash && copied) { /* * If a segment is kmapd we must unmap it before sending * to the crypto layer since that will try to kmap it again. */ iscsi_tcp_segment_unmap(segment); if (!segment->data) { sg_init_table(&sg, 1); sg_set_page(&sg, sg_page(segment->sg), copied, segment->copied + segment->sg_offset + segment->sg->offset); } else sg_init_one(&sg, segment->data + segment->copied, copied); ahash_request_set_crypt(segment->hash, &sg, NULL, copied); crypto_ahash_update(segment->hash); } segment->copied += copied; if (segment->copied < segment->size) { iscsi_tcp_segment_map(segment, recv); return 0; } segment->total_copied += segment->copied; segment->copied = 0; segment->size = 0; /* Unmap the current scatterlist page, if there is one. */ iscsi_tcp_segment_unmap(segment); /* Do we have more scatterlist entries? */ ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n", segment->total_copied, segment->total_size); if (segment->total_copied < segment->total_size) { /* Proceed to the next entry in the scatterlist. */ iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg), 0); iscsi_tcp_segment_map(segment, recv); BUG_ON(segment->size == 0); return 0; } /* Do we need to handle padding? */ if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) { pad = iscsi_padding(segment->total_copied); if (pad != 0) { ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "consume %d pad bytes\n", pad); segment->total_size += pad; segment->size = pad; segment->data = segment->padbuf; return 0; } } /* * Set us up for transferring the data digest. hdr digest * is completely handled in hdr done function. */ if (segment->hash) { ahash_request_set_crypt(segment->hash, NULL, segment->digest, 0); crypto_ahash_final(segment->hash); iscsi_tcp_segment_splice_digest(segment, recv ? segment->recv_digest : segment->digest); return 0; } return 1; }
int hmac_md5(uint8_t *key, uint8_t ksize, char *plaintext, uint8_t psize, uint8_t *output, uint8_t outlen) { int ret = 0; struct crypto_ahash *tfm; struct scatterlist sg; struct ahash_request *req; struct hmac_md5_result tresult = {.err = 0 }; void *hash_buff = NULL; unsigned char hash_result[64]; int i; memset(output, 0, outlen); init_completion(&tresult.completion); tfm = cds_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH, CRYPTO_ALG_TYPE_AHASH_MASK); if (IS_ERR(tfm)) { CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "crypto_alloc_ahash failed"); ret = PTR_ERR(tfm); goto err_tfm; } req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) { CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "failed to allocate request for hmac(md5)"); ret = -ENOMEM; goto err_req; } ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, hmac_md5_complete, &tresult); hash_buff = kzalloc(psize, GFP_KERNEL); if (!hash_buff) { CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "failed to kzalloc hash_buff"); ret = -ENOMEM; goto err_hash_buf; } memset(hash_result, 0, 64); memcpy(hash_buff, plaintext, psize); sg_init_one(&sg, hash_buff, psize); if (ksize) { crypto_ahash_clear_flags(tfm, ~0); ret = cds_crypto_ahash_setkey(tfm, key, ksize); if (ret) { CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "crypto_ahash_setkey failed"); goto err_setkey; } } ahash_request_set_crypt(req, &sg, hash_result, psize); ret = cds_crypto_ahash_digest(req); CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "ret 0x%x", ret); switch (ret) { case 0: for (i = 0; i < outlen; i++) output[i] = hash_result[i]; break; case -EINPROGRESS: case -EBUSY: ret = wait_for_completion_interruptible(&tresult.completion); if (!ret && !tresult.err) { INIT_COMPLETION(tresult.completion); break; } else { CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "wait_for_completion_interruptible failed"); if (!ret) ret = tresult.err; goto out; } default: goto out; } out: err_setkey: kfree(hash_buff); err_hash_buf: ahash_request_free(req); err_req: cds_crypto_free_ahash(tfm); err_tfm: return ret; } CDF_STATUS cds_md5_hmac_str(uint32_t cryptHandle, /* Handle */ uint8_t *pText, /* pointer to data stream */ uint32_t textLen, /* length of data stream */ uint8_t *pKey, /* pointer to authentication key */ uint32_t keyLen, /* length of authentication key */ uint8_t digest[CDS_DIGEST_MD5_SIZE]) { /* caller digest to be filled in */ int ret = 0; ret = hmac_md5(pKey, /* uint8_t *key, */ (uint8_t) keyLen, /* uint8_t ksize, */ (char *)pText, /* char *plaintext, */ (uint8_t) textLen, /* uint8_t psize, */ digest, /* uint8_t *output, */ CDS_DIGEST_MD5_SIZE /* uint8_t outlen */ ); if (ret != 0) { CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, "hmac_md5() call failed"); return CDF_STATUS_E_FAULT; } return CDF_STATUS_SUCCESS; }
int hmac_md5(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
	     v_U8_t *output, v_U8_t outlen)
{
	int ret = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_md5_result tresult;
	void *hash_buff = NULL;
	unsigned char hash_result[64];
	int i;

	memset(output, 0, outlen);

	init_completion(&tresult.completion);

	tfm = wcnss_wlan_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
					    CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm)) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "crypto_alloc_ahash failed");
		ret = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "failed to allocate request for hmac(md5)");
		ret = -ENOMEM;
		goto err_req;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_md5_complete, &tresult);

	hash_buff = kzalloc(psize, GFP_KERNEL);
	if (!hash_buff) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "failed to kzalloc hash_buff");
		ret = -ENOMEM;
		goto err_hash_buf;
	}

	memset(hash_result, 0, 64);
	memcpy(hash_buff, plaintext, psize);
	sg_init_one(&sg, hash_buff, psize);

	if (ksize) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);
		if (ret) {
			VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
				  "crypto_ahash_setkey failed");
			goto err_setkey;
		}
	}

	ahash_request_set_crypt(req, &sg, hash_result, psize);
	ret = wcnss_wlan_crypto_ahash_digest(req);

	VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

	switch (ret) {
	case 0:
		for (i = 0; i < outlen; i++)
			output[i] = hash_result[i];
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&tresult.completion);
		if (!ret && !tresult.err) {
			/* copy the digest out once the async request completes */
			for (i = 0; i < outlen; i++)
				output[i] = hash_result[i];
			INIT_COMPLETION(tresult.completion);
			break;
		} else {
			VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
				  "wait_for_completion_interruptible failed");
			if (!ret)
				ret = tresult.err;
			goto out;
		}
	default:
		goto out;
	}

out:
err_setkey:
	kfree(hash_buff);
err_hash_buf:
	ahash_request_free(req);
err_req:
	wcnss_wlan_crypto_free_ahash(tfm);
err_tfm:
	return ret;
}
/* * Sha/HMAC self tests */ int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d) { int rc = 0, err, tv_index = 0, num_tv; char *k_out_buf = NULL; struct scatterlist fips_sg; struct crypto_ahash *tfm; struct ahash_request *ahash_req; struct _fips_completion fips_completion; struct _fips_test_vector_sha_hmac tv_sha_hmac; num_tv = (sizeof(fips_test_vector_sha_hmac)) / (sizeof(struct _fips_test_vector_sha_hmac)); /* One-by-one testing */ for (tv_index = 0; tv_index < num_tv; tv_index++) { memcpy(&tv_sha_hmac, &fips_test_vector_sha_hmac[tv_index], (sizeof(struct _fips_test_vector_sha_hmac))); k_out_buf = kzalloc(tv_sha_hmac.diglen, GFP_KERNEL); if (k_out_buf == NULL) { pr_err("qcrypto: Failed to allocate memory for k_out_buf %ld\n", PTR_ERR(k_out_buf)); return -ENOMEM; } memset(k_out_buf, 0, tv_sha_hmac.diglen); init_completion(&fips_completion.completion); /* use_sw flags are set in dtsi file which makes default Linux API calls to go to s/w crypto instead of h/w crypto. This code makes sure that all selftests calls always go to h/w, independent of DTSI flags. */ if (tv_sha_hmac.klen == 0) { if (selftest_d->prefix_ahash_algo) if (_fips_get_alg_cra_name(tv_sha_hmac .hash_alg, selftest_d->algo_prefix, strlen(tv_sha_hmac.hash_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } else { if (selftest_d->prefix_hmac_algo) if (_fips_get_alg_cra_name(tv_sha_hmac .hash_alg, selftest_d->algo_prefix, strlen(tv_sha_hmac.hash_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } tfm = crypto_alloc_ahash(tv_sha_hmac.hash_alg, 0, 0); if (IS_ERR(tfm)) { pr_err("qcrypto: %s algorithm not found\n", tv_sha_hmac.hash_alg); rc = PTR_ERR(tfm); goto clr_buf; } ahash_req = ahash_request_alloc(tfm, GFP_KERNEL); if (!ahash_req) { pr_err("qcrypto: ahash_request_alloc failed\n"); rc = -ENOMEM; goto clr_tfm; } rc = qcrypto_ahash_set_device(ahash_req, selftest_d->ce_device); if (rc != 0) { pr_err("%s qcrypto_cipher_set_device failed with err %d\n", __func__, rc); goto clr_ahash_req; } ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG, _fips_cb, &fips_completion); sg_init_one(&fips_sg, &tv_sha_hmac.input[0], tv_sha_hmac.ilen); crypto_ahash_clear_flags(tfm, ~0); if (tv_sha_hmac.klen != 0) { rc = crypto_ahash_setkey(tfm, tv_sha_hmac.key, tv_sha_hmac.klen); if (rc) { pr_err("qcrypto: crypto_ahash_setkey failed\n"); goto clr_ahash_req; } } ahash_request_set_crypt(ahash_req, &fips_sg, k_out_buf, tv_sha_hmac.ilen); rc = crypto_ahash_digest(ahash_req); if (rc == -EINPROGRESS || rc == -EBUSY) { rc = wait_for_completion_interruptible( &fips_completion.completion); err = fips_completion.err; if (!rc && !err) { INIT_COMPLETION(fips_completion.completion); } else { pr_err("qcrypto:SHA: wait_for_completion failed\n"); goto clr_ahash_req; } } if (memcmp(k_out_buf, tv_sha_hmac.digest, tv_sha_hmac.diglen)) rc = -1; clr_ahash_req: ahash_request_free(ahash_req); clr_tfm: crypto_free_ahash(tfm); clr_buf: kzfree(k_out_buf); /* For any failure, return error */ if (rc) return rc; } return rc; }