/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a req for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks.  The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in]  hash_alg	hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type	pointer to the hash description in hash_types[] array
 * \param[in,out] req	ahash request to be initialized
 * \param[in]  key	initial hash value/state, NULL to use default value
 * \param[in]  key_len	length of \a key
 *
 * \retval 0 on success
 * \retval negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct ahash_request **req,
				 unsigned char *key,
				 unsigned int key_len)
{
	struct crypto_ahash *tfm;
	int err = 0;

	*type = cfs_crypto_hash_type(hash_alg);
	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}

	tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(tfm);
	}

	*req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!*req) {
		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
		       (*type)->cht_name);
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(*req, 0, NULL, NULL);

	if (key)
		err = crypto_ahash_setkey(tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_ahash_setkey(tfm,
					  (unsigned char *)&((*type)->cht_key),
					  (*type)->cht_size);
	if (err != 0) {
		ahash_request_free(*req);
		crypto_free_ahash(tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
	       cfs_crypto_hash_speeds[hash_alg]);

	err = crypto_ahash_init(*req);
	if (err) {
		ahash_request_free(*req);
		crypto_free_ahash(tfm);
	}
	return err;
}
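/*
 * A sketch (not part of the original source) of how the allocator above is
 * typically driven for a one-shot digest.  The helper name and flow are
 * illustrative; the real callers are cfs_crypto_hash_digest() and
 * cfs_crypto_hash_final(), as the kernel-doc comment says.
 */
static int example_hash_digest(enum cfs_crypto_hash_alg alg,
			       const void *buf, unsigned int len,
			       unsigned char *out)
{
	const struct cfs_crypto_hash_type *type;
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	err = cfs_crypto_hash_alloc(alg, &type, &req, NULL, 0);
	if (err)
		return err;

	sg_init_one(&sg, (void *)buf, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* The tfm was allocated with CRYPTO_ALG_ASYNC masked out, so the
	 * digest completes synchronously and no callback is needed. */
	err = crypto_ahash_digest(req);

	tfm = crypto_ahash_reqtfm(req);
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}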
int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
			int hmac_mode, void *mackey, size_t mackeylen)
{
	int ret;

	hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
	if (unlikely(IS_ERR(hdata->async.s))) {
		ddebug(1, "Failed to load transform for %s", alg_name);
		return -EINVAL;
	}

	/* Copy the key from user and set to TFM. */
	if (hmac_mode != 0) {
		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
		if (unlikely(ret)) {
			ddebug(1, "Setting hmac key failed for %s-%zu.",
			       alg_name, mackeylen * 8);
			ret = -EINVAL;
			goto error;
		}
	}

	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
	hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

	hdata->async.result = kzalloc(sizeof(*hdata->async.result), GFP_KERNEL);
	if (unlikely(!hdata->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&hdata->async.result->completion);

	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
	if (unlikely(!hdata->async.request)) {
		derr(0, "error allocating async crypto request");
		ret = -ENOMEM;
		goto error;
	}

	ahash_request_set_callback(hdata->async.request,
				   CRYPTO_TFM_REQ_MAY_BACKLOG,
				   cryptodev_complete, hdata->async.result);

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		derr(0, "error in crypto_hash_init()");
		goto error_request;
	}

	hdata->init = 1;
	return 0;

error_request:
	ahash_request_free(hdata->async.request);
error:
	kfree(hdata->async.result);
	crypto_free_ahash(hdata->async.s);
	return ret;
}
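/*
 * cryptodev_complete() and the result object are defined elsewhere in
 * cryptodev; a minimal sketch consistent with the usage above follows
 * (struct shape assumed, not the verbatim definition).  The same
 * completion-callback pattern recurs below as hmac_md5_complete,
 * hmac_sha1_complete, crypto_gcm_setkey_done and _fips_cb.
 */
struct cryptodev_result {
	struct completion completion;
	int err;
};

static void cryptodev_complete(struct crypto_async_request *req, int err)
{
	struct cryptodev_result *res = req->data;

	/* -EINPROGRESS only signals that a backlogged request has started;
	 * wait for the final status before waking the submitter. */
	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}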
static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_ahash *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
			       & CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
			       & CRYPTO_TFM_RES_MASK);

	/* Propagate the child's setkey status instead of discarding it. */
	return err;
}
int wcnss_wlan_crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
				   unsigned int keylen)
{
	return crypto_ahash_setkey(tfm, key, keylen);
}
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ahash *ghash = ctx->ghash;
	struct crypto_ablkcipher *ctr = ctx->ctr;
	struct {
		be128 hash;
		u8 iv[8];

		struct crypto_gcm_setkey_result result;

		struct scatterlist sg[1];
		struct ablkcipher_request req;
	} *data;
	int err;

	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				    CRYPTO_TFM_REQ_MASK);

	err = crypto_ablkcipher_setkey(ctr, key, keylen);
	if (err)
		return err;

	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);

	data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	init_completion(&data->result.completion);
	sg_init_one(data->sg, &data->hash, sizeof(data->hash));
	ablkcipher_request_set_tfm(&data->req, ctr);
	ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					crypto_gcm_setkey_done, &data->result);
	ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
				     sizeof(data->hash), data->iv);

	err = crypto_ablkcipher_encrypt(&data->req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		err = wait_for_completion_interruptible(
			&data->result.completion);
		if (!err)
			err = data->result.err;
	}

	if (err)
		goto out;

	crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
	crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
			      CRYPTO_TFM_RES_MASK);

out:
	kfree(data);
	return err;
}
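/*
 * struct crypto_gcm_setkey_result and crypto_gcm_setkey_done() live
 * elsewhere in crypto/gcm.c; a sketch consistent with the waiting code
 * above (the same completion-callback shape as the cryptodev example):
 */
struct crypto_gcm_setkey_result {
	int err;
	struct completion completion;
};

static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
{
	struct crypto_gcm_setkey_result *result = req->data;

	/* A backlogged request reports -EINPROGRESS first; only the final
	 * status should wake crypto_gcm_setkey(). */
	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}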
int hmac_md5(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
	     v_U8_t *output, v_U8_t outlen)
{
	int ret = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_md5_result tresult = {.err = 0};
	void *hash_buff = NULL;

	unsigned char hash_result[64];
	int i;

	memset(output, 0, outlen);

	init_completion(&tresult.completion);

#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	tfm = crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
#else
	tfm = wcnss_wlan_crypto_alloc_ahash("hmac(md5)", CRYPTO_ALG_TYPE_AHASH,
					    CRYPTO_ALG_TYPE_AHASH_MASK);
#endif
	if (IS_ERR(tfm)) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "crypto_alloc_ahash failed");
		ret = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "failed to allocate request for hmac(md5)");
		ret = -ENOMEM;
		goto err_req;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_md5_complete, &tresult);

	hash_buff = kzalloc(psize, GFP_KERNEL);
	if (!hash_buff) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "failed to kzalloc hash_buff");
		ret = -ENOMEM;
		goto err_hash_buf;
	}

	memset(hash_result, 0, 64);
	vos_mem_copy(hash_buff, plaintext, psize);
	sg_init_one(&sg, hash_buff, psize);

	if (ksize) {
		crypto_ahash_clear_flags(tfm, ~0);
#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
		ret = crypto_ahash_setkey(tfm, key, ksize);
#else
		ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);
#endif
		if (ret) {
			VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
				  "crypto_ahash_setkey failed");
			goto err_setkey;
		}
	}

	ahash_request_set_crypt(req, &sg, hash_result, psize);

#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	ret = crypto_ahash_digest(req);
#else
	ret = wcnss_wlan_crypto_ahash_digest(req);
#endif

	VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

	switch (ret) {
	case 0:
		for (i = 0; i < outlen; i++)
			output[i] = hash_result[i];
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&tresult.completion);
		if (!ret && !tresult.err) {
			for (i = 0; i < outlen; i++)
				output[i] = hash_result[i];
			INIT_COMPLETION(tresult.completion);
			break;
		} else {
			VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
				  "wait_for_completion_interruptible failed");
			if (!ret)
				ret = tresult.err;
			goto out;
		}
	default:
		goto out;
	}

out:
err_setkey:
	kfree(hash_buff);
err_hash_buf:
	ahash_request_free(req);
err_req:
#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	crypto_free_ahash(tfm);
#else
	wcnss_wlan_crypto_free_ahash(tfm);
#endif
err_tfm:
	return ret;
}

VOS_STATUS vos_md5_hmac_str(v_U32_t cryptHandle,	/* Handle */
			    v_U8_t *pText,	/* pointer to data stream */
			    v_U32_t textLen,	/* length of data stream */
			    v_U8_t *pKey,	/* pointer to authentication key */
			    v_U32_t keyLen,	/* length of authentication key */
			    v_U8_t digest[VOS_DIGEST_MD5_SIZE]) /* caller digest to be filled in */
{
	int ret = 0;

	ret = hmac_md5(pKey,			/* v_U8_t *key */
		       (v_U8_t)keyLen,		/* v_U8_t ksize */
		       (char *)pText,		/* char *plaintext */
		       (v_U8_t)textLen,		/* v_U8_t psize */
		       digest,			/* v_U8_t *output */
		       VOS_DIGEST_MD5_SIZE);	/* v_U8_t outlen */
	if (ret != 0) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "hmac_md5() call failed");
		return VOS_STATUS_E_FAULT;
	}

	return VOS_STATUS_SUCCESS;
}
int hmac_sha1(v_U8_t *key, v_U8_t ksize, char *plaintext, v_U8_t psize,
	      v_U8_t *output, v_U8_t outlen)
{
	int ret = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_sha1_result tresult = {.err = 0};	/* match hmac_md5() */
	void *hash_buff = NULL;

	unsigned char hash_result[64];
	int i;

	memset(output, 0, outlen);

	init_completion(&tresult.completion);

#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	tfm = crypto_alloc_ahash("hmac(sha1)", CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
#else
	tfm = wcnss_wlan_crypto_alloc_ahash("hmac(sha1)", CRYPTO_ALG_TYPE_AHASH,
					    CRYPTO_ALG_TYPE_AHASH_MASK);
#endif
	if (IS_ERR(tfm)) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "crypto_alloc_ahash failed");
		ret = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "failed to allocate request for hmac(sha1)");
		ret = -ENOMEM;
		goto err_req;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_sha1_complete, &tresult);

	hash_buff = kzalloc(psize, GFP_KERNEL);
	if (!hash_buff) {
		VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
			  "failed to kzalloc hash_buff");
		ret = -ENOMEM;
		goto err_hash_buf;
	}

	memset(hash_result, 0, 64);
	vos_mem_copy(hash_buff, plaintext, psize);
	sg_init_one(&sg, hash_buff, psize);

	if (ksize) {
		crypto_ahash_clear_flags(tfm, ~0);
#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
		ret = crypto_ahash_setkey(tfm, key, ksize);
#else
		ret = wcnss_wlan_crypto_ahash_setkey(tfm, key, ksize);
#endif
		if (ret) {
			VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
				  "crypto_ahash_setkey failed");
			goto err_setkey;
		}
	}

	ahash_request_set_crypt(req, &sg, hash_result, psize);

#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	ret = crypto_ahash_digest(req);
#else
	ret = wcnss_wlan_crypto_ahash_digest(req);
#endif

	VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "ret 0x%x", ret);

	switch (ret) {
	case 0:
		for (i = 0; i < outlen; i++)
			output[i] = hash_result[i];
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&tresult.completion);
		if (!ret && !tresult.err) {
			for (i = 0; i < outlen; i++)
				output[i] = hash_result[i];
			INIT_COMPLETION(tresult.completion);
			break;
		} else {
			VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
				  "wait_for_completion_interruptible failed");
			if (!ret)
				ret = tresult.err;
			goto out;
		}
	default:
		goto out;
	}

out:
err_setkey:
	kfree(hash_buff);
err_hash_buf:
	ahash_request_free(req);
err_req:
#if !defined(CONFIG_CNSS) && (defined(HIF_USB) || defined(HIF_SDIO))
	crypto_free_ahash(tfm);
#else
	wcnss_wlan_crypto_free_ahash(tfm);
#endif
err_tfm:
	return ret;
}
/*
 * SHA/HMAC self tests
 */
int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index = 0, num_tv;
	char *k_out_buf = NULL;
	struct scatterlist fips_sg;
	struct crypto_ahash *tfm;
	struct ahash_request *ahash_req;
	struct _fips_completion fips_completion;
	struct _fips_test_vector_sha_hmac tv_sha_hmac;

	num_tv = (sizeof(fips_test_vector_sha_hmac)) /
		(sizeof(struct _fips_test_vector_sha_hmac));

	/* One-by-one testing */
	for (tv_index = 0; tv_index < num_tv; tv_index++) {
		memcpy(&tv_sha_hmac, &fips_test_vector_sha_hmac[tv_index],
		       (sizeof(struct _fips_test_vector_sha_hmac)));
		k_out_buf = kzalloc(tv_sha_hmac.diglen, GFP_KERNEL);
		if (k_out_buf == NULL) {
			/* kzalloc() returns NULL on failure, so PTR_ERR() on
			 * the buffer was meaningless here and is dropped. */
			pr_err("qcrypto: Failed to allocate memory for k_out_buf\n");
			return -ENOMEM;
		}

		memset(k_out_buf, 0, tv_sha_hmac.diglen);
		init_completion(&fips_completion.completion);

		/*
		 * use_sw flags are set in the dtsi file, which makes the
		 * default Linux API calls go to s/w crypto instead of h/w
		 * crypto.  This code makes sure that all selftest calls
		 * always go to h/w, independent of the DTSI flags.
		 */
		if (tv_sha_hmac.klen == 0) {
			if (selftest_d->prefix_ahash_algo)
				if (_fips_get_alg_cra_name(
						tv_sha_hmac.hash_alg,
						selftest_d->algo_prefix,
						strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					       tv_index);
					goto clr_buf;
				}
		} else {
			if (selftest_d->prefix_hmac_algo)
				if (_fips_get_alg_cra_name(
						tv_sha_hmac.hash_alg,
						selftest_d->algo_prefix,
						strlen(tv_sha_hmac.hash_alg))) {
					rc = -1;
					pr_err("Algo Name is too long for tv %d\n",
					       tv_index);
					goto clr_buf;
				}
		}

		tfm = crypto_alloc_ahash(tv_sha_hmac.hash_alg, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("qcrypto: %s algorithm not found\n",
			       tv_sha_hmac.hash_alg);
			rc = PTR_ERR(tfm);
			goto clr_buf;
		}

		ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!ahash_req) {
			pr_err("qcrypto: ahash_request_alloc failed\n");
			rc = -ENOMEM;
			goto clr_tfm;
		}

		rc = qcrypto_ahash_set_device(ahash_req, selftest_d->ce_device);
		if (rc != 0) {
			pr_err("%s qcrypto_cipher_set_device failed with err %d\n",
			       __func__, rc);
			goto clr_ahash_req;
		}

		ahash_request_set_callback(ahash_req,
					   CRYPTO_TFM_REQ_MAY_BACKLOG,
					   _fips_cb, &fips_completion);

		sg_init_one(&fips_sg, &tv_sha_hmac.input[0], tv_sha_hmac.ilen);

		crypto_ahash_clear_flags(tfm, ~0);
		if (tv_sha_hmac.klen != 0) {
			rc = crypto_ahash_setkey(tfm, tv_sha_hmac.key,
						 tv_sha_hmac.klen);
			if (rc) {
				pr_err("qcrypto: crypto_ahash_setkey failed\n");
				goto clr_ahash_req;
			}
		}

		ahash_request_set_crypt(ahash_req, &fips_sg, k_out_buf,
					tv_sha_hmac.ilen);

		rc = crypto_ahash_digest(ahash_req);
		if (rc == -EINPROGRESS || rc == -EBUSY) {
			rc = wait_for_completion_interruptible(
				&fips_completion.completion);
			err = fips_completion.err;
			if (!rc && !err) {
				INIT_COMPLETION(fips_completion.completion);
			} else {
				pr_err("qcrypto:SHA: wait_for_completion failed\n");
				goto clr_ahash_req;
			}
		}

		if (memcmp(k_out_buf, tv_sha_hmac.digest, tv_sha_hmac.diglen))
			rc = -1;

clr_ahash_req:
		ahash_request_free(ahash_req);
clr_tfm:
		crypto_free_ahash(tfm);
clr_buf:
		kzfree(k_out_buf);

		/* For any failure, return error */
		if (rc)
			return rc;
	}
	return rc;
}
/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a req for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks.  The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in]  hash_alg	hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type	pointer to the hash description in hash_types[] array
 * \param[in,out] req	ahash request to be initialized
 * \param[in]  key	initial hash value/state, NULL to use default value
 * \param[in]  key_len	length of \a key
 *
 * \retval 0 on success
 * \retval negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct ahash_request **req,
				 unsigned char *key,
				 unsigned int key_len)
{
	struct crypto_ahash *tfm;
	int err = 0;

	*type = cfs_crypto_hash_type(hash_alg);
	if (!*type) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}

	/* Keys are only supported for the hmac version */
	if (key && key_len > 0) {
		char *algo_name;

		algo_name = kasprintf(GFP_KERNEL, "hmac(%s)",
				      (*type)->cht_name);
		if (!algo_name)
			return -ENOMEM;

		tfm = crypto_alloc_ahash(algo_name, 0, CRYPTO_ALG_ASYNC);
		kfree(algo_name);
	} else {
		tfm = crypto_alloc_ahash((*type)->cht_name, 0,
					 CRYPTO_ALG_ASYNC);
	}
	if (IS_ERR(tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(tfm);
	}

	*req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!*req) {
		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
		       (*type)->cht_name);
		GOTO(out_free_tfm, err = -ENOMEM);
	}

	ahash_request_set_callback(*req, 0, NULL, NULL);

	if (key)
		err = crypto_ahash_setkey(tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_ahash_setkey(tfm,
					  (unsigned char *)&((*type)->cht_key),
					  (*type)->cht_size);
	if (err)
		GOTO(out_free_req, err);

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
	       cfs_crypto_hash_speeds[hash_alg]);

	err = crypto_ahash_init(*req);
	if (err) {
out_free_req:
		ahash_request_free(*req);
out_free_tfm:
		crypto_free_ahash(tfm);
	}
	return err;
}
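/*
 * Illustrative only (helper name and key are hypothetical, the algorithm id
 * assumes CFS_HASH_ALG_SHA1 from the CFS_HASH_ALG_* ids): with a non-NULL
 * key, the version above switches from "<alg>" to the "hmac(<alg>)"
 * template before keying the transform.
 */
static int example_hmac_alloc(struct ahash_request **req)
{
	const struct cfs_crypto_hash_type *type;
	static const unsigned char key[20] = { 0x0b };	/* placeholder key */

	/* Allocates and keys "hmac(sha1)" rather than plain "sha1". */
	return cfs_crypto_hash_alloc(CFS_HASH_ALG_SHA1, &type, req,
				     (unsigned char *)key, sizeof(key));
}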
static int tegra_crypto_sha(struct tegra_sha_req *sha_req)
{
	struct crypto_ahash *tfm;
	struct scatterlist sg[1];
	char result[64];
	struct ahash_request *req;
	struct tegra_crypto_completion sha_complete;
	void *hash_buff;
	unsigned long *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	tfm = crypto_alloc_ahash(sha_req->algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: hash: Failed to load transform for %s: %ld\n",
		       sha_req->algo, PTR_ERR(tfm));
		goto out_alloc;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for %s\n",
		       sha_req->algo);
		goto out_noreq;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto out_buf;
	}

	init_completion(&sha_complete.restart);

	memset(result, 0, 64);

	hash_buff = xbuf[0];

	memcpy(hash_buff, sha_req->plaintext, sha_req->plaintext_sz);
	sg_init_one(&sg[0], hash_buff, sha_req->plaintext_sz);

	if (sha_req->keylen) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = crypto_ahash_setkey(tfm, sha_req->key,
					  sha_req->keylen);
		if (ret) {
			printk(KERN_ERR "alg: hash: setkey failed on %s: ret=%d\n",
			       sha_req->algo, -ret);
			goto out;
		}
	}

	ahash_request_set_crypt(req, sg, result, sha_req->plaintext_sz);

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_init(req));
	if (ret) {
		pr_err("alg: hash: init failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_update(req));
	if (ret) {
		pr_err("alg: hash: update failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_final(req));
	if (ret) {
		pr_err("alg: hash: final failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = copy_to_user((void __user *)sha_req->result,
			   (const void *)result,
			   crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d) for %s\n",
		       ret, sha_req->algo);
	}

out:
	free_bufs(xbuf);
out_buf:
	ahash_request_free(req);
out_noreq:
	crypto_free_ahash(tfm);
out_alloc:
	return ret;
}
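/*
 * sha_async_hash_op() is not shown in this snippet.  A sketch consistent
 * with its use above, modeled on the inline wait code in tegra_crypt_rsa()
 * below: it turns an in-flight async status into a final one by sleeping
 * on the completion.  The field names (restart, req_err) are taken from
 * that code; the helper body itself is an assumption.
 */
static int sha_async_hash_op(struct ahash_request *req,
			     struct tegra_crypto_completion *tr, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&tr->restart);
		if (!ret)
			ret = tr->req_err;	/* status set by the callback */
		INIT_COMPLETION(tr->restart);
	}
	return ret;
}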
static int tegra_crypt_rsa(struct tegra_crypto_ctx *ctx,
			   struct tegra_rsa_req *rsa_req)
{
	struct crypto_ahash *tfm = NULL;
	struct ahash_request *req = NULL;
	struct scatterlist sg[1];
	char *result = NULL;
	void *hash_buff;
	int ret = 0;
	unsigned long *xbuf[XBUFSIZE];
	struct tegra_crypto_completion rsa_complete;

	switch (rsa_req->algo) {
	case TEGRA_RSA512:
		req = ahash_request_alloc(ctx->rsa512_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa512\n");
			ret = -ENOMEM;	/* previously fell through with ret == 0 */
			goto req_fail;
		}
		tfm = ctx->rsa512_tfm;
		break;
	case TEGRA_RSA1024:
		req = ahash_request_alloc(ctx->rsa1024_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa1024\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa1024_tfm;
		break;
	case TEGRA_RSA1536:
		req = ahash_request_alloc(ctx->rsa1536_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa1536\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa1536_tfm;
		break;
	case TEGRA_RSA2048:
		req = ahash_request_alloc(ctx->rsa2048_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa2048\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa2048_tfm;
		break;
	default:
		ret = -EINVAL;	/* unknown algorithm is an error, not success */
		goto req_fail;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto buf_fail;
	}

	init_completion(&rsa_complete.restart);

	result = kzalloc(rsa_req->keylen >> 16, GFP_KERNEL);
	if (!result) {
		pr_err("\nresult alloc fail\n");
		ret = -ENOMEM;
		goto result_fail;
	}

	hash_buff = xbuf[0];

	memcpy(hash_buff, rsa_req->message, rsa_req->msg_len);

	sg_init_one(&sg[0], hash_buff, rsa_req->msg_len);

	if (!(rsa_req->keylen))
		goto rsa_fail;

	if (!rsa_req->skip_key) {
		ret = crypto_ahash_setkey(tfm, rsa_req->key, rsa_req->keylen);
		if (ret) {
			pr_err("alg: hash: setkey failed\n");
			goto rsa_fail;
		}
	}

	ahash_request_set_crypt(req, sg, result, rsa_req->msg_len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&rsa_complete.restart);
		if (!ret)
			ret = rsa_complete.req_err;
		INIT_COMPLETION(rsa_complete.restart);
	}

	if (ret) {
		pr_err("alg: hash: digest failed\n");
		goto rsa_fail;
	}

	ret = copy_to_user((void __user *)rsa_req->result,
			   (const void *)result,
			   crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d)\n", ret);
	}

rsa_fail:
	kfree(result);
result_fail:
	free_bufs(xbuf);
buf_fail:
	ahash_request_free(req);
req_fail:
	return ret;
}