static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}
static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, hardware padding is done when the first block
		 * is set. Therefore the byte_count must be incremented by 64
		 * after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}
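/*
 * Illustrative sketch (not part of the driver above): a caller exercises
 * the export/import pair through the generic ahash API to suspend a
 * partial hash on one request and resume it on another. The state buffer
 * is sized with crypto_ahash_statesize(); "example_" names are
 * hypothetical.
 */
static int example_ahash_save_restore(struct ahash_request *src_req,
				      struct ahash_request *dst_req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(src_req);
	void *state;
	int ret;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* Dispatches to the driver's export hook, e.g. qce_ahash_export(). */
	ret = crypto_ahash_export(src_req, state);
	if (!ret)
		/* The matching import hook wraps qce_import_common(). */
		ret = crypto_ahash_import(dst_req, state);

	kfree(state);
	return ret;
}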
/**
 * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
 *
 * \param[in]	  req	   ahash request
 * \param[out]	  hash	   pointer to hash buffer to store hash digest
 * \param[in,out] hash_len pointer to hash buffer size; if \a hash == NULL
 *			   or \a hash_len == NULL only free \a req instead
 *			   of computing the hash
 *
 * \retval	0 for success
 * \retval	-EOVERFLOW if hash_len is too small for the hash digest
 * \retval	negative errno for other errors from lower layers
 */
int cfs_crypto_hash_final(struct ahash_request *req,
			  unsigned char *hash, unsigned int *hash_len)
{
	int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	int err;

	if (!hash || !hash_len) {
		err = 0;
		goto free;
	}
	if (*hash_len < size) {
		err = -EOVERFLOW;
		goto free;
	}

	ahash_request_set_crypt(req, NULL, hash, 0);
	err = crypto_ahash_final(req);
	if (err == 0)
		*hash_len = size;
free:
	crypto_free_ahash(crypto_ahash_reqtfm(req));
	ahash_request_free(req);

	return err;
}
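/*
 * Hypothetical caller sketch. It assumes the companion libcfs helpers
 * cfs_crypto_hash_init() and cfs_crypto_hash_update() with the signatures
 * used here (they belong to the same API family but are not reproduced
 * above), and the CFS_HASH_ALG_SHA256 algorithm id.
 */
static int example_digest_buffer(const void *buf, unsigned int len,
				 unsigned char *digest, unsigned int *dlen)
{
	struct ahash_request *req;
	int rc;

	req = cfs_crypto_hash_init(CFS_HASH_ALG_SHA256, NULL, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	rc = cfs_crypto_hash_update(req, buf, len);
	if (rc) {
		/* Passing hash == NULL only releases the request. */
		cfs_crypto_hash_final(req, NULL, NULL);
		return rc;
	}

	return cfs_crypto_hash_final(req, digest, dlen);
}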
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	/* do_div() reduces len in place and returns the remainder. */
	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}
int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
			int hmac_mode, void *mackey, size_t mackeylen)
{
	int ret;

	hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
	if (unlikely(IS_ERR(hdata->async.s))) {
		ddebug(1, "Failed to load transform for %s", alg_name);
		return -EINVAL;
	}

	/* Copy the key from user and set to TFM. */
	if (hmac_mode != 0) {
		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
		if (unlikely(ret)) {
			ddebug(1, "Setting hmac key failed for %s-%zu.",
			       alg_name, mackeylen * 8);
			ret = -EINVAL;
			goto error;
		}
	}

	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
	hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

	hdata->async.result = kzalloc(sizeof(*hdata->async.result), GFP_KERNEL);
	if (unlikely(!hdata->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	init_completion(&hdata->async.result->completion);

	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
	if (unlikely(!hdata->async.request)) {
		derr(0, "error allocating async crypto request");
		ret = -ENOMEM;
		goto error;
	}

	ahash_request_set_callback(hdata->async.request,
				   CRYPTO_TFM_REQ_MAY_BACKLOG,
				   cryptodev_complete, hdata->async.result);

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		derr(0, "error in crypto_hash_init()");
		goto error_request;
	}

	hdata->init = 1;
	return 0;

error_request:
	ahash_request_free(hdata->async.request);
error:
	kfree(hdata->async.result);
	crypto_free_ahash(hdata->async.s);
	return ret;
}
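/*
 * Usage sketch for the helper above. It assumes cryptodev's companion
 * routines cryptodev_hash_update(), cryptodev_hash_final() and
 * cryptodev_hash_deinit() with the signatures used here; they are part
 * of the same driver but not reproduced in this section.
 */
static int example_hmac_once(void *key, size_t keylen,
			     struct scatterlist *sg, size_t len, void *out)
{
	struct hash_data hdata;
	int ret;

	ret = cryptodev_hash_init(&hdata, "hmac(sha256)", 1, key, keylen);
	if (ret)
		return ret;

	ret = cryptodev_hash_update(&hdata, sg, len);
	if (!ret)
		ret = cryptodev_hash_final(&hdata, out);

	cryptodev_hash_deinit(&hdata);
	return ret;
}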
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->last_req) {
		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA is in big endian format.
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}
/*
 * chcr_handle_resp - Unmap the DMA buffers associated with the request
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int error_status)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	struct cpl_fw6_pld *fw6_pld;
	unsigned int digestsize, updated_digestsize;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
		ctx_req.ctx.ablk_ctx =
			ablkcipher_request_ctx(ctx_req.req.ablk_req);
		if (!error_status) {
			fw6_pld = (struct cpl_fw6_pld *)input;
			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
			       AES_BLOCK_SIZE);
		}
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.ablk_ctx->skb) {
			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
			ctx_req.ctx.ablk_ctx->skb = NULL;
		}
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = (struct ahash_request *)req;
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb)
			ctx_req.ctx.ahash_ctx->skb = NULL;
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld), digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld), updated_digestsize);
		}
		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
		break;
	}
	return 0;
}
int crypto4xx_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct scatterlist dst;
	unsigned int ds = crypto_ahash_digestsize(ahash);

	sg_init_one(&dst, req->result, ds);

	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
				  req->nbytes, NULL, 0, ctx->sa_in,
				  ctx->sa_len, 0);
}
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}
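/*
 * The ipad/opad blocks built above implement the RFC 2104 key schedule:
 * HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)). Below is a minimal
 * software sketch of how such pads are consumed, using the synchronous
 * shash API on a recent kernel; it is a generic illustration, not the
 * CESA driver's actual consumer, and "example_" names are hypothetical.
 */
static int example_hmac_from_pads(struct crypto_shash *tfm, /* e.g. "sha1" */
				  const u8 *ipad, const u8 *opad,
				  unsigned int blocksize,
				  const u8 *msg, unsigned int mlen, u8 *mac)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 inner[SHA1_DIGEST_SIZE];
	int ret;

	desc->tfm = tfm;

	/* inner = H(ipad || msg) */
	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, ipad, blocksize);
	if (!ret)
		ret = crypto_shash_update(desc, msg, mlen);
	if (!ret)
		ret = crypto_shash_final(desc, inner);

	/* mac = H(opad || inner) */
	if (!ret)
		ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, opad, blocksize);
	if (!ret)
		ret = crypto_shash_update(desc, inner, sizeof(inner));
	if (!ret)
		ret = crypto_shash_final(desc, mac);

	return ret;
}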
int crypto4xx_hash_init(struct ahash_request *req)
{
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ds;
	struct dynamic_sa_ctl *sa;

	sa = ctx->sa_in;
	ds = crypto_ahash_digestsize(__crypto_ahash_cast(req->base.tfm));
	sa->sa_command_0.bf.digest_len = ds >> 2;
	sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;

	return 0;
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}
static int hmac_sha_digest(const char *algo, char *data_in, size_t dlen,
			   char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	struct ahash_request *req;
	struct hmac_sha_result tresult;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crypto_alloc_ahash failed\n");
		rc = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "failed to allocate request\n");
		rc = -ENOMEM;
		goto err_req;
	}

	if (crypto_ahash_digestsize(tfm) > outlen) {
		printk(KERN_ERR "tfm size > result buffer\n");
		rc = -EINVAL;
		goto err_free_req;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_sha_complete, &tresult);

	sg_init_one(&sg, data_in, dlen);
	crypto_ahash_clear_flags(tfm, ~0);

	ahash_request_set_crypt(req, &sg, hash_out, dlen);
	rc = do_one_ahash_op(req, crypto_ahash_digest(req));

err_free_req:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
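/*
 * For reference, the precomputed zero-length-message digests copied above
 * are the well-known constants:
 *   MD5("")    = d41d8cd98f00b204e9800998ecf8427e
 *   SHA1("")   = da39a3ee5e6b4b0d3255bfef95601890afd80709
 *   SHA256("") = e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
 */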
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct ahash_completion res;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   ahash_complete, &res);

	rc = ahash_wait(crypto_ahash_init(req), &res);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);
	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &res);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &res);
	}
out:
	ahash_request_free(req);
	return rc;
}
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
		    rctx->src_chained);
	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}
static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
		} else {
			mv_hash_final_fallback(req);
		}
	}
}
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct qce_ahash_result result;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
				       CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.error;
	}

	if (ret)
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}
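/*
 * Note (RFC 2104): an HMAC key longer than the underlying block size must
 * first be reduced to H(key), which is why the function above runs
 * over-long keys through "sha1-qce"/"sha256-qce" before storing the digest
 * in ctx->authkey; keys that already fit are copied verbatim.
 */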
static int hmac_sha_update(const char *algo, char *data_in, size_t dlen,
			   char *hash_out, size_t outlen)
{
	int rc = 0;
	struct crypto_ahash *tfm;
	struct scatterlist sg[TVMEMSIZE];
	struct ahash_request *req;
	struct hmac_sha_result tresult;
	int i, j;

	/* Set hash output to 0 initially */
	memset(hash_out, 0, outlen);

	init_completion(&tresult.completion);

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crypto_alloc_ahash failed\n");
		rc = PTR_ERR(tfm);
		goto err_tfm;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "failed to allocate request\n");
		rc = -ENOMEM;
		goto err_req;
	}

	if (crypto_ahash_digestsize(tfm) > outlen) {
		printk(KERN_ERR "tfm size > result buffer\n");
		rc = -EINVAL;
		goto out;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   hmac_sha_complete, &tresult);

	/* Copy the input into page-sized chunks of tvmem[]. */
	sg_init_table(sg, TVMEMSIZE);
	i = 0;
	j = dlen;
	while (j > PAGE_SIZE) {
		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
		memcpy(tvmem[i], data_in + i * PAGE_SIZE, PAGE_SIZE);
		i++;
		j -= PAGE_SIZE;
	}
	sg_set_buf(sg + i, tvmem[i], j);
	memcpy(tvmem[i], data_in + i * PAGE_SIZE, j);

	crypto_ahash_clear_flags(tfm, ~0);
	ahash_request_set_crypt(req, sg, hash_out, dlen);

	rc = do_one_ahash_op(req, crypto_ahash_init(req));
	if (rc)
		goto out;
	rc = do_one_ahash_op(req, crypto_ahash_update(req));
	if (rc)
		goto out;
	rc = do_one_ahash_op(req, crypto_ahash_final(req));
out:
	ahash_request_free(req);
err_req:
	crypto_free_ahash(tfm);
err_tfm:
	return rc;
}
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct ahash_completion res;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   ahash_complete, &res);

	rc = ahash_wait(crypto_ahash_init(req), &res);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails fallback to
		 * using single buffering. Use previous memory allocation size
		 * as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &res);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len)
			goto out3;

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &res);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &res);
out3:
	if (read)
		file->f_mode &= ~FMODE_READ;
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &res);
	}
out1:
	ahash_request_free(req);
	return rc;
}
static int rk_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct rk_crypto_info *dev = NULL;
	unsigned long flags;
	int ret;

	if (!req->nbytes)
		return zero_message_process(req);

	dev = tctx->dev;
	dev->total = req->nbytes;
	dev->left_bytes = req->nbytes;
	dev->aligned = 0;
	dev->mode = 0;
	dev->align_size = 4;
	dev->sg_dst = NULL;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->nents = sg_nents(req->src);

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		dev->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		return -EINVAL;
	}

	rk_ahash_reg_init(dev);

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->crypto_tasklet);

	/*
	 * It takes some time to process the data after the last DMA
	 * transmission.
	 *
	 * The waiting time depends on the length of the last data, so a
	 * fixed delay cannot be used here. usleep_range(10, 50) keeps the
	 * loop from polling the status register too frequently while still
	 * responding quickly once the DMA completes.
	 */
	while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
		usleep_range(10, 50);

	memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
		      crypto_ahash_digestsize(tfm));

	return 0;
}
static int tegra_crypto_sha(struct tegra_sha_req *sha_req)
{
	struct crypto_ahash *tfm;
	struct scatterlist sg[1];
	char result[64];
	struct ahash_request *req;
	struct tegra_crypto_completion sha_complete;
	void *hash_buff;
	unsigned long *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	tfm = crypto_alloc_ahash(sha_req->algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: hash: Failed to load transform for %s: %ld\n",
		       sha_req->algo, PTR_ERR(tfm));
		goto out_alloc;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for %s\n",
		       sha_req->algo);
		goto out_noreq;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto out_buf;
	}

	init_completion(&sha_complete.restart);

	memset(result, 0, 64);

	hash_buff = xbuf[0];

	memcpy(hash_buff, sha_req->plaintext, sha_req->plaintext_sz);
	sg_init_one(&sg[0], hash_buff, sha_req->plaintext_sz);

	if (sha_req->keylen) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = crypto_ahash_setkey(tfm, sha_req->key,
					  sha_req->keylen);
		if (ret) {
			printk(KERN_ERR "alg: hash: setkey failed on %s: ret=%d\n",
			       sha_req->algo, -ret);
			goto out;
		}
	}

	ahash_request_set_crypt(req, sg, result, sha_req->plaintext_sz);

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_init(req));
	if (ret) {
		pr_err("alg: hash: init failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_update(req));
	if (ret) {
		pr_err("alg: hash: update failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_final(req));
	if (ret) {
		pr_err("alg: hash: final failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = copy_to_user((void __user *)sha_req->result,
			   (const void *)result,
			   crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d) for %s\n",
		       ret, sha_req->algo);
	}

out:
	free_bufs(xbuf);
out_buf:
	ahash_request_free(req);
out_noreq:
	crypto_free_ahash(tfm);
out_alloc:
	return ret;
}
static int tegra_crypt_rsa(struct tegra_crypto_ctx *ctx,
			   struct tegra_rsa_req *rsa_req)
{
	struct crypto_ahash *tfm = NULL;
	struct ahash_request *req = NULL;
	struct scatterlist sg[1];
	char *result = NULL;
	void *hash_buff;
	int ret = 0;
	unsigned long *xbuf[XBUFSIZE];
	struct tegra_crypto_completion rsa_complete;

	switch (rsa_req->algo) {
	case TEGRA_RSA512:
		req = ahash_request_alloc(ctx->rsa512_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa512\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa512_tfm;
		break;
	case TEGRA_RSA1024:
		req = ahash_request_alloc(ctx->rsa1024_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa1024\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa1024_tfm;
		break;
	case TEGRA_RSA1536:
		req = ahash_request_alloc(ctx->rsa1536_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa1536\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa1536_tfm;
		break;
	case TEGRA_RSA2048:
		req = ahash_request_alloc(ctx->rsa2048_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa2048\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa2048_tfm;
		break;
	default:
		ret = -EINVAL;
		goto req_fail;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto buf_fail;
	}

	init_completion(&rsa_complete.restart);

	/* The upper 16 bits of keylen carry the modulus (result) length. */
	result = kzalloc(rsa_req->keylen >> 16, GFP_KERNEL);
	if (!result) {
		pr_err("result alloc fail\n");
		ret = -ENOMEM;
		goto result_fail;
	}

	hash_buff = xbuf[0];

	memcpy(hash_buff, rsa_req->message, rsa_req->msg_len);
	sg_init_one(&sg[0], hash_buff, rsa_req->msg_len);

	if (!(rsa_req->keylen)) {
		ret = -EINVAL;
		goto rsa_fail;
	}

	if (!rsa_req->skip_key) {
		ret = crypto_ahash_setkey(tfm, rsa_req->key, rsa_req->keylen);
		if (ret) {
			pr_err("alg: hash: setkey failed\n");
			goto rsa_fail;
		}
	}

	ahash_request_set_crypt(req, sg, result, rsa_req->msg_len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&rsa_complete.restart);
		if (!ret)
			ret = rsa_complete.req_err;
		INIT_COMPLETION(rsa_complete.restart);
	}

	if (ret) {
		pr_err("alg: hash: digest failed\n");
		goto rsa_fail;
	}

	ret = copy_to_user((void __user *)rsa_req->result, (const void *)result,
			   crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d)\n", ret);
	}

rsa_fail:
	kfree(result);
result_fail:
	free_bufs(xbuf);
buf_fail:
	ahash_request_free(req);
req_fail:
	return ret;
}