static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, there is hardware padding done when the first
		 * block is set. Therefore the byte_count must be incremented
		 * by 64 after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}
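A quick worked example of the byte-count adjustment above, with assumed values for illustration (and assuming SHA_PADDING is 64 and SHA_PADDING_MASK is 63, as the comment's "64" suggests): importing an HMAC state with in_count = 200 and blocksize = 64 takes the else branch, so count = 200 + 64 = 264; byte_count[0] then holds 264 & ~63 = 256, and buflen becomes 200 & 63 = 8 partial bytes carried in rctx->buf.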
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}
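The matching .import callback is not shown here. A minimal sketch of how it would feed qce_import_common(), reconstructed from the flag helpers and the sha1_state/sha256_state layouts used in qce_ahash_export() above; the hmac derivation from the two HMAC flags is an assumption:

static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	bool hmac = IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags);
	int ret = -EINVAL;

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		const struct sha1_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buffer, hmac);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		const struct sha256_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buf, hmac);
	}

	return ret;
}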
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
		     unsigned int tcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
	unsigned int i, j, k, temp;
	struct scatterlist sg[8];
	char result[64];
	struct ahash_request *req;
	struct tcrypt_result tresult;
	int ret;
	void *hash_buff;

	init_completion(&tresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", algo);
		ret = -ENOMEM;
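The body of test_hash() is truncated above. For reference, tcrypt_complete() pairs with the standard issue-then-wait idiom on the caller's side; a sketch under that assumption, with the scatterlist/result setup elided (the same idiom appears in qce_ahash_hmac_setkey() further down):

	/* Sketch: issue an async digest and block until tcrypt_complete()
	 * signals tresult.completion; -EINPROGRESS/-EBUSY mean the request
	 * was queued rather than completed synchronously.
	 */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tcrypt_complete, &tresult);
	/* ... fill sg/result and call ahash_request_set_crypt() ... */
	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&tresult.completion);
		ret = tresult.err;
	}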
e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
			      unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg, *cmac_key_sg = NULL;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int need_pad, sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (nbytes)
		rctx->null_msg = 0;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
}
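Every driver snippet in this section open-codes crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)). A companion helper in exactly the style of crypto_ahash_driver_name() factors that out; this matches the crypto_ahash_blocksize() wrapper in include/crypto/hash.h:

/* Return the block size of the hash behind the ahash tfm. */
static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
}

With it, each call site above reduces to blocksize = crypto_ahash_blocksize(tfm);.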
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct qce_ahash_result result;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
				       CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.error;
	}

	if (ret)
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);

	return ret;
}
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * if we have data from a previous update, copy it into tmpbuf; the
	 * old data will be combined with the current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;
	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;
		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is a multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	sg_mark_end(sg_last);

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
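A worked example of the split logic above, with assumed values: for blocksize = 64, rctx->buflen = 10 carried bytes and a 150-byte update, total = 160 and hash_later = 160 % 64 = 32. The last 32 bytes of req->src are stashed in rctx->buf for the next call, nbytes = 128 full-block bytes are hashed now, and the 10 previously buffered bytes are prepended through the two-entry rctx->sg chain; rctx->buflen ends up as 32.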
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save it for the next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* req->nbytes is now a multiple of block_size */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;
		rctx->in_sg_chained = true;
		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
		rctx->in_sg_chained = false;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
		rctx->in_sg_chained = false;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}