/* One-shot CRC32C over the whole request: seed from the tfm context,
 * walk every chunk of the request, then store the bit-inverted result
 * as a little-endian 32-bit value in req->result.
 */
static int crc32c_digest(struct ahash_request *req)
{
	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct crypto_hash_walk walk;
	u32 checksum = *mctx;	/* seed (key) held in the transform context */
	int len;

	len = crypto_hash_walk_first(req, &walk);
	while (len) {
		checksum = crc32c(checksum, walk.data, len);
		len = crypto_hash_walk_done(&walk, 0);
	}

	/* Final inversion; ~cpu_to_le32(x) == cpu_to_le32(~x) since a
	 * per-bit NOT commutes with a byte swap. */
	*(__le32 *)req->result = ~cpu_to_le32(checksum);
	return 0;
}
/* Incremental CRC32C step: fold all data of this request into the
 * running checksum kept in the per-request context.  No final inversion
 * here — that happens only when the digest is produced.
 */
static int crc32c_update(struct ahash_request *req)
{
	u32 *state = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	u32 running = *state;
	int len;

	len = crypto_hash_walk_first(req, &walk);
	while (len) {
		running = crc32c(running, walk.data, len);
		len = crypto_hash_walk_done(&walk, 0);
	}

	*state = running;
	return 0;
}
/* Run a hash operation on the N2 crypto unit's per-CPU control word queue.
 *
 * @req:         the ahash request (source scatterlist, length, result buf)
 * @auth_type:   hardware auth-algorithm selector for the control word
 * @digest_size: hardware digest-size selector for the control word
 * @result_size: number of bytes copied back into req->result on success
 * @hash_loc:    buffer holding the initial hash state; the device reads
 *               the IV from it and writes the final state back into it
 *
 * Returns 0 on success, -ENODEV if this CPU has no queue bound, -EINVAL
 * if the hypervisor rejects the submission, or the fallback tfm's result
 * for oversized requests.
 */
static int n2_hash_async_digest(struct ahash_request *req,
				unsigned int auth_type,
				unsigned int digest_size,
				unsigned int result_size, void *hash_loc)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;	/* default: no CWQ mapped for this CPU */
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		/* Too large for the hardware length field — hand the whole
		 * request to the software fallback transform instead. */
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		/* Propagate only MAY_SLEEP; other flags are dropped. */
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	n2_base_ctx_init(&ctx->base);

	nbytes = crypto_hash_walk_first(req, &walk);

	/* Pin to this CPU for the duration so the per-CPU queue stays ours. */
	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	/* Initial queue entry: carries the full control word, the auth IV
	 * (initial hash state at hash_loc) and the in-place destination. */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, 0, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		/* Extension entries: only source address and length matter;
		 * NOTE(review): control is set to (nbytes - 1), presumably
		 * the hardware's len-minus-one encoding — confirm against
		 * control_word_base()'s length handling. */
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	/* Mark the last entry so the device knows the chain ends here. */
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	/* On success the device left the final hash state at hash_loc. */
	if (!err)
		memcpy(req->result, hash_loc, result_size);

out:
	put_cpu();

	return err;
}