/*
 * sha1_ce_final - process the final padded block and emit the digest
 * @desc: shash request state containing the partial SHA-1 context
 * @out:  destination buffer for the 20-byte digest
 *
 * The Crypto Extensions transform touches NEON registers, so the
 * finalization step must run inside a kernel_neon_begin/end section.
 */
static int sha1_ce_final(struct shash_desc *desc, u8 *out)
{
	kernel_neon_begin_partial(16);
	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
	kernel_neon_end();

	return sha1_base_finish(desc, out);
}
/*
 * sha1_ce_finup - absorb the last chunk of data and produce the digest
 * @desc: shash request state containing the partial SHA-1 context
 * @data: final input bytes
 * @len:  number of bytes at @data
 * @out:  destination buffer for the 20-byte digest
 *
 * When there is no buffered partial block and the input is a nonzero
 * round multiple of the block size, the asm transform can fold the
 * finalization into its last invocation, avoiding a second pass over
 * the state.  Otherwise finalization is performed explicitly in C.
 */
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
	/*
	 * Require len != 0 as well: for an empty input the transform is
	 * never invoked, so the asm code would never get the chance to
	 * finalize and the C finalization below must run instead.
	 */
	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;

	ASM_EXPORT(sha1_ce_offsetof_count,
		   offsetof(struct sha1_ce_state, sst.count));
	ASM_EXPORT(sha1_ce_offsetof_finalize,
		   offsetof(struct sha1_ce_state, finalize));

	/*
	 * Allow the asm code to perform the finalization if there is no
	 * partial data and the input is a round multiple of the block size.
	 */
	sctx->finalize = finalize;

	kernel_neon_begin_partial(16);
	sha1_base_do_update(desc, data, len,
			    (sha1_block_fn *)sha1_ce_transform);
	if (!finalize)
		sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
	kernel_neon_end();

	return sha1_base_finish(desc, out);
}
/*
 * sha1_finup - shared finup helper for the SIMD-accelerated x86 drivers
 * @desc:       shash request state containing the partial SHA-1 context
 * @data:       final input bytes
 * @len:        number of bytes at @data
 * @out:        destination buffer for the 20-byte digest
 * @sha1_xform: the SIMD block transform to use
 *
 * Falls back to the generic C implementation whenever the FPU cannot
 * be used in the current context.
 */
static int sha1_finup(struct shash_desc *desc, const u8 *data,
		      unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
{
	sha1_block_fn *block_fn = (sha1_block_fn *)sha1_xform;

	if (!irq_fpu_usable())
		return crypto_sha1_finup(desc, data, len, out);

	kernel_fpu_begin();
	if (len)
		sha1_base_do_update(desc, data, len, block_fn);
	sha1_base_do_finalize(desc, block_fn);
	kernel_fpu_end();

	return sha1_base_finish(desc, out);
}
/*
 * sha1_neon_finup - absorb the final data and emit the digest via NEON
 * @desc: shash request state containing the partial SHA-1 context
 * @data: final input bytes
 * @len:  number of bytes at @data
 * @out:  destination buffer for the 20-byte digest
 *
 * Falls back to the scalar ARM implementation when SIMD registers
 * cannot be used in the current context.
 */
static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
			   unsigned int len, u8 *out)
{
	sha1_block_fn *neon_fn = (sha1_block_fn *)sha1_transform_neon;

	if (!crypto_simd_usable())
		return sha1_finup_arm(desc, data, len, out);

	kernel_neon_begin();
	if (len)
		sha1_base_do_update(desc, data, len, neon_fn);
	sha1_base_do_finalize(desc, neon_fn);
	kernel_neon_end();

	return sha1_base_finish(desc, out);
}