static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
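/*
 * Note on the tail computation in the walk loop above: each walk chunk
 * only feeds whole AES blocks to the NEON routine, carrying the partial
 * block over via skcipher_walk_done(&walk, tail) -- except on the final
 * chunk (nbytes == total), where CCM's CTR layer may legitimately emit a
 * partial block. A minimal standalone sketch of just that arithmetic
 * (userspace C, hypothetical names, not kernel code):
 */
#include <assert.h>
#include <stdint.h>

#define AES_BLOCK_SIZE 16

/* Stand-in for the tail logic inside ccm_encrypt()'s walk loop. */
static uint32_t ccm_walk_tail(uint32_t nbytes, uint32_t total)
{
	uint32_t tail = nbytes % AES_BLOCK_SIZE;

	if (nbytes == total)	/* final chunk: consume everything */
		tail = 0;
	return tail;
}

int main(void)
{
	/* mid-walk chunk: 5 trailing bytes are deferred to the next pass */
	assert(ccm_walk_tail(37, 100) == 5);
	/* final chunk: the partial block is processed too */
	assert(ccm_walk_tail(37, 37) == 0);
	return 0;
}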
static int sm3_ce_final(struct shash_desc *desc, u8 *out)
{
	if (!crypto_simd_usable())
		return crypto_sm3_finup(desc, NULL, 0, out);

	kernel_neon_begin();
	sm3_base_do_finalize(desc, sm3_ce_transform);
	kernel_neon_end();

	return sm3_base_finish(desc, out);
}
static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	if (!crypto_simd_usable())
		return crypto_sm3_finup(desc, data, len, out);

	kernel_neon_begin();
	sm3_base_do_update(desc, data, len, sm3_ce_transform);
	kernel_neon_end();

	return sm3_ce_final(desc, out);
}
static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	u64 inc;
	struct blkcipher_walk walk;
	struct p8_aes_ctr_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (!crypto_simd_usable()) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);

		skcipher_request_set_sync_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
						    walk.dst.virt.addr,
						    (nbytes & AES_BLOCK_MASK) /
						    AES_BLOCK_SIZE,
						    &ctx->enc_key, walk.iv);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			/* update the IV; needed mostly for the final partial block */
			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
			if (inc > 0)
				while (inc--)
					crypto_inc(walk.iv, AES_BLOCK_SIZE);

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
		if (walk.nbytes) {
			p8_aes_ctr_final(ctx, &walk);
			ret = blkcipher_walk_done(desc, &walk, 0);
		}
	}

	return ret;
}
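/*
 * The loop above advances the CTR IV once per block consumed, via
 * crypto_inc(walk.iv, AES_BLOCK_SIZE). A standalone sketch of the
 * counter-increment semantics assumed here (the buffer is treated as
 * one big-endian integer, carries ripple from the last byte toward the
 * first) -- userspace C with a hypothetical name, not the kernel's
 * actual crypto_inc():
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Big-endian increment of a counter block, carry propagating leftward. */
static void ctr_inc(uint8_t *ctr, size_t size)
{
	for (size_t i = size; i-- > 0; )
		if (++ctr[i] != 0)
			break;	/* stop once a byte did not wrap */
}

int main(void)
{
	uint8_t iv[4] = { 0x00, 0x00, 0x00, 0xff };

	ctr_inc(iv, sizeof(iv));
	assert(iv[3] == 0x00 && iv[2] == 0x01);	/* carry propagated */
	return 0;
}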
static int sha1_finup(struct shash_desc *desc, const u8 *data,
		      unsigned int len, u8 *out,
		      sha1_transform_fn *sha1_xform)
{
	if (!crypto_simd_usable())
		return crypto_sha1_finup(desc, data, len, out);

	kernel_fpu_begin();
	if (len)
		sha1_base_do_update(desc, data, len,
				    (sha1_block_fn *)sha1_xform);
	sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_xform);
	kernel_fpu_end();

	return sha1_base_finish(desc, out);
}
static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	if (!crypto_simd_usable() ||
	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
		return sha1_update_arm(desc, data, len);

	kernel_neon_begin();
	sha1_base_do_update(desc, data, len,
			    (sha1_block_fn *)sha1_transform_neon);
	kernel_neon_end();

	return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
		       unsigned int len, sha1_transform_fn *sha1_xform)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	if (!crypto_simd_usable() ||
	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
		return crypto_sha1_update(desc, data, len);

	/* make sure casting to sha1_block_fn() is safe */
	BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);

	kernel_fpu_begin();
	sha1_base_do_update(desc, data, len, (sha1_block_fn *)sha1_xform);
	kernel_fpu_end();

	return 0;
}
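/*
 * Both sha1_update() and sha1_neon_update() gate the SIMD path on the
 * same condition: the SIMD block function only runs on complete
 * 64-byte blocks, so if the bytes already buffered in the state
 * (count % 64) plus the new bytes still do not fill one block, SIMD
 * has nothing to transform and the generic C path is cheaper. A
 * hypothetical distillation of that gate as standalone userspace C:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SHA1_BLOCK_SIZE 64

/* True when at least one full SHA-1 block can be handed to SIMD. */
static bool sha1_simd_worthwhile(uint64_t count, unsigned int len,
				 bool simd_usable)
{
	return simd_usable &&
	       (count % SHA1_BLOCK_SIZE) + len >= SHA1_BLOCK_SIZE;
}

int main(void)
{
	assert(!sha1_simd_worthwhile(60, 3, true));	/* only 63 buffered */
	assert(sha1_simd_worthwhile(60, 4, true));	/* completes a block */
	assert(!sha1_simd_worthwhile(0, 128, false));	/* SIMD unavailable */
	return 0;
}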
static int simd_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_decrypt(subreq);
}
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			   u32 abytes, u32 *macp)
{
	if (crypto_simd_usable()) {
		kernel_neon_begin();
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
				     num_rounds(key));
		kernel_neon_end();
	} else {
		if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
			int added = min(abytes, AES_BLOCK_SIZE - *macp);

			crypto_xor(&mac[*macp], in, added);

			*macp += added;
			in += added;
			abytes -= added;
		}

		while (abytes >= AES_BLOCK_SIZE) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, AES_BLOCK_SIZE);

			in += AES_BLOCK_SIZE;
			abytes -= AES_BLOCK_SIZE;
		}

		if (abytes > 0) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, abytes);
			*macp = abytes;
		}
	}
}
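/*
 * The scalar fallback above is CCM's CBC-MAC with partial-block
 * buffering: new bytes are XORed into the MAC at offset *macp, and the
 * block cipher is applied each time a block boundary is crossed, with
 * the trailing fill level handed back through *macp. A standalone
 * sketch of the same buffering scheme, with a trivially insecure toy
 * permutation standing in for AES (hypothetical names, illustration
 * only, not the kernel implementation):
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Stand-in for the block cipher: NOT AES, just a visible state change. */
static void toy_permute(uint8_t block[BLOCK_SIZE])
{
	for (int i = 0; i < BLOCK_SIZE; i++)
		block[i] = (uint8_t)(block[i] * 5 + 1);
}

/*
 * Same shape as the scalar path of ccm_update_mac(): top up a partially
 * filled block first, absorb full blocks, then buffer trailing bytes.
 */
static void toy_update_mac(uint8_t mac[BLOCK_SIZE], const uint8_t *in,
			   uint32_t abytes, uint32_t *macp)
{
	if (*macp > 0 && *macp < BLOCK_SIZE) {
		uint32_t added = BLOCK_SIZE - *macp;

		if (added > abytes)
			added = abytes;
		for (uint32_t i = 0; i < added; i++)
			mac[*macp + i] ^= in[i];
		*macp += added;
		in += added;
		abytes -= added;
	}

	while (abytes >= BLOCK_SIZE) {
		toy_permute(mac);
		for (int i = 0; i < BLOCK_SIZE; i++)
			mac[i] ^= in[i];
		in += BLOCK_SIZE;
		abytes -= BLOCK_SIZE;
	}

	if (abytes > 0) {
		toy_permute(mac);
		for (uint32_t i = 0; i < abytes; i++)
			mac[i] ^= in[i];
		*macp = abytes;
	}
}

int main(void)
{
	uint8_t mac[BLOCK_SIZE] = { 0 }, data[40];
	uint32_t macp = 0;

	memset(data, 0xab, sizeof(data));
	toy_update_mac(mac, data, sizeof(data), &macp);
	assert(macp == 40 % BLOCK_SIZE);	/* 8 bytes left buffered */
	return 0;
}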