static void __save_processor_state(struct saved_context *ctxt)
{
	mtrr_save_fixed_ranges(NULL);
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
	store_tr(ctxt->tr);

	/*
	 * segment registers
	 */
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4_safe();
}
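For orientation, here is a minimal sketch of the bracketing pattern that every snippet in this section shares, assuming only the public x86 FPU API (irq_fpu_usable(), kernel_fpu_begin(), kernel_fpu_end() from <asm/fpu/api.h>); the widget_* helper names are hypothetical:

#include <asm/fpu/api.h>	/* kernel_fpu_begin/end, irq_fpu_usable */

/* Hypothetical helpers standing in for a SIMD routine and its C fallback. */
static void widget_hash_simd(u8 *dst, const u8 *src, unsigned int len);
static void widget_hash_generic(u8 *dst, const u8 *src, unsigned int len);

static void widget_hash(u8 *dst, const u8 *src, unsigned int len)
{
	/*
	 * FPU/SIMD state may not be touchable here (e.g. in a hard IRQ
	 * that interrupted another FPU section), so provide a fallback.
	 */
	if (!irq_fpu_usable()) {
		widget_hash_generic(dst, src, len);
		return;
	}

	/*
	 * kernel_fpu_begin() saves the current FPU state and disables
	 * preemption; the region below must not sleep or schedule.
	 */
	kernel_fpu_begin();
	widget_hash_simd(dst, src, len);
	kernel_fpu_end();
}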
static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}
/* Apply the final XOR with 0xFFFFFFFF (the ~ below), unlike crc32_le */
static int __crc32c_pclmul_finup(u32 *crcp, const u8 *data, unsigned int len,
				 u8 *out)
{
	kernel_fpu_begin();
	*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
	kernel_fpu_end();
	return 0;
}
unsigned long
tdb_hash_calc(const char *data, size_t len)
{
#define MUL	sizeof(long)
	size_t i;
	unsigned long crc0 = 0, crc1 = 0, h;
	unsigned long *d = (unsigned long *)data;
	size_t n = (len / MUL) & ~1UL;

	/* TODO fallback to plain C for small data. */
	kernel_fpu_begin();

	for (i = 0; i < n; i += 2) {
		CRCQ(crc0, d[i]);
		CRCQ(crc1, d[i + 1]);
	}
	if (n * MUL + MUL <= len) {
		CRCQ(crc0, d[n]);
		n++;
	}

	kernel_fpu_end();

	h = (crc1 << 32) | crc0;

	/*
	 * Generate relatively small and dense hash tail values - they are
	 * good for short strings in htrie which uses less significant bits
	 * at root, however collisions are very probable.
	 */
	n *= MUL;
	switch (len - n) {
	case 7:
		h += data[n] * n;
		++n;
		/* fall through */
	case 6:
		h += data[n] * n;
		++n;
		/* fall through */
	case 5:
		h += data[n] * n;
		++n;
		/* fall through */
	case 4:
		h += data[n] * n;
		++n;
		/* fall through */
	case 3:
		h += data[n] * n;
		++n;
		/* fall through */
	case 2:
		h += data[n] * n;
		++n;
		/* fall through */
	case 1:
		h += data[n] * n;
	}

	return h;
#undef MUL
}
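The TODO above asks for a plain-C path for short inputs, where the FPU save/restore cost dominates. A minimal sketch of the dispatch shape follows, assuming the kernel's software crc32c() helper (from <linux/crc32c.h>) as a stand-in for the CRCQ hardware step; the threshold name is hypothetical, and note that a real fallback would have to reproduce the crc0/crc1 interleaving exactly so both paths yield identical hashes:

#include <linux/crc32c.h>	/* crc32c(): software/accelerated CRC32C */
#include <asm/fpu/api.h>	/* irq_fpu_usable() */

#define TDB_HASH_FPU_THRESHOLD	64	/* hypothetical breakeven, bytes */

static unsigned long
tdb_hash_calc_small(const char *data, size_t len)
{
	size_t n = len & ~7UL;	/* bytes covered by 8-byte CRC steps */
	unsigned long h = crc32c(0, data, n);

	/* Same dense-tail mixing as above for the last 0..7 bytes. */
	for (; n < len; ++n)
		h += data[n] * n;
	return h;
}

static unsigned long
tdb_hash_calc_dispatch(const char *data, size_t len)
{
	if (len < TDB_HASH_FPU_THRESHOLD || !irq_fpu_usable())
		return tdb_hash_calc_small(data, len);
	return tdb_hash_calc(data, len);
}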
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*
		 * The key is not aligned: use an auxiliary aligned pointer,
		 * but keep the original pointer for kfree() below.
		 */
		new_key_mem = kmalloc(key_len + AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}
static int crc32c_pclmul_update(struct shash_desc *desc, const u8 *data,
				unsigned int len)
{
	u32 *crcp = shash_desc_ctx(desc);

	kernel_fpu_begin();
	*crcp = crc_pcl(data, len, *crcp);
	kernel_fpu_end();

	return 0;
}
static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
			  u8 *out)
{
	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
		kernel_fpu_end();
	} else
		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
	return 0;
}
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data,
				    unsigned int len, u8 *out)
{
	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
		kernel_fpu_begin();
		*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
		kernel_fpu_end();
	} else
		*(__le32 *)out =
			~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
	return 0;
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}
static int chksum_update(struct shash_desc *desc, const u8 *data,
			 unsigned int length)
{
	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
		kernel_fpu_end();
	} else
		ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
	return 0;
}
static int helper_rfc4106_decrypt(struct aead_request *req)
{
	int ret;

	if (unlikely(!irq_fpu_usable())) {
		WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
		ret = -EINVAL;
	} else {
		kernel_fpu_begin();
		ret = __driver_rfc4106_decrypt(req);
		kernel_fpu_end();
	}
	return ret;
}
static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *out)
{
	if (!irq_fpu_usable())
		return crypto_sha512_finup(desc, data, len, out);

	kernel_fpu_begin();
	if (len)
		sha512_base_do_update(desc, data, len,
				      (sha512_block_fn *)sha512_transform_asm);
	sha512_base_do_finalize(desc,
				(sha512_block_fn *)sha512_transform_asm);
	kernel_fpu_end();

	return sha512_base_finish(desc, out);
}
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
	u8 *dst = dctx->buffer;

	if (dctx->bytes) {
		u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		/* Zero-pad the tail (XOR with 0 leaves the bytes as-is). */
		while (dctx->bytes--)
			*tmp++ ^= 0;

		kernel_fpu_begin();
		clmul_ghash_mul(dst, &ctx->shash);
		kernel_fpu_end();
	}

	dctx->bytes = 0;
}
static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
				   unsigned int len)
{
	u32 *crcp = shash_desc_ctx(desc);

	/*
	 * Use the faster PCL version if the data size is large enough to
	 * overcome the kernel FPU state save/restore overhead.
	 */
	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
		kernel_fpu_begin();
		*crcp = crc_pcl(data, len, *crcp);
		kernel_fpu_end();
	} else
		*crcp = crc32c_intel_le_hw(*crcp, data, len);
	return 0;
}
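As a hedged usage sketch (not part of the driver), this is how the update hook above is typically exercised through the kernel's generic shash API; the crypto API picks whichever "crc32c" implementation has the highest priority, error handling is abbreviated, and the function name is illustrative:

#include <crypto/hash.h>
#include <linux/err.h>

/* Digest a buffer with the winning "crc32c" driver (e.g. crc32c-intel). */
static int crc32c_digest_example(const u8 *buf, unsigned int len, u8 *out4)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, buf, len, out4);
	}

	crypto_free_shash(tfm);
	return err;
}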
static void crypto_aegis256_aesni_crypt(struct aead_request *req,
					struct aegis_block *tag_xor,
					unsigned int cryptlen,
					const struct aegis_crypt_ops *ops)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
	struct aegis_state state;

	kernel_fpu_begin();

	crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
	crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
	crypto_aegis256_aesni_process_crypt(&state, req, ops);
	crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);

	kernel_fpu_end();
}
static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);

	if (!irq_fpu_usable() ||
	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
		return crypto_sha512_update(desc, data, len);

	/* make sure casting to sha512_block_fn() is safe */
	BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0);

	kernel_fpu_begin();
	sha512_base_do_update(desc, data, len,
			      (sha512_block_fn *)sha512_transform_asm);
	kernel_fpu_end();

	return 0;
}
static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child =
			cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
/* Add padding and return the message digest. */
static int sha256_ssse3_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA256_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);

	if (!irq_fpu_usable()) {
		crypto_sha256_update(desc, padding, padlen);
		crypto_sha256_update(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_fpu_begin();
		/* We need to fill a whole block for __sha256_ssse3_update() */
		if (padlen <= 56) {
			sctx->count += padlen;
			memcpy(sctx->buf + index, padding, padlen);
		} else {
			__sha256_ssse3_update(desc, padding, padlen, index);
		}
		__sha256_ssse3_update(desc, (const u8 *)&bits,
				      sizeof(bits), 56);
		kernel_fpu_end();
	}

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
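A quick worked check of the padding arithmetic above (illustrative, not from the driver):

/*
 * count = 100 bytes -> index = 100 % 64 = 36; since 36 < 56,
 *                      padlen = 56 - 36 = 20.
 * count = 120 bytes -> index = 120 % 64 = 56; since 56 >= 56,
 *                      padlen = (64 + 56) - 56 = 64 (a full extra block).
 * Either way, index + padlen == 56 (mod 64), leaving exactly 8 bytes
 * for the big-endian bit count that completes the final block.
 */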
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}
static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
int ckpt_restore_fpu(ckpt_desc_t desc)
{
	int ret;
	int flag;

	log_restore_fpu("restoring fpu ...");
	if (ckpt_read(desc, &flag, sizeof(int)) != sizeof(int)) {
		log_err("failed to get file");
		return -EIO;
	}

	kernel_fpu_begin();
	clear_used_math();
	if (flag) {
		if (!ckpt_get_i387(current)) {
			init_fpu(current);
			if (!ckpt_get_i387(current)) {
				log_err("failed to get i387");
				ret = -EFAULT;
				goto out;
			}
		}
		if (ckpt_read(desc, ckpt_get_i387(current), xstate_size)
		    != xstate_size) {
			log_err("failed to get i387");
			ret = -EFAULT;
			goto out;
		}
		ret = ckpt_check_fpu_state();
		if (ret) {
			log_err("failed to restore i387");
			goto out;
		}
		set_used_math();
	}
	ret = 0;
out:
	/* Balance kernel_fpu_begin() on the error paths as well. */
	kernel_fpu_end();
	if (ret)
		return ret;

	log_restore_pos(desc);
	return 0;
}
static int chacha20_simd(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 *state, state_buf[16 + 2] __aligned(8);
	struct skcipher_walk walk;
	int err;

	BUILD_BUG_ON(CHACHA20_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA20_STATE_ALIGN);

	if (req->cryptlen <= CHACHA20_BLOCK_SIZE || !may_use_simd())
		return crypto_chacha20_crypt(req);

	err = skcipher_walk_virt(&walk, req, true);

	crypto_chacha20_init(state, ctx, walk.iv);

	kernel_fpu_begin();

	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
		err = skcipher_walk_done(&walk,
					 walk.nbytes % CHACHA20_BLOCK_SIZE);
	}

	if (walk.nbytes) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes);
		err = skcipher_walk_done(&walk, 0);
	}

	kernel_fpu_end();

	return err;
}
static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
			 struct scatterlist *src, unsigned int nbytes)
{
	u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
	struct blkcipher_walk walk;
	int err;

	if (!may_use_simd())
		return crypto_chacha20_crypt(desc, dst, src, nbytes);

	state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);

	crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);

	kernel_fpu_begin();

	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % CHACHA20_BLOCK_SIZE);
	}

	if (walk.nbytes) {
		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	kernel_fpu_end();

	return err;
}
static int xts_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);

	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
}

static void rfc4106_set_hash_subkey_done(struct crypto_async_request *req,
					 int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key,
				   unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/*
	 * Clear the data in the hash sub key container to zero: we want to
	 * cipher all zeros to create the hash sub key.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
				     &req_data->sg, RFC4106_HASH_SUBKEY_SIZE,
				     req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* The key is not aligned: use an auxiliary aligned pointer. */
		new_key_mem = kmalloc(key_len + AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/*
 * This is the Integrity Check Value (aka the authentication tag) length;
 * it can be 8, 12 or 16 bytes long.
 */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child =
			cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}
static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int err;
	struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
	struct crypto_blkcipher *child = ctx->child;
	struct blkcipher_desc desc = {
		.tfm = child,
		.info = desc_in->info,
		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	kernel_fpu_begin();
	err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
	kernel_fpu_end();

	return err;
}

static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int err;
	struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
	struct crypto_blkcipher *child = ctx->child;
	struct blkcipher_desc desc = {
		.tfm = child,
		.info = desc_in->info,
		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	kernel_fpu_begin();
	err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes);
	kernel_fpu_end();

	return err;
}

static int crypto_fpu_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}

static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = crypto_alloc_instance("fpu", alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = alg->cra_flags;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
	inst->alg.cra_init = crypto_fpu_init_tfm;
	inst->alg.cra_exit = crypto_fpu_exit_tfm;
	inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt;
	inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static void crypto_fpu_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
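For context, a hedged sketch of how a template like this is consumed: a caller allocates the wrapped algorithm by name, and every encrypt/decrypt call on it then runs inside kernel_fpu_begin()/kernel_fpu_end(). The inner algorithm name below is an assumption, chosen only for illustration:

#include <linux/crypto.h>
#include <linux/err.h>

/* Hedged usage sketch: instantiate the "fpu" template around an inner
 * blkcipher (name illustrative) so the FPU is held across each call. */
static int fpu_template_example(void)
{
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher("fpu(ecb(__aes-aesni))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... use crypto_blkcipher_encrypt()/decrypt() as usual ... */

	crypto_free_blkcipher(tfm);
	return 0;
}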
void fpu_begin(void)
{
	/* asm ("fstenv b_SaveFPUReg"); */
	kernel_fpu_begin();
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
				  GLUE_FUNC_CAST(aesni_enc));
}
static int xts_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}

static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key,
				   unsigned int key_len)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_cipher;

	/*
	 * Clear the data in the hash sub key container to zero: we want to
	 * cipher all zeros to create the hash sub key.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
	crypto_free_cipher(tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * This is the Integrity Check Value (aka the authentication tag) length;
 * it can be 8, 12 or 16 bytes long.
 */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, we need the AAD length to be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/*
	 * The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet.
	 */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk = {};
	unsigned int i;

	/*
	 * Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, we need the AAD length to be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
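Both helpers above assemble the same 16-byte GCM IV inline; a hedged distillation of that layout (helper name hypothetical), following RFC 4106: a 4-byte salt carried in the key, the 8-byte explicit IV from the packet, and a 32-bit block counter initialized to 1:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Hypothetical helper: build the 16-byte IV used by the RFC 4106 code. */
static void rfc4106_build_iv(u8 iv[16], const u8 nonce[4],
			     const u8 explicit_iv[8])
{
	memcpy(iv, nonce, 4);			/* salt from the key */
	memcpy(iv + 4, explicit_iv, 8);		/* per-packet explicit IV */
	*(__be32 *)(iv + 12) = cpu_to_be32(1);	/* initial block counter */
}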