static int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;
	mm_segment_t old_fs;

	/*
	 * If either operand lives in user space, widen the address limit
	 * and disable page faults so characters can be fetched atomically
	 * from tracing context.
	 */
	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				/* Both strings ended together: equal. */
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal) {
					ret = parse_char(estack_ax(stack, top),
							&char_ax, &offset_ax);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (char_bx == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_bx(stack, top)->u.s.literal) {
					ret = parse_char(estack_bx(stack, top),
							&char_bx, &offset_bx);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = 1;
				break;
			}
		}
		/*
		 * Literals may contain escape sequences. By the convention
		 * used here, parse_char() returning -1 ends the comparison
		 * as a match, and -2 flags an escaped character.
		 */
		if (estack_bx(stack, top)->u.s.literal) {
			ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both chars */
		}
		if (estack_ax(stack, top)->u.s.literal) {
			ret = parse_char(estack_ax(stack, top),
					&char_ax, &offset_ax);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}
	return diff;
}
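/*
 * get_char() and parse_char() are defined elsewhere in the interpreter
 * and are not shown above. Below is a minimal sketch of what get_char()
 * plausibly looks like given the set_fs()/pagefault_disable() setup in
 * stack_strcmp(); the estack_entry field names (seq_len, user_str, str)
 * are assumptions for illustration, not taken from this file.
 */
static char get_char(struct estack_entry *reg, size_t offset)
{
	if (unlikely(offset >= reg->u.s.seq_len))
		return '\0';
	if (reg->u.s.user) {
		char c;

		/*
		 * Page faults are disabled by the caller; treat a failed
		 * copy from user space as end of string.
		 */
		if (unlikely(__copy_from_user_inatomic(&c,
				reg->u.s.user_str + offset, sizeof(c))))
			return '\0';
		return c;
	} else {
		return reg->u.s.str[offset];
	}
}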
static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	u64 inc;
	struct blkcipher_walk walk;
	struct p8_aes_ctr_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
	struct blkcipher_desc fallback_desc = {
		.tfm = ctx->fallback,
		.info = desc->info,
		.flags = desc->flags
	};

	if (in_interrupt()) {
		/* VSX cannot be used in interrupt context; use the fallback. */
		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
					       nbytes);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
			/*
			 * The VSX unit may only be touched with preemption
			 * and page faults disabled.
			 */
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
						    walk.dst.virt.addr,
						    (nbytes & AES_BLOCK_MASK) /
						    AES_BLOCK_SIZE,
						    &ctx->enc_key, walk.iv);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			/*
			 * Advance the IV by the number of blocks processed;
			 * needed for the next chunk and the final partial
			 * block.
			 */
			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
			while (inc--)
				crypto_inc(walk.iv, AES_BLOCK_SIZE);

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
		if (walk.nbytes) {
			/* Handle the final partial block. */
			p8_aes_ctr_final(ctx, &walk);
			ret = blkcipher_walk_done(desc, &walk, 0);
		}
	}

	return ret;
}

struct crypto_alg p8_aes_ctr_alg = {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "p8_aes_ctr",
	.cra_module = THIS_MODULE,
	.cra_priority = 2000,
	.cra_type = &crypto_blkcipher_type,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
	.cra_alignmask = 0,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
	.cra_init = p8_aes_ctr_init,
	.cra_exit = p8_aes_ctr_exit,
	.cra_blkcipher = {
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = p8_aes_ctr_setkey,
		.encrypt = p8_aes_ctr_crypt,
		.decrypt = p8_aes_ctr_crypt,
	},
};
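/*
 * p8_aes_ctr_final() is referenced above but not shown. A minimal
 * sketch of the usual CTR tail handling follows: encrypt the counter
 * block once, XOR the keystream into the remaining bytes, and bump the
 * counter. The single-block entry point aes_p8_encrypt() is an
 * assumption inferred from the driver's other aes_p8_* calls.
 */
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
			     struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	/* keystream ^= src, then copy the result out. */
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}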
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	struct blkcipher_walk walk;
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
	struct blkcipher_desc fallback_desc = {
		.tfm = ctx->fallback,
		.info = desc->info,
		.flags = desc->flags
	};

	if (in_interrupt()) {
		/* VSX cannot be used in interrupt context; use the fallback. */
		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
					       nbytes);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
			/*
			 * Take the VSX unit per chunk, as in the CTR path
			 * above, so preemption and page faults are not left
			 * disabled across blkcipher_walk_done().
			 * enable_kernel_vsx() also covers the Altivec
			 * register set, so no separate
			 * enable_kernel_altivec() is needed.
			 */
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->enc_key, walk.iv, 1);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
	}

	return ret;
}

static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	struct blkcipher_walk walk;
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
	struct blkcipher_desc fallback_desc = {
		.tfm = ctx->fallback,
		.info = desc->info,
		.flags = desc->flags
	};

	if (in_interrupt()) {
		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
					       nbytes);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->dec_key, walk.iv, 0);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
	}

	return ret;
}

struct crypto_alg p8_aes_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "p8_aes_cbc",
	.cra_module = THIS_MODULE,
	.cra_priority = 1000,
	.cra_type = &crypto_blkcipher_type,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
	.cra_alignmask = 0,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
	.cra_init = p8_aes_cbc_init,
	.cra_exit = p8_aes_cbc_exit,
	.cra_blkcipher = {
		/* CBC requires a block-sized IV. */
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = p8_aes_cbc_setkey,
		.encrypt = p8_aes_cbc_encrypt,
		.decrypt = p8_aes_cbc_decrypt,
	},
};
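/*
 * The setkey handler referenced in p8_aes_cbc_alg is not shown here.
 * A minimal sketch follows, assuming that aes_p8_set_encrypt_key() and
 * aes_p8_set_decrypt_key() mirror the driver's other aes_p8_* entry
 * points, and that the fallback tfm must be keyed too so the
 * in_interrupt() path keeps working.
 */
static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int keylen)
{
	int ret;
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	/* Key the fallback as well, for requests served in interrupt context. */
	ret |= crypto_blkcipher_setkey(ctx->fallback, key, keylen);

	return ret ? -EINVAL : 0;
}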