void
iounmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a bad idea
	 * also, in case the page changes cacheability attributes or becomes
	 * a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);

	pagefault_enable();
}
Example #2
/* Clear any atomic kmap fixmap slots that are no longer in use on @cpu. */
static void kmap_remove_unused_cpu(int cpu)
{
	int start_idx, idx, type;

	pagefault_disable();
	type = kmap_atomic_idx();
	start_idx = type + 1 + KM_TYPE_NR * cpu;

	for (idx = start_idx; idx < KM_TYPE_NR + KM_TYPE_NR * cpu; idx++) {
		unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		pte_t ptep;

		ptep = get_top_pte(vaddr);
		if (ptep)
			set_top_pte(vaddr, __pte(0));
	}
	pagefault_enable();
}
Example #3
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
                struct blkcipher_walk *walk)
{
    u8 *ctrblk = walk->iv;
    u8 keystream[AES_BLOCK_SIZE];
    u8 *src = walk->src.virt.addr;
    u8 *dst = walk->dst.virt.addr;
    unsigned int nbytes = walk->nbytes;

    pagefault_disable();
    enable_kernel_altivec();
    aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
    pagefault_enable();

    crypto_xor(keystream, src, nbytes);
    memcpy(dst, keystream, nbytes);
    crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
Example #4
static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int keylen)
{
	int ret;
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
	return ret;
}
Example #5
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
        int write_bytes,
        struct page **prepared_pages,
        struct iov_iter *i)
{
    size_t copied = 0;
    int pg = 0;
    int offset = pos & (PAGE_CACHE_SIZE - 1);
    int total_copied = 0;

    while (write_bytes > 0) {
        size_t count = min_t(size_t,
                             PAGE_CACHE_SIZE - offset, write_bytes);
        struct page *page = prepared_pages[pg];
        /*
         * Copy data from userspace to the current page
         *
         * Disable pagefault to avoid recursive lock since
         * the pages are already locked
         */
        pagefault_disable();
        copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
        pagefault_enable();

        /* Flush processor's dcache for this page */
        flush_dcache_page(page);
        iov_iter_advance(i, copied);
        write_bytes -= copied;
        total_copied += copied;

        /* Return to btrfs_file_aio_write to fault page */
        if (unlikely(copied == 0)) {
            break;
        }

        if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
            offset += copied;
        } else {
            pg++;
            offset = 0;
        }
    }
    return total_copied;
}
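The comments above explain the pattern: pagefaults must stay disabled while the destination pages are locked, and a zero-byte copy returns to the caller so it can drop the locks and fault the source pages in. Below is a minimal caller-side sketch of that retry loop, under the assumption that iov_iter_fault_in_readable() is available in this kernel era; the names are illustrative, not btrfs's actual caller.

/* Illustrative retry loop around the atomic copy above (not btrfs code). */
static ssize_t copy_with_retry_sketch(loff_t pos, int num_pages, size_t bytes,
				      struct page **pages, struct iov_iter *i)
{
	ssize_t total = 0;

	while (bytes > 0) {
		size_t copied;

		/* May sleep, so it must run before any page locks are taken. */
		if (unlikely(iov_iter_fault_in_readable(i, bytes)))
			return total ? total : -EFAULT;

		/* ... lock/prepare the destination pages here ... */
		copied = btrfs_copy_from_user(pos, num_pages, bytes, pages, i);
		/* ... unlock the destination pages here ... */

		if (copied == 0)
			continue;	/* fault the source in again and retry */

		pos += copied;
		bytes -= copied;
		total += copied;
	}
	return total;
}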
Example #6
/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context,
	 * disable pagefaults so that its behaviour is consistent even when
	 * called from other contexts.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
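For context, a hedged usage sketch (hypothetical caller, not from this source): copy_from_user_nmi() is typically used by profiling code that has to follow user stack frames from NMI context, and it returns the number of bytes it could not copy, so zero means success.

/* Illustrative frame read from NMI context; the frame layout is assumed. */
struct user_frame_sketch {
	const void __user *next_fp;
	unsigned long return_address;
};

static int read_user_frame_sketch(const void __user *fp,
				  struct user_frame_sketch *frame)
{
	unsigned long not_copied;

	not_copied = copy_from_user_nmi(frame, fp, sizeof(*frame));
	return not_copied ? -EFAULT : 0;
}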
Example #7
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		__cpuc_flush_dcache_page((void *)vaddr);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
Example #8
static int kplib_user_string(ktap_state *ks)
{
	unsigned long addr;
	char str[256] = {0};
	int ret;

	kp_arg_check(ks, 1, KTAP_TYPE_NUMBER);

	addr = nvalue(kp_arg(ks, 1));

	pagefault_disable();
	ret = __copy_from_user_inatomic((void *)str, (const void *)addr, 256);
	(void) &ret;  /* Silence compiler warning. */
	pagefault_enable();
	str[255] = '\0';
	set_string(ks->top, kp_str_new(ks, str));

	incr_top(ks);
	return 1;
}
Example #9
static int kplib_user_string(ktap_state_t *ks)
{
	unsigned long addr = kp_arg_checknumber(ks, 1);
	char str[256] = {0};
	ktap_str_t *ts;
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic((void *)str, (const void *)addr, 256);
	(void) &ret;  /* Silence compiler warning. */
	pagefault_enable();
	str[255] = '\0';

	ts = kp_str_newz(ks, str);
	if (unlikely(!ts))
		return -1;

	set_string(ks->top, ts);
	incr_top(ks);
	return 1;
}
Example #10
/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!valid_address(kbt, address)) {
		return 0;	/* invalid user-space address */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}
Example #11
/* Return the length of the string, including the terminating NUL byte */
static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
							void *addr, void *dest)
{
	int ret, len = 0;
	u8 c;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);
	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
Example #12
/**
 * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
{
	mm_segment_t old_fs = get_fs();
	const void *src = unsafe_addr;
	long ret;

	if (unlikely(count <= 0))
		return 0;

	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __get_user(*dst++, (const char __user __force *)src++);
	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);

	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : src - unsafe_addr;
}
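A hedged usage sketch of the return convention documented above (the caller and buffer are illustrative, not from this source): a caller can tell truncation (return value equals count) apart from an unreadable address (-EFAULT).

/* Illustrative caller; the probed address comes from elsewhere. */
static void dump_probed_name_sketch(const void *probed_addr)
{
	char name[64];
	long len;

	len = strncpy_from_unsafe(name, probed_addr, sizeof(name));
	if (len < 0)
		pr_debug("unreadable address %p\n", probed_addr);
	else if (len == sizeof(name))
		pr_debug("name truncated: %s\n", name);
	else
		pr_debug("name (%ld bytes incl. NUL): %s\n", len, name);
}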
Example #13
void __kunmap_atomic(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it.  Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
}
Example #14
static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int keylen)
{
	int ret;
	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	ret = xts_check_key(tfm, key, keylen);
	if (ret)
		return ret;

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
	ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
	ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
	return ret;
}
Example #15
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a bad idea
	 * also, in case the page changes cacheability attributes or becomes
	 * a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
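For context, a minimal sketch of how callers paired the old two-argument API shown above (hypothetical caller; KM_USER0 is one of the per-CPU km_type slots):

/* Illustrative caller of the two-argument kmap_atomic()/kunmap_atomic() API. */
static void zero_highpage_sketch(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}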
Example #16
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it.  Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
}
Example #17
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it.  Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
#ifdef CONFIG_PREEMPT_RT_FULL
		current->kmap_pte[type] = __pte(0);
#endif
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
Example #18
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_top_pte(vaddr, __pte(0));
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
Example #19
static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
			 unsigned long address, unsigned long error_code)
{
	u16 instruction;
	int rc;
#ifdef CONFIG_COMPAT
	int compat;
#endif

	pagefault_disable();
	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	pagefault_enable();
	if (rc)
		return -EFAULT;

	up_read(&mm->mmap_sem);
	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
#ifdef CONFIG_COMPAT
	compat = test_tsk_thread_flag(current, TIF_31BIT);
	if (compat && instruction == 0x0a77)
		sys32_sigreturn(regs);
	else if (compat && instruction == 0x0aad)
		sys32_rt_sigreturn(regs);
	else
#endif
	if (instruction == 0x0a77)
		sys_sigreturn(regs);
	else if (instruction == 0x0aad)
		sys_rt_sigreturn(regs);
	else {
		current->thread.prot_addr = address;
		current->thread.trap_no = error_code;
		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
	}
	return 0;
}
Example #20
static int p8_ghash_final(struct shash_desc *desc, u8 *out)
{
    int i;
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

    if (IN_INTERRUPT) {
        return crypto_shash_final(&dctx->fallback_desc, out);
    } else {
        if (dctx->bytes) {
            for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
                dctx->buffer[i] = 0;
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
                    GHASH_DIGEST_SIZE);
            pagefault_enable();
            dctx->bytes = 0;
        }
        memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
        return 0;
    }
}
Example #21
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	struct blkcipher_walk walk;
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
	struct blkcipher_desc fallback_desc = {
		.tfm = ctx->fallback,
		.info = desc->info,
		.flags = desc->flags
	};

	if (in_interrupt()) {
		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
					       nbytes);
	} else {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();

		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->enc_key, walk.iv, 1);
			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}

		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	}

	return ret;
}

static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	struct blkcipher_walk walk;
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
	struct blkcipher_desc fallback_desc = {
		.tfm = ctx->fallback,
		.info = desc->info,
		.flags = desc->flags
	};

	if (in_interrupt()) {
		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
					       nbytes);
	} else {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();

		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->dec_key, walk.iv, 0);
			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}

		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	}

	return ret;
}


struct crypto_alg p8_aes_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "p8_aes_cbc",
	.cra_module = THIS_MODULE,
	.cra_priority = 1000,
	.cra_type = &crypto_blkcipher_type,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
	.cra_alignmask = 0,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
	.cra_init = p8_aes_cbc_init,
	.cra_exit = p8_aes_cbc_exit,
	.cra_blkcipher = {
			  .ivsize = AES_BLOCK_SIZE,
			  .min_keysize = AES_MIN_KEY_SIZE,
			  .max_keysize = AES_MAX_KEY_SIZE,
			  .setkey = p8_aes_cbc_setkey,
			  .encrypt = p8_aes_cbc_encrypt,
			  .decrypt = p8_aes_cbc_decrypt,
	},
};
Example #22
static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;
	mm_segment_t old_fs;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (char_bx == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_bx(stack, top)->u.s.literal) {
					ret = parse_char(estack_bx(stack, top),
						&char_bx, &offset_bx);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = 1;
				break;
			}
		}
		if (estack_bx(stack, top)->u.s.literal) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}
	return diff;
}
Example #23
static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	u64 inc;
	struct blkcipher_walk walk;
	struct p8_aes_ctr_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
	struct blkcipher_desc fallback_desc = {
		.tfm = ctx->fallback,
		.info = desc->info,
		.flags = desc->flags
	};

	if (in_interrupt()) {
		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
					       nbytes);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
						    walk.dst.virt.addr,
						    (nbytes &
						     AES_BLOCK_MASK) /
						    AES_BLOCK_SIZE,
						    &ctx->enc_key,
						    walk.iv);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			/* Update the IV; it matters mainly for the final partial block */
			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
			if (inc > 0)
				while (inc--)
					crypto_inc(walk.iv, AES_BLOCK_SIZE);

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
		if (walk.nbytes) {
			p8_aes_ctr_final(ctx, &walk);
			ret = blkcipher_walk_done(desc, &walk, 0);
		}
	}

	return ret;
}

struct crypto_alg p8_aes_ctr_alg = {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "p8_aes_ctr",
	.cra_module = THIS_MODULE,
	.cra_priority = 2000,
	.cra_type = &crypto_blkcipher_type,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
	.cra_alignmask = 0,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
	.cra_init = p8_aes_ctr_init,
	.cra_exit = p8_aes_ctr_exit,
	.cra_blkcipher = {
			  .ivsize = AES_BLOCK_SIZE,
			  .min_keysize = AES_MIN_KEY_SIZE,
			  .max_keysize = AES_MAX_KEY_SIZE,
			  .setkey = p8_aes_ctr_setkey,
			  .encrypt = p8_aes_ctr_crypt,
			  .decrypt = p8_aes_ctr_crypt,
	},
};
Example #24
/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	pagefault_enable();
	return 0;
}
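A hedged C rendering of the pattern the comment describes (the real caller is PowerPC assembly, not this sketch): because exit_vmx_usercopy() always returns 0, the copy routine's success path can tail-call it and let that 0 double as "no bytes left to copy".

/* Illustrative success path; the real caller is assembly, not C. */
static unsigned long vmx_copy_success_path_sketch(void)
{
	/* ... the VMX copy loop finished without taking a fault ... */
	return exit_vmx_usercopy();	/* tail call; the result is always 0 */
}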