Example #1
static unsigned long __user *
user_backtrace(unsigned long __user *tail,
	       struct quadd_callchain *callchain_data,
	       struct vm_area_struct *stack_vma)
{
	unsigned long value, value_lr = 0, value_fp = 0;
	unsigned long __user *fp_prev = NULL;

	if (check_vma_address((unsigned long)tail, stack_vma))
		return NULL;

	if (__copy_from_user_inatomic(&value, tail, sizeof(unsigned long)))
		return NULL;

	if (!check_vma_address(value, stack_vma)) {
		/* clang's frame */
		value_fp = value;

		if (check_vma_address((unsigned long)(tail + 1), stack_vma))
			return NULL;

		if (__copy_from_user_inatomic(&value_lr, tail + 1,
					      sizeof(unsigned long)))
			return NULL;
	} else {
		/* gcc's frame */
		if (__copy_from_user_inatomic(&value_fp, tail - 1,
					      sizeof(unsigned long)))
			return NULL;

		if (check_vma_address(value_fp, stack_vma))
			return NULL;

		value_lr = value;
	}

	fp_prev = (unsigned long __user *)value_fp;

	if (value_lr < QUADD_USER_SPACE_MIN_ADDR)
		return NULL;

	quadd_callchain_store(callchain_data, value_lr);

	if (fp_prev <= tail)
		return NULL;

	return fp_prev;
}
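A hedged sketch of the driver loop this helper implies; quadd_walk_user_stack and the depth cap are invented for illustration, and the quadd_callchain bookkeeping is assumed to live elsewhere:

static void
quadd_walk_user_stack(unsigned long __user *fp,
		      struct quadd_callchain *cc,
		      struct vm_area_struct *stack_vma)
{
	unsigned long __user *tail = fp;
	unsigned int depth = 64;	/* assumed cap, not from the excerpt */

	/* __copy_from_user_inatomic() must run with page faults disabled */
	pagefault_disable();
	while (tail && depth--)
		tail = user_backtrace(tail, cc, stack_vma);
	pagefault_enable();
}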
Example #2
static int
pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
			size_t *remaining, int atomic)
{
	unsigned long copy;

	while (*remaining > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, *remaining, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(addr + *offset,
						      iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(addr + *offset,
					   iov->iov_base, copy))
				return -EFAULT;
		}
		*offset += copy;
		*remaining -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
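The offset/remaining cursor makes this copy resumable: a caller can attempt it atomically first and retry the remainder with faults allowed. A minimal sketch of that shape — copy_page_from_iov is made up, and the real pipe code also remaps the destination page between the atomic and sleeping passes:

static int copy_page_from_iov(void *kaddr, struct iovec *iov, size_t len)
{
	int offset = 0;
	size_t remaining = len;
	int atomic = 1;	/* optimistic first pass: no faults allowed */

	for (;;) {
		if (!pipe_iov_copy_from_user(kaddr, &offset, iov,
					     &remaining, atomic))
			return 0;
		if (!atomic)
			return -EFAULT;	/* faulted even while sleeping */
		atomic = 0;	/* resume where we left off, faults allowed */
	}
}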
Example #3
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
						      void *addr, void *dest)
{
	long ret;
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	u8 *src = addr;
	mm_segment_t old_fs = get_fs();
	if (!maxlen)
		return;
	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	set_fs(KERNEL_DS);
	pagefault_disable();
	do
		ret = __copy_from_user_inatomic(dst++, src++, 1);
	while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	if (ret) {	/* Failed to fetch string; copy returns bytes not copied */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else
		*(u32 *)dest = make_data_rloc(src - (u8 *)addr,
					      get_rloc_offs(*(u32 *)dest));
}
Example #4
static unsigned int user_getsp32(unsigned int sp, int is_first)
{
	unsigned int stack_frame[2];
	void __user *p = compat_ptr(sp);

	if (!access_ok(VERIFY_READ, p, sizeof(stack_frame)))
		return 0;

	/*
	 * The most likely reason for this is that we returned -EFAULT,
	 * which means that we've done all that we can do from
	 * interrupt context.
	 */
	if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame)))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR32(stack_frame));

	/*
	 * We do not enforce increasing stack addresses here because
	 * we may transition to a different stack, eg a signal handler.
	 */
	return STACK_SP(stack_frame);
}
Example #5
static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
		      struct perf_callchain_entry_ctx *entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
Example #6
/*
 * Calculate string length. The count includes the final null terminating
 * character if there is one; otherwise it ends at the first fault. Disabling
 * page faults ensures that we can safely call this from pretty much any
 * context, including those where the caller holds mmap_sem, or any lock
 * which nests in mmap_sem.
 */
long lttng_strlen_user_inatomic(const char *addr)
{
	long count = 0;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	for (;;) {
		char v;
		unsigned long ret;

		if (unlikely(!access_ok(VERIFY_READ,
				(__force const char __user *) addr,
				sizeof(v))))
			break;
		ret = __copy_from_user_inatomic(&v,
			(__force const char __user *)(addr),
			sizeof(v));
		if (unlikely(ret > 0))
			break;
		count++;
		if (unlikely(!v))
			break;
		addr++;
	}
	pagefault_enable();
	set_fs(old_fs);
	return count;
}
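An illustrative caller, sizing a user string and then copying it under the same inatomic discipline; record_user_path and the 256-byte bound are assumptions:

static void record_user_path(const char __user *upath)
{
	char buf[256];
	long len;

	len = lttng_strlen_user_inatomic((const char *)upath);
	if (len <= 0 || len > sizeof(buf))
		return;
	pagefault_disable();
	if (__copy_from_user_inatomic(buf, upath, len))
		buf[0] = '\0';	/* string moved or faulted since we sized it */
	pagefault_enable();
}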
Example #7
/*
 * Expects va args : (int elem_num, const char __user *s)
 * Element size is implicit (sizeof(char)).
 */
static char *ltt_serialize_fs_data(char *buffer, char *str,
	struct ltt_serialize_closure *closure,
	void *serialize_private,
	int align, const char *fmt, va_list *args)
{
	int elem_size;
	int elem_num;
	const char __user  *s;
	unsigned long noncopy;

	elem_num = va_arg(*args, int);
	s = va_arg(*args, const char __user *);
	elem_size = sizeof(*s);

	if (align)
		str += ltt_align((long)str, sizeof(int));
	if (buffer)
		*(int*)str = elem_num;
	str += sizeof(int);

	if (elem_num > 0) {
		/* No alignment required for char */
		if (buffer) {
			noncopy = __copy_from_user_inatomic(str, s,
					elem_num*elem_size);
			memset(str+(elem_num*elem_size)-noncopy, 0, noncopy);
		}
		str += (elem_num*elem_size);
	}
	/* Following alignment for genevent compatibility */
	if (align)
		str += ltt_align((long)str, sizeof(void*));
	return str;
}
Example #8
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
			int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(to, iov->iov_base, copy))
				return -EFAULT;
		}
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
Example #9
unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
				       unsigned long n)
{
	unsigned long rc = __copy_from_user_inatomic(to, from, n);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}
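A hedged example of why the zeroing matters: reading a fixed-size record where a partial fault must not leave stale kernel memory in the tail. The record type and wrapper are invented:

struct sample_record {
	unsigned long ip;
	unsigned long sp;
};

static int read_sample(const void __user *uptr, struct sample_record *rec)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_zeroing(rec, uptr, sizeof(*rec));
	pagefault_enable();

	/* even on failure the uncopied tail of *rec has been zeroed */
	return left ? -EFAULT : 0;
}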
Example #10
/**
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long probe_kernel_read(void *dst, void *src, size_t size)
{
	long ret;

	ret = __copy_from_user_inatomic(dst,
			(__force const void __user *)src, size);

	return ret ? -EFAULT : 0;
}
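Typical call shape — dereferencing a possibly invalid kernel pointer without an oops; safe_deref and its zero sentinel are illustrative. Note the fuller variant in Example #18 also wraps the copy in set_fs()/pagefault_disable():

static unsigned long safe_deref(void *maybe_bad_ptr)
{
	unsigned long val;

	if (probe_kernel_read(&val, maybe_bad_ptr, sizeof(val)))
		return 0;	/* unreadable; caller must tolerate the sentinel */
	return val;
}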
Example #11
static inline int get_mem(unsigned long addr, unsigned long *result)
{
	unsigned long *address = (unsigned long *) addr;
	if (!access_ok(address, sizeof(unsigned long)))
		return -1;
	if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
		return -3;
	return 0;
}
Example #12
//
// On some kernels (e.g. 2.6.39), even with preemption disabled, strncpy_from_user,
// instead of returning -1 after a page fault, schedules the process, so we drop
// events because of the preemption. This function reads the user buffer in atomic
// chunks, and returns when there's an error or the terminator is found.
//
long ppm_strncpy_from_user(char *to, const char __user *from, unsigned long n)
{
	long string_length = 0;
	long res = -1;
	unsigned long bytes_to_read = 4;
	int j;

	pagefault_disable();

	while(n)
	{
		//
		// Read bytes_to_read bytes at a time, and look for the terminator. Should be fast
		// since the copy_from_user is optimized for the processor
		//
		if(n < bytes_to_read)
		{
			bytes_to_read = n;
		}

		if(!access_ok(VERIFY_READ, from, n))
		{
			res = -1;
			goto strncpy_end;
		}

		if(__copy_from_user_inatomic(to, from, bytes_to_read))
		{
			//
			// Page fault
			//
			res = -1;
			goto strncpy_end;
		}

		n -= bytes_to_read;
		from += bytes_to_read;

		for(j = 0; j < bytes_to_read; ++j)
		{
			++string_length;

			if(!*to)
			{
				res = string_length;
				goto strncpy_end;
			}

			++to;
		}
	}

strncpy_end:
	pagefault_enable();
	return res;
}
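A hedged call-site sketch: fetching a path argument inside a syscall probe, where a fault means dropping the event rather than sleeping. record_open_path and its buffer size are assumptions:

static long record_open_path(const char __user *upath)
{
	char path[256];
	long len;

	len = ppm_strncpy_from_user(path, upath, sizeof(path));
	if (len < 0)
		return -EFAULT;	/* faulted (or unterminated): drop the event */
	/* on success len counts the terminator, so path is a valid C string */
	return len;
}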
Example #13
static unsigned long _stp_copy_from_user(char *dst, const char __user *src, unsigned long count)
{
	if (count) {
		if (access_ok(VERIFY_READ, src, count))
			count = __copy_from_user_inatomic(dst, src, count);
		else
			memset(dst, 0, count);
	}
	return count;
}
Example #14
static void arm_backtrace_eabi(int cpu, int buftype, struct pt_regs * const regs, unsigned int depth)
{
#if defined(__arm__)
	struct frame_tail_eabi *tail;
	struct frame_tail_eabi *next;
	struct frame_tail_eabi *ptrtail;
	struct frame_tail_eabi buftail;
	unsigned long fp = regs->ARM_fp;
	unsigned long sp = regs->ARM_sp;
	unsigned long lr = regs->ARM_lr;
	int is_user_mode = user_mode(regs);

	if (!is_user_mode) {
		return;
	}

	/* entry preamble may not have executed */
	gator_add_trace(cpu, buftype, lr);

	/* check tail is valid */
	if (fp == 0 || fp < sp) {
		return;
	}

	tail = (struct frame_tail_eabi *)(fp - 4);

	while (depth-- && tail && !((unsigned long) tail & 3)) {
		/* Also check accessibility of one struct frame_tail beyond */
		if (!access_ok(VERIFY_READ, tail, sizeof(struct frame_tail_eabi)))
			return;
		if (__copy_from_user_inatomic(&buftail, tail, sizeof(struct frame_tail_eabi)))
			return;
		ptrtail = &buftail;

		lr = ptrtail[0].lr;
		gator_add_trace(cpu, buftype, lr);

		/* frame pointers should progress back up the stack, towards higher addresses */
		next = (struct frame_tail_eabi *)(lr - 4);
		if (tail >= next || lr == 0) {
			fp = ptrtail[0].fp;
			next = (struct frame_tail_eabi *)(fp - 4);
			/* check tail is valid */
			if (tail >= next || fp == 0) {
				return;
			}
		}

		tail = next;
	}
#endif
}
Example #15
/*
 * What this function does is basically a special memcpy: if the page fault
 * handler detects the address is invalid, it won't kill the process but
 * will return a positive number. Plus, this doesn't sleep.
 * The risk is that if the buffer is partially paged out, we get an error.
 * Returns the number of bytes NOT read.
 */
unsigned long ppm_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	pagefault_disable();

	if (likely(ppm_access_ok(VERIFY_READ, from, n)))
		res = __copy_from_user_inatomic(to, from, n);

	pagefault_enable();

	return res;
}
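Minimal usage sketch, treating any shortfall as failure; fetch_arg is a made-up wrapper:

static int fetch_arg(void *karg, const void __user *uarg, unsigned long size)
{
	/* nonzero means bytes were left unread, e.g. a page swapped out */
	if (ppm_copy_from_user(karg, uarg, size))
		return -EFAULT;
	return 0;
}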
Example #16
static bool read_data_from_addr(u32 address, void * data, unsigned int size)
{
	if (!access_ok(VERIFY_READ, address, size))
	{
		return false;
	}

	if (__copy_from_user_inatomic(data, (void *)address, size))
	{
		return false;
	}

	return true;
}
Example #17
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
Example #18
long __probe_kernel_read(void *dst, void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_from_user_inatomic(dst,
			(__force const void __user *)src, size);
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : 0;
}
Example #19
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}
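A sketched caller loop for the stack-walk case, assuming struct stack_frame carries a next_frame pointer and a return_address as in the oprofile walker of Example #26; the recording sink is left as a comment:

static void walk_user_stack(const void __user *fp, unsigned int max_depth)
{
	struct stack_frame frame;

	while (max_depth-- && fp) {
		if (!copy_stack_frame(fp, &frame))
			break;
		/* record frame.return_address here */
		if ((unsigned long)frame.next_frame <= (unsigned long)fp)
			break;	/* frames must progress toward higher addresses */
		fp = (const void __user *)frame.next_frame;
	}
}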
Example #20
File: copy.c Project: 5kg/systemtap
/* XXX: see also kread/uread in loc2c-runtime.h */
static unsigned long _stp_copy_from_user(char *dst, const char __user *src, unsigned long count)
{
	if (count) {
		mm_segment_t _oldfs = get_fs();

		set_fs(USER_DS);
		pagefault_disable();
		if (access_ok(VERIFY_READ, src, count))
			count = __copy_from_user_inatomic(dst, src, count);
		else
			memset(dst, 0, count);
		pagefault_enable();
		set_fs(_oldfs);
	}
	return count;
}
Example #21
static unsigned long user_getsp64(unsigned long sp, int is_first)
{
	unsigned long stack_frame[3];

	if (!access_ok(VERIFY_READ, (void __user *)sp, sizeof(stack_frame)))
		return 0;

	if (__copy_from_user_inatomic(stack_frame, (void __user *)sp,
					sizeof(stack_frame)))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR64(stack_frame));

	return STACK_SP(stack_frame);
}
Example #22
File: baselib.c Project: WeiY/ktap
static int ktap_lib_user_string(ktap_State *ks)
{
	unsigned long addr = nvalue(GetArg(ks, 1));
	char str[256] = {0};
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic((void *)str, (const void *)addr, 256);
	(void) &ret;  /* Silence compiler warning. */
	pagefault_enable();
	str[255] = '\0';
	setsvalue(ks->top, kp_tstring_new(ks, str));

	incr_top(ks);
	return 1;
}
Example #23
static struct frame_head *dump_user_backtrace(struct frame_head *head)
{
	struct frame_head bufhead[2];

	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
		return NULL;
	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
		return NULL;

	oprofile_add_trace(bufhead[0].lr);

	if (bufhead[0].fp <= head)
		return NULL;

	return bufhead[0].fp;
}
Example #24
static noinline void __init copy_user_test(void)
{
	char *kmem;
	char __user *usermem;
	size_t size = 10;
	int unused;

	kmem = kmalloc(size, GFP_KERNEL);
	if (!kmem)
		return;

	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR(usermem)) {
		pr_err("Failed to allocate user memory\n");
		kfree(kmem);
		return;
	}

	pr_info("out-of-bounds in copy_from_user()\n");
	unused = copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in copy_to_user()\n");
	unused = copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user()\n");
	unused = __copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user()\n");
	unused = __copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
	unused = __copy_from_user_inatomic(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
	unused = __copy_to_user_inatomic(usermem, kmem, size + 1);

	pr_info("out-of-bounds in strncpy_from_user()\n");
	unused = strncpy_from_user(kmem, usermem, size + 1);

	vm_munmap((unsigned long)usermem, PAGE_SIZE);
	kfree(kmem);
}
Example #25
static struct frame_tail* user_backtrace(struct frame_tail *tail)
{
	struct frame_tail buftail[2];

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
		return NULL;

	oprofile_add_trace(buftail[0].lr);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (tail + 1 >= buftail[0].fp)
		return NULL;

	return buftail[0].fp-1;
}
Example #26
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
	struct stack_frame bufhead[2];

	/* Also check accessibility of one struct stack_frame beyond */
	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
		return NULL;
	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
		return NULL;

	oprofile_add_trace(bufhead[0].return_address);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (head >= bufhead[0].next_frame)
		return NULL;

	return bufhead[0].next_frame;
}
Example #27
/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context,
	 * disable pagefaults so that its behaviour is consistent even when
	 * called from other contexts.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
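Sketch of the perf-style call site this serves — walking user frames from the PMI handler; the frame layout again mirrors Example #26 and the depth cap is an assumption:

static void nmi_user_callchain(const void __user *fp, unsigned int depth)
{
	struct stack_frame frame;

	while (depth-- && fp) {
		if (copy_from_user_nmi(&frame, fp, sizeof(frame)))
			break;	/* nonzero return = bytes left uncopied */
		/* record frame.return_address here */
		fp = (const void __user *)frame.next_frame;
	}
}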
Example #28
static int kplib_user_string(ktap_state *ks)
{
	unsigned long addr;
	char str[256] = {0};
	int ret;

	kp_arg_check(ks, 1, KTAP_TYPE_NUMBER);

	addr = nvalue(kp_arg(ks, 1));

	pagefault_disable();
	ret = __copy_from_user_inatomic((void *)str, (const void *)addr, 256);
	(void) &ret;  /* Silence compiler warning. */
	pagefault_enable();
	str[255] = '\0';
	set_string(ks->top, kp_str_new(ks, str));

	incr_top(ks);
	return 1;
}
Example #29
/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied. If a fault is encountered then return the number
 * of bytes which were copied before the fault.
 */
static size_t copy_from_user_atomic_iovec(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
Example #30
/* Return the length of string -- including null terminal byte */
static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
							void *addr, void *dest)
{
	int ret, len = 0;
	u8 c;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);
	pagefault_enable();
	set_fs(old_fs);

	if (ret)	/* Failed to check the length; copy returns bytes not copied */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}