示例#1
0
/*
 * iov_iter_init - initialise an iov_iter over an iovec array.
 *
 * If the current address limit is KERNEL_DS the iovec entries actually
 * point at kernel memory, so the iterator is tagged ITER_KVEC and the
 * array is reinterpreted as struct kvec (same layout, minus the __user
 * annotation).  Otherwise a plain user-space iovec iterator is set up.
 *
 * NOTE(review): under CONFIG_KERNEL_MODE_LINUX the TIF_KU thread flag
 * apparently marks kernel-mode user tasks that must still be treated as
 * user space despite KERNEL_DS -- confirm against the KML patch set.
 */
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
#ifndef CONFIG_KERNEL_MODE_LINUX
	if (segment_eq(get_fs(), KERNEL_DS)) {
#else
	if (segment_eq(get_fs(), KERNEL_DS) && !test_thread_flag(TIF_KU)) {
#endif
		/* Kernel-space iovec: treat the array as kvecs. */
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

/*
 * memcpy_from_page - copy @len bytes starting at @offset inside @page
 * into the kernel buffer @to, using a temporary atomic kmap.
 */
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *vaddr = kmap_atomic(page);

	memcpy(to, vaddr + offset, len);
	kunmap_atomic(vaddr);
}
示例#2
0
/*
 * copy_to_user - copy a kernel buffer to user space, one page at a time.
 *
 * Returns 0 on success, or the number of bytes NOT copied if a page-wise
 * copy faults.  With a KERNEL_DS address limit the destination is kernel
 * memory and a plain memcpy suffices.
 */
unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/* Offset of 'to' inside its page; 0 means page-aligned. */
	unsigned long copy_size = (unsigned long)to & ~PAGE_MASK;

#ifdef DEBUG_MEMCPY_TOFS
	printk("copy_to_user called from: %08lx to: %p, "
	       "from: %p, len: %08lx\n",
	       *((unsigned long *)&to - 1), to, from, n);
#endif

	/* kernel access */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, from, n);
		return 0;
	}

	/* Leading partial page: copy up to the next page boundary. */
	if (copy_size) {
		copy_size = min(PAGE_SIZE - copy_size, n);
		if (__copy_to_user_page(to, from, copy_size) == -EFAULT)
			return n;
		n -= copy_size;
	}

	/*
	 * Remaining data, a full page (or the tail) per iteration.  The
	 * pointers advance by the amount copied in the previous round
	 * (0 on the first pass if the leading chunk was skipped).
	 */
	while (n) {
		from +=copy_size;
		to += copy_size;
		copy_size = min(PAGE_SIZE, n);
		if (__copy_to_user_page(to, from, copy_size) == -EFAULT)
			return n;
		n -= copy_size;
	}
	return 0;
}
示例#3
0
File: file.c  Project: Mr-Aloof/wl500g
/*
 * ocfs2_get_write_source - map the source buffer for a buffered write.
 *
 * For user-space callers the page backing the current iovec position is
 * pinned with get_user_pages() and kmapped; bp->b_src_buf then points at
 * the start of that mapped page.  Returns the pinned page, NULL for the
 * kernel-space case (no page to put/unmap), or ERR_PTR(-EFAULT) if the
 * user page could not be pulled in.
 */
static struct page * ocfs2_get_write_source(struct ocfs2_buffered_write_priv *bp,
					    const struct iovec *cur_iov,
					    size_t iov_offset)
{
	int ret;
	char *buf;
	struct page *src_page = NULL;

	buf = cur_iov->iov_base + iov_offset;

	if (!segment_eq(get_fs(), KERNEL_DS)) {
		/*
		 * Pull in the user page. We want to do this outside
		 * of the meta data locks in order to preserve locking
		 * order in case of page fault.
		 */
		ret = get_user_pages(current, current->mm,
				     (unsigned long)buf & PAGE_CACHE_MASK, 1,
				     0, 0, &src_page, NULL);
		if (ret == 1)
			bp->b_src_buf = kmap(src_page);
		else
			src_page = ERR_PTR(-EFAULT);
	} else {
		/* Kernel buffer: just round down to the page start. */
		bp->b_src_buf = (char *)((unsigned long)buf & PAGE_CACHE_MASK);
	}

	return src_page;
}
示例#4
0
/*
 * show_regs - dump the register state in @regs to the kernel log.
 *
 * Fix: the first two lines used bare printk() without a KERN_ level
 * while the rest of the function logs at pr_info(); emit everything
 * consistently at KERN_INFO.
 */
void show_regs(struct pt_regs *regs)
{
	/* Symbolised program counter and link pointer first. */
	pr_info("PC is at %pS\n", (void *)instruction_pointer(regs));
	pr_info("LP is at %pS\n", (void *)regs->lp);
	pr_info("pc : [<%08lx>]    lp : [<%08lx>]    %s\n"
		"sp : %08lx  fp : %08lx  gp : %08lx\n",
		instruction_pointer(regs),
		regs->lp, print_tainted(), regs->sp, regs->fp, regs->gp);
	pr_info("r25: %08lx  r24: %08lx\n", regs->uregs[25], regs->uregs[24]);

	/* General-purpose registers, four per line, r23 down to r0. */
	pr_info("r23: %08lx  r22: %08lx  r21: %08lx  r20: %08lx\n",
		regs->uregs[23], regs->uregs[22],
		regs->uregs[21], regs->uregs[20]);
	pr_info("r19: %08lx  r18: %08lx  r17: %08lx  r16: %08lx\n",
		regs->uregs[19], regs->uregs[18],
		regs->uregs[17], regs->uregs[16]);
	pr_info("r15: %08lx  r14: %08lx  r13: %08lx  r12: %08lx\n",
		regs->uregs[15], regs->uregs[14],
		regs->uregs[13], regs->uregs[12]);
	pr_info("r11: %08lx  r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->uregs[11], regs->uregs[10],
		regs->uregs[9], regs->uregs[8]);
	pr_info("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]);
	pr_info("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
	/* Interrupt state and current address-limit segment. */
	pr_info("  IRQs o%s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		segment_eq(get_fs(), get_ds()) ? "kernel" : "user");
}
示例#5
0
/*
 * copy_from_user - copy a user-space buffer into the kernel, page-wise.
 *
 * Returns 0 on success, or the number of bytes NOT copied on fault; on
 * fault the remaining destination bytes are zero-filled, matching the
 * usual copy_from_user contract.  With KERNEL_DS the source is kernel
 * memory and a plain memcpy suffices.
 */
unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* Offset of 'from' inside its page; 0 means page-aligned. */
	unsigned long copy_size = (unsigned long)from & ~PAGE_MASK;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, from, n);
		return 0;
	}

#ifdef DEBUG_MEMCPY_FROMFS
	printk("copy_from_user called from: %08lx "
	       "to: %p, from: %p, len: %08lx\n",
	       *((unsigned long *)&to - 1), to, from, n);
#endif
	/* Leading partial page: copy up to the next page boundary. */
	if (copy_size) {
		copy_size = min(PAGE_SIZE - copy_size, n);
		if (__copy_from_user_page(to, from, copy_size) == -EFAULT) {
			/* Zero what we could not read. */
			memset(to, 0, n);
			return n;
		}
		n -= copy_size;
	}
	/*
	 * Remaining data, a full page (or the tail) per iteration; the
	 * pointers advance by the amount copied in the previous round.
	 */
	while (n) {
		from +=copy_size;
		to += copy_size;
		copy_size = min(PAGE_SIZE, n);
		if (__copy_from_user_page(to, from, copy_size) == -EFAULT) {
			memset(to, 0, n);
			return n;
		}
		n -= copy_size;
	}
	return 0;
}
示例#6
0
/*
 * copy_mount_options - snapshot mount option data into a kernel page.
 *
 * On success *where holds either the original pointer (kernel-space
 * caller) or a freshly allocated page containing at most PAGE_SIZE-1
 * bytes copied from user space; the caller owns and must free it.
 * Returns 0 on success, -EFAULT/-ENOMEM on failure.
 */
static int copy_mount_options (const void * data, unsigned long *where)
{
	struct vm_area_struct *vma;
	unsigned long page;
	int len;

	*where = 0;
	if (!data)
		return 0;

	/* If this is the kernel, just trust the pointer. */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		*where = (unsigned long) data;
		return 0;
	}

	/* The pointer must fall inside a readable mapping. */
	vma = find_vma(current->mm, (unsigned long) data);
	if (!vma || (unsigned long) data < vma->vm_start)
		return -EFAULT;
	if (!(vma->vm_flags & VM_READ))
		return -EFAULT;

	/* Copy only to the end of the vma, capped below one page. */
	len = vma->vm_end - (unsigned long) data;
	if ((unsigned long) len >= PAGE_SIZE)
		len = PAGE_SIZE - 1;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (copy_from_user((void *) page, data, len)) {
		free_page(page);
		return -EFAULT;
	}
	*where = page;
	return 0;
}
示例#7
0
/*
 * ali_ce_umemcpy - copy @n bytes between buffers that may live in either
 * user or kernel space, picking the right primitive from access_ok().
 *
 * Returns the result of the chosen copy routine (0 on full success),
 * or -1 when both pointers look like user addresses.
 */
int ali_ce_umemcpy(void *dest, const void *src, __u32 n)
{
	int ret = 0;
	int src_is_user = access_ok(VERIFY_READ, (void __user *)src, n);
	int dst_is_user = access_ok(VERIFY_WRITE, (void __user *)dest, n);

	/* With a kernel address limit everything is kernel memory. */
	if (!segment_eq(get_fs(), USER_DS)) {
		memcpy(dest, src, n);
		return ret;
	}

	if (src_is_user && !dst_is_user)
		ret = copy_from_user(dest, (void __user *)src, n);
	else if (dst_is_user && !src_is_user)
		ret = copy_to_user(dest, src, n);
	else if (!src_is_user && !dst_is_user)
		memcpy(dest, src, n);
	else
		return -1;

	return ret;
}
示例#8
0
/*
 * l4x_clear_user - zero @n bytes of (possibly user-space) memory,
 * one page at a time.
 *
 * Returns 0 on success, or the number of bytes left unzeroed on fault.
 * KERNEL_DS callers get a direct memset, after an optional sanity check
 * that the region really lies in kernel space.
 */
unsigned long l4x_clear_user(void *address, unsigned long n)
{
    /* Offset of 'address' inside its page; 0 means page-aligned. */
    unsigned long clear_size = (unsigned long)address & ~PAGE_MASK;

#ifdef DEBUG_MEMCPY_TOFS
    printk("%s called from: %08lx to: %p, len: %08lx\n",
           __func__, *((unsigned long *)&address - 1), address, n);
#endif

    if (segment_eq(get_fs(), KERNEL_DS)) {
        if (L4X_CHECK_IN_KERNEL_ACCESS && l4x_check_kern_region(address, n, 1))
            return -EFAULT;
        memset(address, 0, n);
        return 0;
    }

    /* Leading partial page: clear up to the next page boundary. */
    if (clear_size) {
        clear_size = min(PAGE_SIZE - clear_size, n);
        if (__clear_user_page(address, clear_size) == -EFAULT)
            return n;
        n -= clear_size;
    }
    /* Remaining bytes, one page (or the tail) per iteration. */
    while (n) {
        address += clear_size;
        clear_size = min(PAGE_SIZE, n);
        if (__clear_user_page(address, clear_size) == -EFAULT)
            return n;
        n -= clear_size;
    }
    return 0;
}
/*
 * copy_to_user_pt - copy @n bytes from @from to user address @to via a
 * manual page-table walk; kernel-space destinations take a plain memcpy.
 * Returns the number of bytes not copied (0 on success).
 */
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (!segment_eq(get_fs(), KERNEL_DS))
		return __user_copy_pt((unsigned long) to, (void *) from, n, 1);

	memcpy((void __kernel __force *) to, from, n);
	return 0;
}
示例#10
0
/*
 * init_buffer_descriptor - fill in a buffer_descriptor.
 *
 * The write and user fields are normalised to 0/1; 'user' is only
 * honoured when the current address limit is USER_DS.
 */
void init_buffer_descriptor(struct buffer_descriptor *bd, void *buf,
			    unsigned long long len, int write, int user)
{
	bd->buf = buf;
	bd->len = len;
	bd->write = write ? 1 : 0;
	bd->user = (user && segment_eq(get_fs(), USER_DS)) ? 1 : 0;
}
示例#11
0
/*
 * clear_user - zero @len bytes at user address @mem.
 * Returns 0 on success, or @len if the range is not writable.
 */
int clear_user(void __user *mem, int len)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((__force void *) mem, 0, len);
		return 0;
	}

	if (!access_ok(VERIFY_WRITE, mem, len))
		return len;

	return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
}
示例#12
0
/*
 * copy_to_user - copy @n bytes from kernel buffer @from to user @to.
 * Returns 0 on success, or @n if the destination is not accessible.
 */
int copy_to_user(void __user *to, const void *from, int n)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((__force void *) to, from, n);
		return 0;
	}

	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
}
示例#13
0
/*
 * copy_from_user - copy @n bytes from user @from into kernel buffer @to.
 * Returns 0 on success, or @n if the source is not accessible.
 */
int copy_from_user(void *to, const void __user *from, int n)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (__force void *) from, n);
		return 0;
	}

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
}
示例#14
0
/*
 * strnlen_user - length of a user string including the terminator,
 * scanning at most @len bytes.  Returns -EFAULT on access failure.
 */
int strnlen_user(const void __user *str, int len)
{
	int count = 0;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((__force char *) str, len) + 1;

	if (buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count) == 0)
		return count + 1;

	return -EFAULT;
}
示例#15
0
/*
 * __copy_to_user_memcpy - copy to user space with memcpy after pinning
 * each destination page's PTE, so the copy itself cannot fault.
 *
 * Returns the number of bytes NOT copied (0 on success).  Pages that
 * are not yet present/writable are faulted in via __put_user() with the
 * mmap semaphore dropped, then retried.
 */
unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	/* Kernel address limit: the "user" pointer is kernel memory. */
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		/*
		 * Pin the destination PTE; on failure, drop the lock and
		 * let __put_user() fault the page in, then retry.  A hard
		 * fault bails out with 'n' bytes still uncopied.
		 */
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

		/* Bytes remaining up to the end of this page. */
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		/* pte == NULL means ptl is a bare lock (huge mapping case). */
		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}
示例#16
0
File: iov_iter.c  Project: MaxChina/linux
/*
 * iov_iter_init - set up an iov_iter over @iov.
 *
 * Under a KERNEL_DS address limit the iovec points at kernel memory,
 * so the ITER_KVEC flag is folded into the stored type.
 */
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= ITER_KVEC;

	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
/*
 * copy_from_user_pt - copy @n bytes from user @from to @to via a manual
 * page-table walk; kernel-space sources take a plain memcpy.
 *
 * Returns the number of bytes not copied; on a partial copy the
 * uncopied tail of the destination is zero-filled.
 */
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t uncopied;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}

	uncopied = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(uncopied))
		memset(to + n - uncopied, 0, uncopied);
	return uncopied;
}
示例#18
0
/*
 * __copy_from_user_memcpy - copy from user space with memcpy after
 * pinning each source page's PTE, so the copy itself cannot fault.
 *
 * Returns the number of bytes NOT copied (0 on success).  Pages not yet
 * present are faulted in via __get_user() with the mmap semaphore
 * dropped, then retried.
 */
unsigned long noinline
__copy_from_user_memcpy(void *to, const void __user *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	/* Kernel address limit: the "user" pointer is kernel memory. */
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy(to, (const void *)from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		/*
		 * Pin the source PTE; on failure let __get_user() fault
		 * the page in (lock dropped), then retry.  A hard fault
		 * bails out with 'n' bytes still uncopied.
		 */
		while (!pin_page_for_read(from, &pte, &ptl)) {
			char temp;
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__get_user(temp, (char __user *)from))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

		/* Bytes remaining up to the end of this page. */
		tocopy = (~(unsigned long)from & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memcpy(to, (const void *)from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}
示例#19
0
/*
 * l4x_strncpy_from_user - copy a NUL-terminated string of at most
 * @count bytes from (possibly user-space) @src to @dst, page by page.
 *
 * Returns -EFAULT on access failure, otherwise a byte count (see notes).
 * NOTE(review): in the KERNEL_DS branch the returned 'c' is incremented
 * before each byte is copied, so it counts the NUL terminator (and can
 * reach count+1 for an unterminated string) -- this differs from the
 * mainline strncpy_from_user() contract, which returns the string
 * length excluding the NUL.  Confirm intended L4Linux semantics.
 */
long l4x_strncpy_from_user(char *dst, const char *src, long count)
{
    /* Offset of 'src' inside its page; 0 means page-aligned. */
    unsigned long copy_size = (unsigned long)src & ~PAGE_MASK;
    long res;
    unsigned long n = count;

#ifdef DEBUG_MEMCPY_FROMFS
    printk("l4x_strncpy_from_user called from: %08lx "
           "to: %p, from: %p, len: 0x%lx (copy_size: 0x%lx)\n",
           *((unsigned long *)&dst - 1), dst, src, n, copy_size);
#endif
    if (segment_eq(get_fs(), KERNEL_DS)) {
        /* strncpy the data but deliver back the bytes copied */
        long c = 0;
        if (L4X_CHECK_IN_KERNEL_ACCESS && l4x_check_kern_region(dst, count, 1))
            return -EFAULT;
        while (c++ < count && (*dst++ = *src++) != '\0')
            /* nothing */;
        return c;
    }

    /* Leading partial page of the source. */
    if (copy_size) {
        copy_size = min(PAGE_SIZE - copy_size, n);
        res = __strncpy_from_user_page(dst, src, copy_size);
        n -= copy_size;
        if (res == -EFAULT) {
            return -EFAULT;
        }
        else if (res)
            /* NUL found inside this chunk: return bytes consumed. */
            return count - (n + res);
    }
    /* Remaining pages; pointers advance by the previous chunk size. */
    while (n) {
        src += copy_size;
        dst += copy_size;
        copy_size = min(PAGE_SIZE, n);
        n -= copy_size;
        res = __strncpy_from_user_page(dst, src, copy_size);
        if (res == -EFAULT) {
            return -EFAULT;
        }
        else if (res)
            return count - (n + res);
    }
    /* No NUL within count bytes: the whole buffer was copied. */
    return count;
}
示例#20
0
/*
 * strncpy_from_user - copy a string of at most @count bytes from user
 * @src to @dst.  Returns the bounded length of the result, or -EFAULT
 * if the source is not accessible.
 */
int strncpy_from_user(char *dst, const char __user *src, int count)
{
	char *ptr = dst;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		strncpy(dst, (__force void *) src, count);
		return strnlen(dst, count);
	}

	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;

	if (buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
		      &ptr) != 0)
		return -EFAULT;

	return strnlen(dst, count);
}
示例#21
0
/*
 * _access_ok - return 1 if [addr, addr+size) may be accessed,
 * 0 otherwise (with a debug trace of the rejected range).
 */
int _access_ok(unsigned long addr, unsigned long size)
{
	if (!size)
		return 1;

	/* Reject NULL and ranges that wrap past the 32-bit limit. */
	if (addr && addr <= 0xffffffffUL - (size - 1)) {
		if (segment_eq(get_fs(), KERNEL_DS))
			return 1;
		if (memory_start <= addr && (addr + size - 1) < memory_end)
			return 1;
	}

	pr_debug("Bad access attempt: pid[%d] addr[%08lx] size[0x%lx]\n",
		 current->pid, addr, size);
	return 0;
}
示例#22
0
/*
 * __clear_user_memset - zero user memory with memset after pinning each
 * destination page's PTE, so the store itself cannot fault.
 *
 * Returns the number of bytes NOT cleared (0 on success).  Pages not
 * yet present/writable are faulted in via __put_user() with the mmap
 * semaphore dropped, then retried.
 */
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	/* Kernel address limit: the "user" pointer is kernel memory. */
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		/*
		 * Pin the PTE; on failure fault the page in via
		 * __put_user() (lock dropped) and retry.  A hard fault
		 * bails out with 'n' bytes still uncleared.
		 */
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		/* Bytes remaining up to the end of this page. */
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		/* pte == NULL means ptl is a bare lock (huge mapping case). */
		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}
示例#23
0
File: namei.c  Project: Rick33/freevms
/* In order to reduce some races, while at the same time doing additional
 * checking and hopefully speeding things up, we copy filenames to the
 * kernel data space before using them..
 *
 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
 * PATH_MAX includes the nul terminator --RR.
 */
static inline int do_getname(const char *filename, char *page)
{
	int retval;
	unsigned long len = PATH_MAX;

	/*
	 * A pointer beyond TASK_SIZE is only legitimate when the caller
	 * runs with a kernel address limit (set_fs(KERNEL_DS)).
	 */
	if ((unsigned long) filename >= TASK_SIZE) {
		if (!segment_eq(get_fs(), KERNEL_DS))
			return -EFAULT;
	} else if (TASK_SIZE - (unsigned long) filename < PATH_MAX)
		/* Clamp so the copy cannot run past the user address space. */
		len = TASK_SIZE - (unsigned long) filename;

	retval = strncpy_from_user((char *)page, filename, len);
	if (retval > 0) {
		if (retval < len)
			return 0;
		/* Name filled the whole buffer: no NUL seen within len. */
		return -ENAMETOOLONG;
	} else if (!retval)
		/* Empty pathname: invalid per POSIX.1 2.4. */
		retval = -ENOENT;
	return retval;
}
示例#24
0
/* kernel-internal execve() */
/*
 * l4_kernelinternal_execve - execve() for callers already inside the
 * kernel (e.g. kernel/kmod.c helpers), turning the current kernel
 * thread into a real user task.
 *
 * Returns -1 on do_execve() failure (the task reverts to a kernel
 * thread); on success control transfers to the user dispatcher and the
 * trailing "return 0" is never reached.
 */
asmlinkage int
l4_kernelinternal_execve(const char * file,
                         const char * const * argv,
                         const char * const * envp)
{
	int ret;
	struct thread_struct *t = &current->thread;

	/* Caller must still be a kernel thread: no user thread cap yet. */
	ASSERT(l4_is_invalid_cap(t->user_thread_id));

	/* we are going to become a real user task now, so prepare a real
	 * pt_regs structure. */
	/* Enable Interrupts, Set IOPL (needed for X, hwclock etc.) */
	t->regs.flags = 0x3200; /* XXX hardcoded */

	/* do_execve() will create the user task for us in start_thread()
	   and call set_fs(USER_DS) in flush_thread. I know this sounds
	   strange but there are places in the kernel (kernel/kmod.c) which
	   call execve with parameters inside the kernel. They set fs to
	   KERNEL_DS before calling execve so we can't set it back to
	   USER_DS before execve had a chance to look at the name of the
	   executable. */

	ASSERT(segment_eq(get_fs(), KERNEL_DS));
	ret = do_execve(file, argv, envp, &t->regs);

	if (ret < 0) {
		/* we failed -- become a kernel thread again */
		if (!l4_is_invalid_cap(t->user_thread_id))
			l4lx_task_number_free(t->user_thread_id);
		set_fs(KERNEL_DS);
		t->user_thread_id = L4_INVALID_CAP;
		return -1;
	}

	l4x_user_dispatcher();

	/* not reached */
	return 0;
}
示例#25
0
File: process.c  Project: ForayJones/iods
/* Return 1 if access to memory range is OK, 0 otherwise */
/*
 * Accepts the range if it lies entirely inside one of the regions a
 * task may touch on this (no-MMU) platform: main memory, the init
 * section, L1 scratchpad, or the L1 code/data SRAM banks.
 */
int _access_ok(unsigned long addr, unsigned long size)
{
	if (size == 0)
		return 1;
	/* Reject ranges whose end wraps around the address space. */
	if (addr > (addr + size))
		return 0;
	/* Kernel address limit: everything is fair game. */
	if (segment_eq(get_fs(), KERNEL_DS))
		return 1;
#ifdef CONFIG_MTD_UCLINUX
	/* With the MTD filesystem in RAM, memory is split around it. */
	if (addr >= memory_start && (addr + size) <= memory_end)
		return 1;
	if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
		return 1;
#else
	if (addr >= memory_start && (addr + size) <= physical_mem_end)
		return 1;
#endif
	/* The freed init section remains accessible. */
	if (addr >= (unsigned long)__init_begin &&
	    addr + size <= (unsigned long)__init_end)
		return 1;
	/* On-chip L1 scratchpad SRAM. */
	if (addr >= L1_SCRATCH_START
	    && addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH)
		return 1;
#if L1_CODE_LENGTH != 0
	/* L1 instruction SRAM, past the kernel's own L1 text. */
	if (addr >= L1_CODE_START + (_etext_l1 - _stext_l1)
	    && addr + size <= L1_CODE_START + L1_CODE_LENGTH)
		return 1;
#endif
#if L1_DATA_A_LENGTH != 0
	/* L1 data bank A, past the kernel's own L1 data. */
	if (addr >= L1_DATA_A_START + (_ebss_l1 - _sdata_l1)
	    && addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH)
		return 1;
#endif
#if L1_DATA_B_LENGTH != 0
	/* L1 data bank B. */
	if (addr >= L1_DATA_B_START
	    && addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH)
		return 1;
#endif
	return 0;
}
示例#26
0
/*
 * bpf_probe_write_user - BPF helper writing @size bytes from @src to
 * the user address @unsafe_ptr.  Returns 0 or a negative errno; only
 * permitted from genuine user-task context.
 */
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	/* A KERNEL_DS address limit would let this write kernel memory. */
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}
/*
 * strnlen_user_pt - strnlen() on a user string via a manual page-table
 * walk, scanning at most @count bytes.
 *
 * Returns the string length including the terminator, or 0 if the
 * fault handler could not resolve an access failure.
 */
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		/*
		 * NOTE(review): follow_table() apparently encodes errors
		 * as small pointer values (< 0x1000), and 0x11 is used as
		 * a "not present" sentinel passed to __handle_fault() --
		 * confirm against the s390 uaccess_pt implementation.
		 */
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		}

		/* Scan at most to the end of the current physical page. */
		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE-1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	/* +1 accounts for the NUL terminator. */
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, 0))
		return 0;
	goto retry;
}
/*
 * clear_user_pt - zero @n bytes of user memory by copying from
 * empty_zero_page in page-sized chunks through the page-table walker.
 * Returns the number of bytes not cleared (0 on success).
 */
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, chunk, rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}

	done = 0;
	do {
		chunk = n - done;
		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;
		rc = __user_copy_pt((unsigned long) to + done,
				    &empty_zero_page, chunk, 1);
		done += chunk;
		if (rc)
			return rc + n - done;
	} while (done < n);
	return 0;
}
/*
 * strncpy_from_user_pt - copy a user string of at most @count bytes
 * into @dst via the page-table walker.
 *
 * Returns the string length excluding the terminator when a NUL was
 * copied, the number of bytes copied when it was not, or -EFAULT on
 * access failure.
 */
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	/* Bytes to copy: string length (incl. NUL), capped at count. */
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		/* Exclude the terminator from the returned length. */
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}
示例#30
0
/*
 * Pin down all the iovec pages needed for len bytes.
 * Return a struct dma_pinned_list to keep track of pages pinned down.
 *
 * We are allocating a single chunk of memory, and then carving it up into
 * 3 sections, the latter 2 whose size depends on the number of iovecs and the
 * total number of pages, respectively.
 *
 * Returns NULL for kernel-space iovecs (nothing to pin) and on any
 * allocation or pinning failure; partial pins are undone via
 * dma_unpin_iovec_pages().
 */
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
{
	struct dma_pinned_list *local_list;
	struct page **pages;
	int i;
	int ret;
	int nr_iovecs = 0;
	int iovec_len_used = 0;
	int iovec_pages_used = 0;

	/* don't pin down non-user-based iovecs */
	if (segment_eq(get_fs(), KERNEL_DS))
		return NULL;

	/* determine how many iovecs/pages there are, up front */
	do {
		iovec_len_used += iov[nr_iovecs].iov_len;
		iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
		nr_iovecs++;
	} while (iovec_len_used < len);

	/* single kmalloc for pinned list, page_list[], and the page arrays */
	local_list = kmalloc(sizeof(*local_list)
		+ (nr_iovecs * sizeof (struct dma_page_list))
		+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
	if (!local_list)
		goto out;

	/* list of pages starts right after the page list array */
	pages = (struct page **) &local_list->page_list[nr_iovecs];

	/* nr_iovecs tracks how far we got, for cleanup on failure. */
	local_list->nr_iovecs = 0;

	for (i = 0; i < nr_iovecs; i++) {
		struct dma_page_list *page_list = &local_list->page_list[i];

		len -= iov[i].iov_len;

		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
			goto unpin;

		/* Carve this iovec's slice out of the shared page array. */
		page_list->nr_pages = num_pages_spanned(&iov[i]);
		page_list->base_address = iov[i].iov_base;

		page_list->pages = pages;
		pages += page_list->nr_pages;

		/* pin pages down */
		down_read(&current->mm->mmap_sem);
		ret = get_user_pages(
			current,
			current->mm,
			(unsigned long) iov[i].iov_base,
			page_list->nr_pages,
			1,	/* write */
			0,	/* force */
			page_list->pages,
			NULL);
		up_read(&current->mm->mmap_sem);

		/* A short pin is a failure: unpin everything so far. */
		if (ret != page_list->nr_pages)
			goto unpin;

		local_list->nr_iovecs = i + 1;
	}

	return local_list;

unpin:
	dma_unpin_iovec_pages(local_list);
out:
	return NULL;
}