Пример #1
0
static int cfs_access_process_vm(struct task_struct *tsk,
				 struct mm_struct *mm,
				 unsigned long addr,
				 void *buf, int len, int write)
{
	/* Local copy of the kernel's access_process_vm() for kernels
	 * that do not export that symbol.  Transfers up to @len bytes
	 * between @buf and user address @addr of address space @mm:
	 * reads into @buf when @write == 0, writes from @buf otherwise.
	 * Returns the number of bytes transferred, or -EDEADLK if
	 * mmap_sem could not be acquired. */
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;	/* remember start so we can compute bytes copied */

	/* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
	 * which is already holding mmap_sem for writes.  If some other
	 * thread gets the write lock in the meantime, this thread will
	 * block, but at least it won't deadlock on itself.  LU-1735 */
	if (down_read_trylock(&mm->mmap_sem) == 0)
		return -EDEADLK;

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, rc, offset;
		void *maddr;

		/* Pin exactly one user page at @addr (force = 1). */
		rc = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (rc <= 0)
			break;

		/* Clamp the chunk so it never crosses a page boundary. */
		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);	/* drop the get_user_pages() reference */
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);

	/* Pointer difference = total bytes actually transferred. */
	return buf - old_buf;
}
Пример #2
0
static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
				 void *buf, int len, int write)
{
	/* Local copy of the kernel's access_process_vm() for kernels
	 * that do not export that symbol.  Transfers up to @len bytes
	 * between @buf and user address @addr of @tsk's address space:
	 * reads into @buf when @write == 0, writes from @buf otherwise.
	 * Returns the number of bytes actually transferred (0 if the
	 * task has no mm). */
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *start = buf;	/* remember start to compute bytes copied */

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* Errors are ignored; the caller only learns how much data
	 * was successfully transferred. */
	while (len) {
		int chunk, got, offset;
		void *kaddr;

		/* Pin exactly one user page at @addr (force = 1). */
		got = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (got <= 0)
			break;

		/* Clamp the chunk so it never crosses a page boundary. */
		offset = addr & (PAGE_SIZE-1);
		chunk = PAGE_SIZE-offset;
		if (chunk > len)
			chunk = len;

		kaddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  kaddr + offset, buf, chunk);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, kaddr + offset, chunk);
		}
		kunmap(page);
		page_cache_release(page);	/* drop get_user_pages() ref */

		len -= chunk;
		buf += chunk;
		addr += chunk;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - start;
}
Пример #3
0
/*
 * arch_ptrace - architecture-specific handling of ptrace requests.
 * @child:   traced task
 * @request: PTRACE_* request code
 * @addr:    request-specific address argument
 * @data:    request-specific data argument (also used as a user pointer
 *           for PEEK requests, see @datap)
 *
 * Returns 0 on success, -EIO on invalid address/signal, or the result
 * of the generic ptrace_request() for unhandled requests.
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
		/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKDATA:
		pr_debug("ptrace: PEEKDATA\n");
		/* fall through */
	case PTRACE_PEEKTEXT:	/* read word at location addr. */
		{
			unsigned long tmp = 0;
			int copied;

			ret = -EIO;
			pr_debug("ptrace: PEEKTEXT at addr 0x%08lx + %ld\n", addr, sizeof(data));
			if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
				break;
			pr_debug("ptrace: user address is valid\n");

			/* On-chip memories need dedicated access paths:
			 * L1 code via DMA, L1 data via direct memcpy,
			 * fixed-code region via copy_from_user_page(). */
			if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
			    && addr + sizeof(tmp) <= get_l1_code_start() + L1_CODE_LENGTH) {
				safe_dma_memcpy (&tmp, (const void *)(addr), sizeof(tmp));
				copied = sizeof(tmp);

			} else if (L1_DATA_A_LENGTH != 0 && addr >= L1_DATA_A_START
			    && addr + sizeof(tmp) <= L1_DATA_A_START + L1_DATA_A_LENGTH) {
				memcpy(&tmp, (const void *)(addr), sizeof(tmp));
				copied = sizeof(tmp);

			} else if (L1_DATA_B_LENGTH != 0 && addr >= L1_DATA_B_START
			    && addr + sizeof(tmp) <= L1_DATA_B_START + L1_DATA_B_LENGTH) {
				memcpy(&tmp, (const void *)(addr), sizeof(tmp));
				copied = sizeof(tmp);

			} else if (addr >= FIXED_CODE_START
			    && addr + sizeof(tmp) <= FIXED_CODE_END) {
				copy_from_user_page(0, 0, 0, &tmp, (const void *)(addr), sizeof(tmp));
				copied = sizeof(tmp);

			} else
				/* Ordinary user memory: go through the mm. */
				copied = access_process_vm(child, addr, &tmp,
							   sizeof(tmp), 0);

			pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp);
			if (copied != sizeof(tmp))
				break;
			ret = put_user(tmp, datap);
			break;
		}

		/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR:
		{
			unsigned long tmp;
			ret = -EIO;
			tmp = 0;
			if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) {
				printk(KERN_WARNING "ptrace error : PEEKUSR : temporarily returning "
				                    "0 - %x sizeof(pt_regs) is %lx\n",
				     (int)addr, sizeof(struct pt_regs));
				break;
			}
			/* Pseudo-registers just past pt_regs expose text/data
			 * layout (and FDPIC loadmaps when configured). */
			if (addr == sizeof(struct pt_regs)) {
				/* PT_TEXT_ADDR */
				tmp = child->mm->start_code + TEXT_OFFSET;
			} else if (addr == (sizeof(struct pt_regs) + 4)) {
				/* PT_TEXT_END_ADDR */
				tmp = child->mm->end_code;
			} else if (addr == (sizeof(struct pt_regs) + 8)) {
				/* PT_DATA_ADDR */
				tmp = child->mm->start_data;
#ifdef CONFIG_BINFMT_ELF_FDPIC
			} else if (addr == (sizeof(struct pt_regs) + 12)) {
				tmp = child->mm->context.exec_fdpic_loadmap;
			} else if (addr == (sizeof(struct pt_regs) + 16)) {
				tmp = child->mm->context.interp_fdpic_loadmap;
#endif
			} else {
				tmp = get_reg(child, addr);
			}
			ret = put_user(tmp, datap);
			break;
		}

		/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKEDATA:
		/* Fixed copy-paste bug: this message previously claimed
		 * PTRACE_PEEKDATA while handling POKEDATA. */
		pr_debug("ptrace: PTRACE_POKEDATA\n");
		/* fall through */
	case PTRACE_POKETEXT:	/* write the word at location addr. */
		{
			int copied;

			ret = -EIO;
			pr_debug("ptrace: POKETEXT at addr 0x%08lx + %ld bytes %lx\n",
			         addr, sizeof(data), data);
			if (is_user_addr_valid(child, addr, sizeof(data)) < 0)
				break;
			pr_debug("ptrace: user address is valid\n");

			/* Mirror of the PEEK path: per-region write access. */
			if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
			    && addr + sizeof(data) <= get_l1_code_start() + L1_CODE_LENGTH) {
				safe_dma_memcpy ((void *)(addr), &data, sizeof(data));
				copied = sizeof(data);

			} else if (L1_DATA_A_LENGTH != 0 && addr >= L1_DATA_A_START
			    && addr + sizeof(data) <= L1_DATA_A_START + L1_DATA_A_LENGTH) {
				memcpy((void *)(addr), &data, sizeof(data));
				copied = sizeof(data);

			} else if (L1_DATA_B_LENGTH != 0 && addr >= L1_DATA_B_START
			    && addr + sizeof(data) <= L1_DATA_B_START + L1_DATA_B_LENGTH) {
				memcpy((void *)(addr), &data, sizeof(data));
				copied = sizeof(data);

			} else if (addr >= FIXED_CODE_START
			    && addr + sizeof(data) <= FIXED_CODE_END) {
				copy_to_user_page(0, 0, 0, (void *)(addr), &data, sizeof(data));
				copied = sizeof(data);

			} else
				copied = access_process_vm(child, addr, &data,
							   sizeof(data), 1);

			pr_debug("ptrace: copied size %d\n", copied);
			if (copied != sizeof(data))
				break;
			ret = 0;
			break;
		}

	case PTRACE_POKEUSR:	/* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) {
			printk(KERN_WARNING "ptrace error : POKEUSR: temporarily returning 0\n");
			break;
		}

		/* Writes beyond pt_regs (the pseudo-registers) are
		 * silently accepted but ignored. */
		if (addr >= (sizeof(struct pt_regs))) {
			ret = 0;
			break;
		}
		if (addr == PT_SYSCFG) {
			/* Only the maskable SYSCFG bits may be changed. */
			data &= SYSCFG_MASK;
			data |= get_reg(child, PT_SYSCFG);
		}
		ret = put_reg(child, addr, data);
		break;

	case PTRACE_SYSCALL:	/* continue and stop at next (return from) syscall */
	case PTRACE_CONT:	/* restart after signal. */
		pr_debug("ptrace: syscall/cont\n");

		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
		ptrace_disable(child);
		pr_debug("ptrace: before wake_up_process\n");
		wake_up_process(child);
		ret = 0;
		break;

	/*
	 * make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		ptrace_disable(child);
		wake_up_process(child);
		break;

	case PTRACE_SINGLESTEP:	/* set the trap flag. */
		pr_debug("ptrace: single step\n");
		ret = -EIO;
		if (!valid_signal(data))
			break;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		ptrace_enable(child);
		child->exit_code = data;
		wake_up_process(child);
		ret = 0;
		break;

	case PTRACE_GETREGS:
		/* Get all gp regs from the child. */
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		printk(KERN_WARNING "ptrace: SETREGS: **** NOT IMPLEMENTED ***\n");
		/* Set all gp regs in the child. */
		ret = 0;
		break;

	default:
		/* Hand everything else to the generic ptrace layer. */
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
Пример #4
0
/*
 * Transfer up to @len bytes between @buf and user address @addr of
 * @tsk's address space (read into @buf when @write == 0, write from
 * @buf otherwise).  Returns the number of bytes actually transferred,
 * or 0 if the task has no mm.
 *
 * This variant additionally supports XIP (execute-in-place) mappings:
 * for a read of an untouched XIP page it maps the backing physical
 * address with ioremap() instead of pinning a struct page.
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;	/* remember start so we can compute bytes copied */

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		unsigned long paddr;
		int xip = 0;	/* set only on the XIP read path below */
#ifdef CONFIG_CRAMFS_XIP_DEBUGGABLE
		/* Reads from an untouched XIP mapping bypass
		 * get_user_pages(); paddr receives the physical address. */
		if (xip_enable_debug && !write) {
			vma = find_extend_vma(mm, addr);
			if (vma && (vma->vm_flags & VM_XIP))
				xip = find_xip_untouched_entry(mm, addr, &paddr);
		}
#endif
		if (xip) {
			/* No struct page for XIP: map the physical page. */
			maddr = ioremap(paddr, PAGE_SIZE);
			if (!maddr) 
				break;
			page = NULL;
		} else {
			/* Pin exactly one user page at @addr (force = 1). */
			ret = get_user_pages(tsk, mm, addr, 1,
					     write, 1, &page, &vma);
			if (ret <= 0)
				break;
			maddr = kmap(page);
		}
		
		/* Clamp the chunk so it never crosses a page boundary. */
		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		
		/* Unmap with the counterpart of whichever mapping was used. */
		if (xip) 
			iounmap(maddr);
		else {
			kunmap(page);
			page_cache_release(page);
		}

		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);
	
	return buf - old_buf;
}
Пример #5
0
/* pvfs_bufmap_copy_to_user_task_iovec()
 *
 * copies data out of a mapped buffer to a vector of user space address
 * of a given task specified by the task structure argument (tsk)
 * This is used by the client-daemon for completing an aio
 * operation that was issued by an arbitrary user program.
 * Unfortunately, we cannot use a copy_to_user
 * in that case and need to map in the user pages before
 * attempting the copy!
 *
 * NOTE: There is no need for an analogous copy from user task since
 * the data buffers get copied in the context of the process initiating
 * the write system call!
 *
 * Returns number of bytes copied on success, -errno on failure.
 */
size_t pvfs_bufmap_copy_to_user_task_iovec(
        struct task_struct *tsk,
        struct iovec *iovec, unsigned long nr_segs,
        int buffer_index,
        size_t size_to_be_copied)
{
    size_t ret = 0, amt_copied = 0, cur_copy_size = 0;
    int from_page_index = 0;
    void *from_kaddr = NULL;
    struct iovec *copied_iovec = NULL;
    struct pvfs_bufmap_desc *from = &desc_array[buffer_index];

    struct mm_struct *mm = NULL;
    struct vm_area_struct *vma = NULL;
    struct page *page = NULL;
    unsigned long to_addr = 0;
    void *maddr = NULL;
    unsigned int to_offset = 0;
    unsigned int seg, from_page_offset = 0;

    gossip_debug(GOSSIP_BUFMAP_DEBUG, "pvfs_bufmap_copy_to_user_task_iovec: "
            " PID: %d, iovec %p, from %p, index %d, "
            " size %zd\n", tsk->pid, iovec, from, buffer_index, size_to_be_copied);

    /* Hold bufmap_init_sem for the whole copy so the buffer mapping
     * cannot be torn down underneath us. */
    down_read(&bufmap_init_sem);
    if (bufmap_init == 0)
    {
        gossip_err("pvfs2_bufmap_copy_to_user: not yet "
                    "initialized.\n");
        gossip_err("pvfs2: please confirm that pvfs2-client "
                "daemon is running.\n");
        up_read(&bufmap_init_sem);
        return -EIO;
    }
    /*
     * copy the passed in iovec so that we can change some of its fields
     * (iov_base/iov_len are advanced in place as segments are consumed)
     */
    copied_iovec = kmalloc(nr_segs * sizeof(*copied_iovec),
                           PVFS2_BUFMAP_GFP_FLAGS);
    if (copied_iovec == NULL)
    {
        gossip_err("pvfs_bufmap_copy_to_user_iovec: failed allocating memory\n");
        up_read(&bufmap_init_sem);
        return -ENOMEM;
    }
    memcpy(copied_iovec, iovec, nr_segs * sizeof(*copied_iovec));
    /*
     * Go through each segment in the iovec and make sure that
     * the summation of iov_len is greater than the given size.
     */
    for (seg = 0, amt_copied = 0; seg < nr_segs; seg++)
    {
        amt_copied += copied_iovec[seg].iov_len;
    }
    if (amt_copied < size_to_be_copied)
    {
        gossip_err("pvfs_bufmap_copy_to_user_task_iovec: computed total (%zd) "
                "is less than (%zd)\n", amt_copied, size_to_be_copied);
        kfree(copied_iovec);
        up_read(&bufmap_init_sem);
        return -EINVAL;
    }
    mm = get_task_mm(tsk);
    if (!mm) 
    {
        kfree(copied_iovec);
        up_read(&bufmap_init_sem);
        return -EIO;
    }
    /* Reset cursors: amt_copied now tracks bytes actually copied. */
    from_page_index = 0;
    amt_copied = 0;
    seg = 0;
    from_page_offset = 0;
    /* 
     * Go through each of the page in the specified process
     * address space and copy from the mapped
     * buffer, and make sure to do this one page at a time!
     */
    down_read(&mm->mmap_sem);
    while (amt_copied < size_to_be_copied)
    {
        int inc_from_page_index = 0;
	struct iovec *iv = &copied_iovec[seg];

        /* Three cases relate the current segment length to the bytes
         * remaining in the current source page:
         *  - segment ends before the page does: consume the segment,
         *    keep reading from the same source page;
         *  - segment ends exactly with the page: consume both;
         *  - segment is longer than the page remainder: copy only up
         *    to the page boundary and advance the segment in place. */
        if (iv->iov_len < (PAGE_SIZE - from_page_offset))
        {
            cur_copy_size = PVFS_util_min(iv->iov_len, size_to_be_copied - amt_copied);
            seg++;
            to_addr = (unsigned long) iv->iov_base;
            inc_from_page_index = 0;
        }
        else if (iv->iov_len == (PAGE_SIZE - from_page_offset))
        {
            cur_copy_size = PVFS_util_min(iv->iov_len, size_to_be_copied - amt_copied);
            seg++;
            to_addr = (unsigned long) iv->iov_base;
            inc_from_page_index = 1;
        }
        else 
        {
            cur_copy_size = PVFS_util_min(PAGE_SIZE - from_page_offset, size_to_be_copied - amt_copied);
            to_addr = (unsigned long) iv->iov_base;
            iv->iov_base += cur_copy_size;
            iv->iov_len  -= cur_copy_size;
            inc_from_page_index = 1;
        }
        /* Pin the destination user page in the target task's mm. */
        ret = get_user_pages(tsk, mm, to_addr, 
                1,/* count */
                1,/* write */
                1,/* force */
                &page, &vma);
        if (ret <= 0)
            break;
        to_offset = to_addr & (PAGE_SIZE - 1);
        maddr = pvfs2_kmap(page);
        from_kaddr = pvfs2_kmap(from->page_array[from_page_index]);
        copy_to_user_page(vma, page, to_addr,
             maddr + to_offset /* dst */, 
             from_kaddr + from_page_offset, /* src */
             cur_copy_size /* len */);
        set_page_dirty_lock(page);
        pvfs2_kunmap(from->page_array[from_page_index]);
        pvfs2_kunmap(page);
        page_cache_release(page);

        amt_copied += cur_copy_size;
        if (inc_from_page_index)
        {
            from_page_offset = 0;
            from_page_index++;
        }
        else 
        {
            from_page_offset += cur_copy_size;
        }
    }
    up_read(&mm->mmap_sem);
    mmput(mm);
    up_read(&bufmap_init_sem);
    kfree(copied_iovec);
    /* Partial copy (e.g. get_user_pages() failure) is reported as -EFAULT. */
    return (amt_copied < size_to_be_copied) ? -EFAULT: amt_copied;
}