Example #1
static void
get_proc_size_info(struct lwp *l, unsigned long *stext, unsigned long *etext, unsigned long *sstack)
{
	struct proc *p = l->l_proc;
	struct vmspace *vm;
	struct vm_map *map;
	struct vm_map_entry *entry;

	*stext = 0;
	*etext = 0;
	*sstack = 0;

	proc_vmspace_getref(p, &vm);
	map = &vm->vm_map;
	vm_map_lock_read(map);

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		if (UVM_ET_ISSUBMAP(entry))
			continue;
		/* assume text is the first entry */
		if (*stext == *etext) {
			*stext = entry->start;
			*etext = entry->end;
			break;
		}
	}
#if defined(LINUX_USRSTACK32) && defined(USRSTACK32)
	if (strcmp(p->p_emul->e_name, "linux32") == 0 &&
	    LINUX_USRSTACK32 < USRSTACK32)
		*sstack = (unsigned long)LINUX_USRSTACK32;
	else
#endif
#ifdef LINUX_USRSTACK
	if (strcmp(p->p_emul->e_name, "linux") == 0 &&
	    LINUX_USRSTACK < USRSTACK)
		*sstack = (unsigned long)LINUX_USRSTACK;
	else
#endif
#ifdef	USRSTACK32
	if (strstr(p->p_emul->e_name, "32") != NULL)
		*sstack = (unsigned long)USRSTACK32;
	else
#endif
		*sstack = (unsigned long)USRSTACK;

	/*
	 * jdk 1.6 compares low <= addr && addr < high;
	 * if we put addr == high, then the test fails,
	 * so eat one page (illustrated after this example).
	 */
	*sstack -= PAGE_SIZE;

	vm_map_unlock_read(map);
	uvmspace_free(vm);
}
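
The "eat one page" comment above is easiest to see with concrete numbers: a
consumer that tests addresses with a half-open interval excludes the upper
bound itself, so reporting the boundary verbatim makes the test fail. A
minimal userspace sketch (the stack address here is made up for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* jdk 1.6 style half-open test: low <= addr && addr < high */
static int
in_range(unsigned long low, unsigned long addr, unsigned long high)
{
	return low <= addr && addr < high;
}

int
main(void)
{
	unsigned long high = 0xbfc00000UL;	/* hypothetical USRSTACK */

	/* Reporting the boundary itself fails the half-open test... */
	printf("verbatim: %d\n", in_range(0, high, high));		/* 0 */
	/* ...so report one page less, and the address lands inside. */
	printf("adjusted: %d\n", in_range(0, high - PAGE_SIZE, high));	/* 1 */
	return 0;
}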
Example #2
int
ptrace_machdep_dorequest(
    struct lwp *l,
    struct lwp *lt,
    int req,
    void *addr,
    int data
)
{
	struct uio uio;
	struct iovec iov;
	int write = 0;

	switch (req) {
	case PT_SETXMMREGS:
		write = 1;
		/* FALLTHROUGH */
	case PT_GETXMMREGS:
		/* write = 0 done above. */
		if (!process_machdep_validxmmregs(lt->l_proc))
			return (EINVAL);
		else {
			struct vmspace *vm;
			int error;

			error = proc_vmspace_getref(l->l_proc, &vm);
			if (error) {
				return error;
			}
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct xmmregs);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct xmmregs);
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_vmspace = vm;
			error = process_machdep_doxmmregs(l, lt, &uio);
			uvmspace_free(vm);
			return error;
		}
	}

#ifdef DIAGNOSTIC
	panic("ptrace_machdep: impossible");
#endif

	return (0);
}
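
For context, the handler above is reached from userland via ptrace(2). A
hedged sketch of a NetBSD/i386 caller; it assumes <machine/ptrace.h> provides
PT_GETXMMREGS and struct xmmregs (as process_machdep_doxmmregs() implies),
and that the target is already stopped under trace (the attach flow is
sketched after Example #6):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <machine/ptrace.h>	/* PT_GETXMMREGS, struct xmmregs (i386) */
#include <stdio.h>

/* Fetch the XMM register area of a stopped, traced process.  The kernel
 * fills exactly sizeof(struct xmmregs) bytes through the uio built in
 * ptrace_machdep_dorequest() above. */
static int
dump_xmmregs(pid_t pid)
{
	struct xmmregs xmm;

	if (ptrace(PT_GETXMMREGS, pid, &xmm, 0) == -1) {
		perror("PT_GETXMMREGS");
		return -1;
	}
	printf("fetched %zu bytes of xmm state from pid %ld\n",
	    sizeof(xmm), (long)pid);
	return 0;
}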
Example #3
/*
 * Linux compatible /proc/<pid>/statm. Only active when the -o linux
 * mountflag is used.
 */
int
procfs_do_pid_statm(struct lwp *curl, struct lwp *l,
    struct pfsnode *pfs, struct uio *uio)
{
	struct vmspace	*vm;
	struct proc	*p = l->l_proc;
	struct rusage	*ru = &p->p_stats->p_ru;
	char		*bf;
	int	 	 error;
	int	 	 len;

	error = ENAMETOOLONG;
	bf = malloc(LBFSZ, M_TEMP, M_WAITOK);

	/* XXX - we use values from vmspace, since dsl says that ru figures
	   are always 0 except for zombies. See kvm_proc.c::kvm_getproc2() */
	if ((error = proc_vmspace_getref(p, &vm)) != 0) {
		goto out;
	}

	len = snprintf(bf, LBFSZ,
	        "%lu %lu %lu %lu %lu %lu %lu\n",
		(unsigned long)(vm->vm_tsize + vm->vm_dsize + vm->vm_ssize), /* size */
		(unsigned long)(vm->vm_rssize),	/* resident */
		(unsigned long)(ru->ru_ixrss),	/* shared */
		(unsigned long)(vm->vm_tsize),	/* text size in pages */
		(unsigned long)(vm->vm_dsize),	/* data size in pages */
		(unsigned long)(vm->vm_ssize),	/* stack size in pages */
		(unsigned long) 0);

	uvmspace_free(vm);

	if (len == 0)
		goto out;

	error = uiomove_frombuf(bf, len, uio);
out:
	free(bf, M_TEMP);
	return error;
}
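
On the consuming side, the line written above can be read back with a plain
scanf. A small sketch against /proc/self/statm, assuming a Linux-compatible
procfs is mounted (e.g. with -o linux, or on native Linux); the field order
(size, resident, shared, text, data, stack, 0) follows the snprintf above:

#include <stdio.h>

int
main(void)
{
	unsigned long sz, res, shared, text, data, stack, zero;
	FILE *f = fopen("/proc/self/statm", "r");

	if (f == NULL) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
	    &sz, &res, &shared, &text, &data, &stack, &zero) != 7) {
		fclose(f);
		fprintf(stderr, "unexpected statm format\n");
		return 1;
	}
	fclose(f);
	printf("size=%lu resident=%lu text=%lu (pages)\n", sz, res, text);
	return 0;
}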
Example #4
/*
 * Linux compatible /proc/<pid>/stat. Only active when the -o linux
 * mountflag is used.
 */
int
procfs_do_pid_stat(struct lwp *curl, struct lwp *l,
    struct pfsnode *pfs, struct uio *uio)
{
	char *bf;
	struct proc *p = l->l_proc;
	int len;
	struct tty *tty = p->p_session->s_ttyp;
	struct rusage *ru = &p->p_stats->p_ru;
	struct rusage *cru = &p->p_stats->p_cru;
	unsigned long stext = 0, etext = 0, sstack = 0;
	struct timeval rt;
	struct vmspace	*vm;
	int error = 0;

	bf = malloc(LBFSZ, M_TEMP, M_WAITOK);

	if ((error = proc_vmspace_getref(p, &vm)) != 0) {
		goto out;
	}

	get_proc_size_info(l, &stext, &etext, &sstack);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);

	calcru(p, NULL, NULL, NULL, &rt);

	len = snprintf(bf, LBFSZ,
	    "%d (%s) %c %d %d %d %lld %d "
	    "%u "
	    "%lu %lu %lu %lu %lu %lu %lu %lu "
	    "%d %d %d "
	    "%lld %lld %lu %lu %" PRIu64 " "
	    "%lu %lu %lu "
	    "%u %u "
	    "%u %u %u %u "
	    "%lu %lu %lu %d %d\n",

	    p->p_pid,
	    p->p_comm,
	    "0IR3SZD"[(p->p_stat > 6) ? 0 : (int)p->p_stat],
	    (p->p_pptr != NULL) ? p->p_pptr->p_pid : 0,

	    p->p_pgid,
	    p->p_session->s_sid,
	    (unsigned long long)(tty ? tty->t_dev : 0),
	    (tty && tty->t_pgrp) ? tty->t_pgrp->pg_id : 0,

	    p->p_flag,

	    ru->ru_minflt,
	    cru->ru_minflt,
	    ru->ru_majflt,
	    cru->ru_majflt,
	    (long)USEC_2_TICKS(ru->ru_utime.tv_usec),
	    (long)USEC_2_TICKS(ru->ru_stime.tv_usec),
	    (long)USEC_2_TICKS(cru->ru_utime.tv_usec),
	    (long)USEC_2_TICKS(cru->ru_stime.tv_usec),

	    l->l_priority,				/* XXX: priority */
	    p->p_nice - 20,
	    0,

	    (long long)rt.tv_sec,
	    (long long)p->p_stats->p_start.tv_sec,
	    (unsigned long)(vm->vm_tsize + vm->vm_dsize + vm->vm_ssize), /* size */
	    (unsigned long)(vm->vm_rssize),	/* resident */
	    p->p_rlimit[RLIMIT_RSS].rlim_cur,

	    stext,					/* start code */
	    etext,					/* end code */
	    sstack,					/* mm start stack */
	    0,						/* XXX: pc */
	    0,						/* XXX: sp */
	    p->p_sigpend.sp_set.__bits[0],		/* XXX: pending */
	    0,						/* XXX: held */
	    p->p_sigctx.ps_sigignore.__bits[0],		/* ignored */
	    p->p_sigctx.ps_sigcatch.__bits[0],		/* caught */

	    (unsigned long)(intptr_t)l->l_wchan,
	    ru->ru_nvcsw,
	    ru->ru_nivcsw,
	    p->p_exitsig,
	    0);						/* XXX: processor */

	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	uvmspace_free(vm);

	if (len == 0)
		goto out;

	error = uiomove_frombuf(bf, len, uio);
out:
	free(bf, M_TEMP);
	return error;
}
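
USEC_2_TICKS() is not shown in this excerpt. A plausible reconstruction,
assuming the Linux convention of reporting CPU time in clock ticks of 1/hz
seconds (the macro bodies and the hz value here are assumptions for
illustration). Note that the call sites above convert only tv_usec, so whole
seconds of ru_utime/ru_stime are dropped; the variant in Example #5 converts
both fields with UTIME2TICKS():

#include <stdio.h>
#include <stdint.h>

#define HZ	100	/* assumed kernel clock rate */

/* Hypothetical reconstructions of the conversion macros. */
#define USEC_2_TICKS(us)	((us) / (1000000 / HZ))
#define UTIME2TICKS(sec, usec)	((uint64_t)(sec) * HZ + (usec) / (1000000 / HZ))

int
main(void)
{
	/* 2.5 s of user time: tv_sec = 2, tv_usec = 500000. */
	printf("usec only: %lu ticks\n",
	    (unsigned long)USEC_2_TICKS(500000UL));		/* 50 */
	printf("sec+usec:  %lu ticks\n",
	    (unsigned long)UTIME2TICKS(2, 500000UL));		/* 250 */
	return 0;
}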
Example #5
/*
 * Linux compatible /proc/<pid>/stat. Only active when the -o linux
 * mountflag is used.
 */
int
procfs_do_pid_stat(struct lwp *curl, struct lwp *l,
    struct pfsnode *pfs, struct uio *uio)
{
	char *bf;
	struct proc *p = l->l_proc;
	int len;
	struct rusage *cru = &p->p_stats->p_cru;
	unsigned long stext = 0, etext = 0, sstack = 0;
	struct timeval rt;
	struct vmspace	*vm;
	struct kinfo_proc2 ki;
	int error = 0;

	bf = malloc(LBFSZ, M_TEMP, M_WAITOK);

	if ((error = proc_vmspace_getref(p, &vm)) != 0) {
		goto out;
	}

	get_proc_size_info(l, &stext, &etext, &sstack);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);

	fill_kproc2(p, &ki, false);
	calcru(p, NULL, NULL, NULL, &rt);

	len = snprintf(bf, LBFSZ,
	    "%d (%s) %c %d %d %d %u %d "
	    "%u "
	    "%"PRIu64" %lu %"PRIu64" %lu %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" "
	    "%d %d %"PRIu64" "
	    "%lld %"PRIu64" %"PRId64" %lu %"PRIu64" "
	    "%lu %lu %lu "
	    "%u %u "
	    "%u %u %u %u "
	    "%"PRIu64" %"PRIu64" %"PRIu64" %d %"PRIu64"\n",

	    ki.p_pid,						/* 1 pid */
	    ki.p_comm,						/* 2 tcomm */
	    "0RRSTZXR8"[(ki.p_stat > 8) ? 0 : (int)ki.p_stat],	/* 3 state */
	    ki.p_ppid,						/* 4 ppid */
	    ki.p__pgid,						/* 5 pgrp */
	    ki.p_sid,						/* 6 sid */
	    (ki.p_tdev != (uint32_t)NODEV) ? ki.p_tdev : 0,	/* 7 tty_nr */
	    ki.p_tpgid,						/* 8 tty_pgrp */

	    ki.p_flag,						/* 9 flags */

	    ki.p_uru_minflt,					/* 10 min_flt */
	    cru->ru_minflt,
	    ki.p_uru_majflt,					/* 12 maj_flt */
	    cru->ru_majflt,
	    UTIME2TICKS(ki.p_uutime_sec, ki.p_uutime_usec),	/* 14 utime */
	    UTIME2TICKS(ki.p_ustime_sec, ki.p_ustime_usec),	/* 15 stime */
	    UTIME2TICKS(cru->ru_utime.tv_sec, cru->ru_utime.tv_usec), /* 16 cutime */
	    UTIME2TICKS(cru->ru_stime.tv_sec, cru->ru_stime.tv_usec), /* 17 cstime */

	    ki.p_priority,				/* XXX: 18 priority */
	    ki.p_nice - NZERO,				/* 19 nice */
	    ki.p_nlwps,					/* 20 num_threads */

	    (long long)rt.tv_sec,
	    UTIME2TICKS(ki.p_ustart_sec, ki.p_ustart_usec), /* 22 start_time */
	    ki.p_vm_msize,				/* 23 vsize */
	    PGTOKB(ki.p_vm_rssize),			/* 24 rss */
	    p->p_rlimit[RLIMIT_RSS].rlim_cur,		/* 25 rsslim */

	    stext,					/* 26 start_code */
	    etext,					/* 27 end_code */
	    sstack,					/* 28 start_stack */

	    0,						/* XXX: 29 esp */
	    0,						/* XXX: 30 eip */

	    ki.p_siglist.__bits[0],			/* XXX: 31 pending */
	    0,						/* XXX: 32 blocked */
	    ki.p_sigignore.__bits[0],		/* 33 sigign */
	    ki.p_sigcatch.__bits[0],		/* 34 sigcatch */

	    ki.p_wchan,					/* 35 wchan */
	    ki.p_uru_nvcsw,
	    ki.p_uru_nivcsw,
	    ki.p_exitsig,				/* 38 exit_signal */
	    ki.p_cpuid);				/* 39 task_cpu */

	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	uvmspace_free(vm);

	if (len == 0)
		goto out;

	error = uiomove_frombuf(bf, len, uio);
out:
	free(bf, M_TEMP);
	return error;
}
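
Reading the stat line back is slightly tricky because field 2 (comm, in
parentheses) may itself contain spaces, so the usual approach is to scan
backwards from the last ')'. A minimal sketch against /proc/self/stat,
again assuming a Linux-compatible procfs:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[1024], comm[256], state;
	int pid, ppid;
	char *rp;
	FILE *f = fopen("/proc/self/stat", "r");

	if (f == NULL) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f) == NULL) {
		fclose(f);
		return 1;
	}
	fclose(f);

	rp = strrchr(buf, ')');		/* end of "(comm)", which may hold spaces */
	if (rp == NULL || sscanf(buf, "%d (%255[^)]", &pid, comm) != 2 ||
	    sscanf(rp + 2, "%c %d", &state, &ppid) != 2) {
		fprintf(stderr, "unexpected stat format\n");
		return 1;
	}
	printf("pid=%d comm=%s state=%c ppid=%d\n", pid, comm, state, ppid);
	return 0;
}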
Example #6
/*
 * Process debugging system call.
 */
int
sys_ptrace(struct lwp *l, const struct sys_ptrace_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) req;
		syscallarg(pid_t) pid;
		syscallarg(void *) addr;
		syscallarg(int) data;
	} */
	struct proc *p = l->l_proc;
	struct lwp *lt;
	struct proc *t;				/* target process */
	struct uio uio;
	struct iovec iov;
	struct ptrace_io_desc piod;
	struct ptrace_lwpinfo pl;
	struct vmspace *vm;
	int error, write, tmp, req, pheld;
	int signo;
	ksiginfo_t ksi;
#ifdef COREDUMP
	char *path;
#endif

	error = 0;
	req = SCARG(uap, req);

	/*
	 * If attaching or detaching, we need to get a write hold on the
	 * proclist lock so that we can re-parent the target process.
	 */
	mutex_enter(proc_lock);

	/* "A foolish consistency..." XXX */
	if (req == PT_TRACE_ME) {
		t = p;
		mutex_enter(t->p_lock);
	} else {
		/* Find the process we're supposed to be operating on. */
		if ((t = p_find(SCARG(uap, pid), PFIND_LOCKED)) == NULL) {
			mutex_exit(proc_lock);
			return (ESRCH);
		}

		/* XXX-elad */
		mutex_enter(t->p_lock);
		error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
		    t, KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
		if (error) {
			mutex_exit(proc_lock);
			mutex_exit(t->p_lock);
			return (ESRCH);
		}
	}

	/*
	 * Grab a reference on the process to prevent it from execing or
	 * exiting.
	 */
	if (!rw_tryenter(&t->p_reflock, RW_READER)) {
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		return EBUSY;
	}

	/* Make sure we can operate on it. */
	switch (req) {
	case  PT_TRACE_ME:
		/* Saying that you're being traced is always legal. */
		break;

	case  PT_ATTACH:
		/*
		 * You can't attach to a process if:
		 *	(1) it's the process that's doing the attaching,
		 */
		if (t->p_pid == p->p_pid) {
			error = EINVAL;
			break;
		}

		/*
		 *	(2) it's a system process,
		 */
		if (t->p_flag & PK_SYSTEM) {
			error = EPERM;
			break;
		}

		/*
		 *	(3) it's already being traced, or
		 */
		if (ISSET(t->p_slflag, PSL_TRACED)) {
			error = EBUSY;
			break;
		}

		/*
		 * 	(4) the tracer is chrooted, and its root directory is
		 * 	    not at or above the root directory of the tracee
		 */
		mutex_exit(t->p_lock);	/* XXXSMP */
		tmp = proc_isunder(t, l);
		mutex_enter(t->p_lock);	/* XXXSMP */
		if (!tmp) {
			error = EPERM;
			break;
		}
		break;

	case  PT_READ_I:
	case  PT_READ_D:
	case  PT_WRITE_I:
	case  PT_WRITE_D:
	case  PT_IO:
#ifdef PT_GETREGS
	case  PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case  PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case  PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case  PT_SETFPREGS:
#endif
#ifdef __HAVE_PTRACE_MACHDEP
	PTRACE_MACHDEP_REQUEST_CASES
#endif
		/*
		 * You can't read/write the memory or registers of a process
		 * if the tracer is chrooted, and its root directory is not at
		 * or above the root directory of the tracee.
		 */
		mutex_exit(t->p_lock);	/* XXXSMP */
		tmp = proc_isunder(t, l);
		mutex_enter(t->p_lock);	/* XXXSMP */
		if (!tmp) {
			error = EPERM;
			break;
		}
		/*FALLTHROUGH*/

	case  PT_CONTINUE:
	case  PT_KILL:
	case  PT_DETACH:
	case  PT_LWPINFO:
	case  PT_SYSCALL:
#ifdef COREDUMP
	case  PT_DUMPCORE:
#endif
#ifdef PT_STEP
	case  PT_STEP:
#endif
		/*
		 * You can't do what you want to the process if:
		 *	(1) It's not being traced at all,
		 */
		if (!ISSET(t->p_slflag, PSL_TRACED)) {
			error = EPERM;
			break;
		}

		/*
		 *	(2) it's being traced by procfs (which has
		 *	    different signal delivery semantics),
		 */
		if (ISSET(t->p_slflag, PSL_FSTRACE)) {
			uprintf("file system traced\n");
			error = EBUSY;
			break;
		}

		/*
		 *	(3) it's not being traced by _you_, or
		 */
		if (t->p_pptr != p) {
			uprintf("parent %d != %d\n", t->p_pptr->p_pid, p->p_pid);
			error = EBUSY;
			break;
		}

		/*
		 *	(4) it's not currently stopped.
		 */
		if (t->p_stat != SSTOP || !t->p_waited /* XXXSMP */) {
			uprintf("stat %d flag %d\n", t->p_stat,
			    !t->p_waited);
			error = EBUSY;
			break;
		}
		break;

	default:			/* It was not a legal request. */
		error = EINVAL;
		break;
	}

	if (error == 0)
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_PTRACE, t, KAUTH_ARG(req),
		    NULL, NULL);

	if (error != 0) {
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		rw_exit(&t->p_reflock);
		return error;
	}

	/* Do single-step fixup if needed. */
	FIX_SSTEP(t);

	/*
	 * XXX NJWLWP
	 *
	 * The entire ptrace interface needs work to be useful to a
	 * process with multiple LWPs. For the moment, we'll kluge
	 * this; memory access will be fine, but register access will
	 * be weird.
	 */
	lt = LIST_FIRST(&t->p_lwps);
	KASSERT(lt != NULL);
	lwp_addref(lt);

	/*
	 * Which locks do we need held? XXX Ugly.
	 */
	switch (req) {
#ifdef PT_STEP
	case PT_STEP:
#endif
	case PT_CONTINUE:
	case PT_DETACH:
	case PT_KILL:
	case PT_SYSCALL:
	case PT_ATTACH:
	case PT_TRACE_ME:
		pheld = 1;
		break;
	default:
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		pheld = 0;
		break;
	}

	/* Now do the operation. */
	write = 0;
	*retval = 0;
	tmp = 0;

	switch (req) {
	case  PT_TRACE_ME:
		/* Just set the trace flag. */
		SET(t->p_slflag, PSL_TRACED);
		t->p_opptr = t->p_pptr;
		break;

	case  PT_WRITE_I:		/* XXX no separate I and D spaces */
	case  PT_WRITE_D:
#if defined(__HAVE_RAS)
		/*
		 * Can't write to a RAS
		 */
		if (ras_lookup(t, SCARG(uap, addr)) != (void *)-1) {
			error = EACCES;
			break;
		}
#endif
		write = 1;
		tmp = SCARG(uap, data);
		/* FALLTHROUGH */

	case  PT_READ_I:		/* XXX no separate I and D spaces */
	case  PT_READ_D:
		/* write = 0 done above. */
		iov.iov_base = (void *)&tmp;
		iov.iov_len = sizeof(tmp);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(unsigned long)SCARG(uap, addr);
		uio.uio_resid = sizeof(tmp);
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		UIO_SETUP_SYSSPACE(&uio);

		error = process_domem(l, lt, &uio);
		if (!write)
			*retval = tmp;
		break;

	case  PT_IO:
		error = copyin(SCARG(uap, addr), &piod, sizeof(piod));
		if (error)
			break;
		switch (piod.piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			/*
			 * Can't write to a RAS
			 */
			if (ras_lookup(t, SCARG(uap, addr)) != (void *)-1) {
				/* Break rather than return, so the LWP
				 * reference and p_reflock taken above are
				 * released on the way out. */
				error = EACCES;
				break;
			}
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			break;
		}
		if (error)
			break;
		error = proc_vmspace_getref(l->l_proc, &vm);
		if (error)
			break;
		iov.iov_base = piod.piod_addr;
		iov.iov_len = piod.piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(unsigned long)piod.piod_offs;
		uio.uio_resid = piod.piod_len;
		uio.uio_vmspace = vm;

		error = process_domem(l, lt, &uio);
		piod.piod_len -= uio.uio_resid;
		(void) copyout(&piod, SCARG(uap, addr), sizeof(piod));
		uvmspace_free(vm);
		break;

#ifdef COREDUMP
	case  PT_DUMPCORE:
		if ((path = SCARG(uap, addr)) != NULL) {
			char *dst;
			int len = SCARG(uap, data);
			if (len < 0 || len >= MAXPATHLEN) {
				error = EINVAL;
				break;
			}
			dst = malloc(len + 1, M_TEMP, M_WAITOK);
			if ((error = copyin(path, dst, len)) != 0) {
				free(dst, M_TEMP);
				break;
			}
			path = dst;
			path[len] = '\0';
		}
		error = coredump(lt, path);
		if (path)
			free(path, M_TEMP);
		break;
#endif

#ifdef PT_STEP
	case  PT_STEP:
		/*
		 * From the 4.4BSD PRM:
		 * "Execution continues as in request PT_CONTINUE; however
		 * as soon as possible after execution of at least one
		 * instruction, execution stops again. [ ... ]"
		 */
#endif
	case  PT_CONTINUE:
	case  PT_SYSCALL:
	case  PT_DETACH:
		if (req == PT_SYSCALL) {
			if (!ISSET(t->p_slflag, PSL_SYSCALL)) {
				SET(t->p_slflag, PSL_SYSCALL);
#ifdef __HAVE_SYSCALL_INTERN
				(*t->p_emul->e_syscall_intern)(t);
#endif
			}
		} else {
			if (ISSET(t->p_slflag, PSL_SYSCALL)) {
				CLR(t->p_slflag, PSL_SYSCALL);
#ifdef __HAVE_SYSCALL_INTERN
				(*t->p_emul->e_syscall_intern)(t);
#endif
			}
		}
		p->p_trace_enabled = trace_is_enabled(p);

		/*
		 * From the 4.4BSD PRM:
		 * "The data argument is taken as a signal number and the
		 * child's execution continues at location addr as if it
		 * incurred that signal.  Normally the signal number will
		 * be either 0 to indicate that the signal that caused the
		 * stop should be ignored, or that value fetched out of
		 * the process's image indicating which signal caused
		 * the stop.  If addr is (int *)1 then execution continues
		 * from where it stopped."
		 */

		/* Check that the data is a valid signal number or zero. */
		if (SCARG(uap, data) < 0 || SCARG(uap, data) >= NSIG) {
			error = EINVAL;
			break;
		}

		uvm_lwp_hold(lt);

		/* If the address parameter is not (int *)1, set the pc. */
		if ((int *)SCARG(uap, addr) != (int *)1)
			if ((error = process_set_pc(lt, SCARG(uap, addr))) != 0) {
				uvm_lwp_rele(lt);
				break;
			}

#ifdef PT_STEP
		/*
		 * Arrange for a single-step, if that's requested and possible.
		 */
		error = process_sstep(lt, req == PT_STEP);
		if (error) {
			uvm_lwp_rele(lt);
			break;
		}
#endif

		uvm_lwp_rele(lt);

		if (req == PT_DETACH) {
			CLR(t->p_slflag, PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL);

			/* give process back to original parent or init */
			if (t->p_opptr != t->p_pptr) {
				struct proc *pp = t->p_opptr;
				proc_reparent(t, pp ? pp : initproc);
			}

			/* not being traced any more */
			t->p_opptr = NULL;
		}

		signo = SCARG(uap, data);
	sendsig:
		/* Finally, deliver the requested signal (or none). */
		if (t->p_stat == SSTOP) {
			/*
			 * Unstop the process.  If it needs to take a
			 * signal, make all efforts to ensure that an
			 * LWP runs to see it.
			 */
			t->p_xstat = signo;
			proc_unstop(t);
		} else if (signo != 0) {
			KSI_INIT_EMPTY(&ksi);
			ksi.ksi_signo = signo;
			kpsignal2(t, &ksi);
		}
		break;

	case  PT_KILL:
		/* just send the process a KILL signal. */
		signo = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE, above. */

	case  PT_ATTACH:
		/*
		 * Go ahead and set the trace flag.
		 * Save the old parent (it's reset in
		 *   _DETACH, and also in kern_exit.c:wait4()).
		 * Reparent the process so that the tracing
		 *   proc gets to see all the action.
		 * Stop the target.
		 */
		t->p_opptr = t->p_pptr;
		if (t->p_pptr != p) {
			struct proc *parent = t->p_pptr;

			if (parent->p_lock < t->p_lock) {
				if (!mutex_tryenter(parent->p_lock)) {
					mutex_exit(t->p_lock);
					mutex_enter(parent->p_lock);
				}
			} else if (parent->p_lock > t->p_lock) {
				mutex_enter(parent->p_lock);
			}
			parent->p_slflag |= PSL_CHTRACED;
			proc_reparent(t, p);
			if (parent->p_lock != t->p_lock)
				mutex_exit(parent->p_lock);
		}
		SET(t->p_slflag, PSL_TRACED);
		signo = SIGSTOP;
		goto sendsig;

	case PT_LWPINFO:
		if (SCARG(uap, data) != sizeof(pl)) {
			error = EINVAL;
			break;
		}
		error = copyin(SCARG(uap, addr), &pl, sizeof(pl));
		if (error)
			break;
		tmp = pl.pl_lwpid;
		lwp_delref(lt);
		mutex_enter(t->p_lock);
		if (tmp == 0)
			lt = LIST_FIRST(&t->p_lwps);
		else {
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lt = LIST_NEXT(lt, l_sibling);
		}
		while (lt != NULL && lt->l_stat == LSZOMB)
			lt = LIST_NEXT(lt, l_sibling);
		pl.pl_lwpid = 0;
		pl.pl_event = 0;
		if (lt) {
			lwp_addref(lt);
			pl.pl_lwpid = lt->l_lid;
			if (lt->l_lid == t->p_sigctx.ps_lwp)
				pl.pl_event = PL_EVENT_SIGNAL;
		}
		mutex_exit(t->p_lock);

		error = copyout(&pl, SCARG(uap, addr), sizeof(pl));
		break;

#ifdef PT_SETREGS
	case  PT_SETREGS:
		write = 1;
#endif
#ifdef PT_GETREGS
	case  PT_GETREGS:
		/* write = 0 done above. */
#endif
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		tmp = SCARG(uap, data);
		if (tmp != 0 && t->p_nlwps > 1) {
			lwp_delref(lt);
			mutex_enter(t->p_lock);
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lwp_addref(lt);
			mutex_exit(t->p_lock);
		}
		if (!process_validregs(lt))
			error = EINVAL;
		else {
			error = proc_vmspace_getref(l->l_proc, &vm);
			if (error)
				break;
			iov.iov_base = SCARG(uap, addr);
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_vmspace = vm;

			error = process_doregs(l, lt, &uio);
			uvmspace_free(vm);
		}
		break;
#endif

#ifdef PT_SETFPREGS
	case  PT_SETFPREGS:
		write = 1;
#endif
#ifdef PT_GETFPREGS
	case  PT_GETFPREGS:
		/* write = 0 done above. */
#endif
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		tmp = SCARG(uap, data);
		if (tmp != 0 && t->p_nlwps > 1) {
			lwp_delref(lt);
			mutex_enter(t->p_lock);
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lwp_addref(lt);
			mutex_exit(t->p_lock);
		}
		if (!process_validfpregs(lt))
			error = EINVAL;
		else {
			error = proc_vmspace_getref(l->l_proc, &vm);
			if (error)
				break;
			iov.iov_base = SCARG(uap, addr);
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_vmspace = vm;

			error = process_dofpregs(l, lt, &uio);
			uvmspace_free(vm);
		}
		break;
#endif

#ifdef __HAVE_PTRACE_MACHDEP
	PTRACE_MACHDEP_REQUEST_CASES
		error = ptrace_machdep_dorequest(l, lt,
		    req, SCARG(uap, addr), SCARG(uap, data));
		break;
#endif
	}

	if (pheld) {
		mutex_exit(t->p_lock);
		mutex_exit(proc_lock);
	}
	if (lt != NULL)
		lwp_delref(lt);
	rw_exit(&t->p_reflock);

	return error;
}
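
For reference, the PT_ATTACH / PT_DETACH paths above pair up naturally in a
userland tracer: attach delivers a SIGSTOP, and detaching with addr set to
(void *)1 and data 0 resumes the target where it stopped, exactly as the
PT_CONTINUE logic above describes. A minimal sketch using the NetBSD-style
ptrace(2) signature:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	pid_t pid;
	int status;

	if (argc != 2) {
		fprintf(stderr, "usage: %s pid\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atoi(argv[1]);

	/* PT_ATTACH reparents the target to us and stops it with SIGSTOP. */
	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1) {
		perror("PT_ATTACH");
		return 1;
	}
	if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status)) {
		fprintf(stderr, "target did not stop\n");
		return 1;
	}

	/* PT_READ_D / PT_IO / register requests would go here. */

	/* Detach: addr (void *)1 = resume where stopped, data 0 = no signal. */
	if (ptrace(PT_DETACH, pid, (void *)1, 0) == -1) {
		perror("PT_DETACH");
		return 1;
	}
	return 0;
}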
Example #7
/*
 * code for returning process's command line arguments
 */
int
procfs_docmdline(
    struct lwp *curl,
    struct proc *p,
    struct pfsnode *pfs,
    struct uio *uio
)
{
	struct ps_strings pss;
	int count, error;
	size_t i, len, xlen, upper_bound;
	struct uio auio;
	struct iovec aiov;
	struct vmspace *vm;
	vaddr_t argv;
	char *arg;

	/* Don't allow writing. */
	if (uio->uio_rw != UIO_READ)
		return (EOPNOTSUPP);

	/*
	 * Allocate a temporary buffer to hold the arguments.
	 */
	arg = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);

	/*
	 * Zombies don't have a stack, so we can't read their psstrings.
	 * System processes also don't have a user stack.  This is what
	 * ps(1) would display.
	 */
	if (P_ZOMBIE(p) || (p->p_flag & PK_SYSTEM) != 0) {
		len = snprintf(arg, PAGE_SIZE, "(%s)", p->p_comm) + 1;
		error = uiomove_frombuf(arg, len, uio);
		free(arg, M_TEMP);
		return (error);
	}

	/*
	 * NOTE: Don't bother doing a process_checkioperm() here
	 * because the psstrings info is available by using ps(1),
	 * so it's not like there's anything to protect here.
	 */

	/*
	 * Lock the process down in memory.
	 */
	if ((error = proc_vmspace_getref(p, &vm)) != 0) {
		free(arg, M_TEMP);
		return (error);
	}

	/*
	 * Read in the ps_strings structure.
	 */
	aiov.iov_base = &pss;
	aiov.iov_len = sizeof(pss);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vaddr_t)p->p_psstr;
	auio.uio_resid = sizeof(pss);
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = uvm_io(&vm->vm_map, &auio);
	if (error)
		goto bad;

	/*
	 * Now read the address of the argument vector.
	 */
	aiov.iov_base = &argv;
	aiov.iov_len = sizeof(argv);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vaddr_t)pss.ps_argvstr;
	auio.uio_resid = sizeof(argv);
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = uvm_io(&vm->vm_map, &auio);
	if (error)
		goto bad;

	/*
	 * Now copy in the actual argument vector, one page at a time,
	 * since we don't know how long the vector is (though, we do
	 * know how many NUL-terminated strings are in the vector).
	 */
	len = 0;
	count = pss.ps_nargvstr;
	upper_bound = round_page(uio->uio_offset + uio->uio_resid);
	for (; count && len < upper_bound; len += xlen) {
		aiov.iov_base = arg;
		aiov.iov_len = PAGE_SIZE;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = argv + len;
		xlen = PAGE_SIZE - ((argv + len) & PAGE_MASK);
		auio.uio_resid = xlen;
		auio.uio_rw = UIO_READ;
		UIO_SETUP_SYSSPACE(&auio);
		error = uvm_io(&vm->vm_map, &auio);
		if (error)
			goto bad;

		for (i = 0; i < xlen && count != 0; i++) {
			if (arg[i] == '\0')
				count--;	/* one full string */
		}

		if (len + i > uio->uio_offset) {
			/* Have data in this page, copy it out */
			error = uiomove(arg + uio->uio_offset - len,
			    i + len - uio->uio_offset, uio);
			if (error || uio->uio_resid <= 0)
				break;
		}
	}

 bad:
	/*
	 * Release the process.
	 */
	uvmspace_free(vm);

	free(arg, M_TEMP);
	return (error);
}
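
The argument strings produced above are stored back to back, each
NUL-terminated, so a reader just turns the NULs into separators. A small
sketch, assuming a procfs mounted on /proc (a zombie or system process
yields "(comm)" instead, per the function above):

#include <stdio.h>

int
main(int argc, char **argv)
{
	char path[64], buf[4096];
	size_t n, i;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/cmdline",
	    argc > 1 ? argv[1] : "self");
	if ((f = fopen(path, "r")) == NULL) {
		perror(path);
		return 1;
	}
	n = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);

	for (i = 0; i + 1 < n; i++)
		if (buf[i] == '\0')	/* NUL separates the argv strings */
			buf[i] = ' ';
	buf[n] = '\0';
	printf("%s\n", buf);
	return 0;
}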
Example #8
/*
 * The map entries can *almost* be read with programs like cat.  However,
 * large maps need special programs to read.  It is not easy to implement
 * a program that can sense the required size of the buffer, and then
 * subsequently do a read with the appropriate size.  This operation cannot
 * be atomic.  The best that we can do is to allow the program to do a read
 * with an arbitrarily large buffer, and return as much as we can.  We can
 * return an error code if the buffer is too small (EFBIG), then the program
 * can try a bigger buffer (a userland retry loop is sketched after this
 * example).
 */
int
procfs_domap(struct lwp *curl, struct proc *p, struct pfsnode *pfs,
	     struct uio *uio, int linuxmode)
{
	int error;
	struct vmspace *vm;
	struct vm_map *map;
	struct vm_map_entry *entry;
	char *buffer = NULL;
	size_t bufsize = BUFFERSIZE;
	char *path;
	struct vnode *vp;
	struct vattr va;
	dev_t dev;
	long fileid;
	size_t pos;
	int width = (int)((curl->l_proc->p_flag & PK_32) ? sizeof(int32_t) : 
	    sizeof(void *)) * 2;

	if (uio->uio_rw != UIO_READ)
		return EOPNOTSUPP;

	if (uio->uio_offset != 0) {
		/*
		 * we return 0 here, so that the second read returns EOF
		 * we don't support reading from an offset because the
		 * map could have changed between the two reads.
		 */
		return 0;
	}

	error = 0;

	if (linuxmode != 0)
		path = malloc(MAXPATHLEN * 4, M_TEMP, M_WAITOK);
	else
		path = NULL;

	if ((error = proc_vmspace_getref(p, &vm)) != 0)
		goto out;

	map = &vm->vm_map;
	vm_map_lock_read(map);

again:
	buffer = malloc(bufsize, M_TEMP, M_WAITOK);
	pos = 0;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {

		if (UVM_ET_ISSUBMAP(entry))
			continue;

		if (linuxmode != 0) {
			*path = 0;
			dev = (dev_t)0;
			fileid = 0;
			if (UVM_ET_ISOBJ(entry) &&
			    UVM_OBJ_IS_VNODE(entry->object.uvm_obj)) {
				vp = (struct vnode *)entry->object.uvm_obj;
				vn_lock(vp, LK_SHARED | LK_RETRY);
				error = VOP_GETATTR(vp, &va, curl->l_cred);
				VOP_UNLOCK(vp);
				if (error == 0 && vp != pfs->pfs_vnode) {
					fileid = va.va_fileid;
					dev = va.va_fsid;
					error = vnode_to_path(path,
					    MAXPATHLEN * 4, vp, curl, p);
				}
			}
			pos += snprintf(buffer + pos, bufsize - pos,
			    "%.*"PRIxVADDR"-%.*"PRIxVADDR" %c%c%c%c "
			    "%.*lx %.2llx:%.2llx %-8ld %25.s %s\n",
			    width, entry->start,
			    width, entry->end,
			    (entry->protection & VM_PROT_READ) ? 'r' : '-',
			    (entry->protection & VM_PROT_WRITE) ? 'w' : '-',
			    (entry->protection & VM_PROT_EXECUTE) ? 'x' : '-',
			    (entry->etype & UVM_ET_COPYONWRITE) ? 'p' : 's',
			    width, (unsigned long)entry->offset,
			    (unsigned long long)major(dev),
			    (unsigned long long)minor(dev), fileid, "", path);
		} else {
			pos += snprintf(buffer + pos, bufsize - pos,
			    "%#"PRIxVADDR" %#"PRIxVADDR" "
			    "%c%c%c %c%c%c %s %s %d %d %d\n",
			    entry->start, entry->end,
			    (entry->protection & VM_PROT_READ) ? 'r' : '-',
			    (entry->protection & VM_PROT_WRITE) ? 'w' : '-',
			    (entry->protection & VM_PROT_EXECUTE) ? 'x' : '-',
			    (entry->max_protection & VM_PROT_READ) ? 'r' : '-',
			    (entry->max_protection & VM_PROT_WRITE) ? 'w' : '-',
			    (entry->max_protection & VM_PROT_EXECUTE) ?
				'x' : '-',
			    (entry->etype & UVM_ET_COPYONWRITE) ?
				"COW" : "NCOW",
			    (entry->etype & UVM_ET_NEEDSCOPY) ? "NC" : "NNC",
			    entry->inheritance, entry->wired_count,
			    entry->advice);
		}
		if (pos >= bufsize) {
			bufsize <<= 1;
			if (bufsize > MAXBUFFERSIZE) {
				error = ENOMEM;
				vm_map_unlock_read(map);
				uvmspace_free(vm);
				goto out;
			}
			free(buffer, M_TEMP);
			goto again;
		}
	}

	vm_map_unlock_read(map);
	uvmspace_free(vm);

	error = uiomove(buffer, pos, uio);
out:
	if (path != NULL)
		free(path, M_TEMP);
	if (buffer != NULL)
		free(buffer, M_TEMP);

	return error;
}
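
The retry protocol described in the header comment looks like this from
userland: read the whole file in one go (offsets other than 0 return EOF,
per the code above), and if the buffer might have been truncated, double it
and try again. A sketch, assuming a Linux-style maps file under /proc:

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	size_t bufsize = 16384;
	char *buf = NULL;
	ssize_t n;
	int fd;

	for (;;) {
		if ((buf = realloc(buf, bufsize)) == NULL)
			return 1;
		if ((fd = open("/proc/self/maps", O_RDONLY)) == -1) {
			perror("open");
			return 1;
		}
		n = read(fd, buf, bufsize);
		close(fd);
		if (n == -1) {
			perror("read");
			return 1;
		}
		if ((size_t)n < bufsize)	/* fit with room to spare: done */
			break;
		bufsize <<= 1;			/* possibly truncated: grow and retry */
	}
	fwrite(buf, 1, (size_t)n, stdout);
	free(buf);
	return 0;
}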
Example #9
/*
 * dmio_usrreq_init:
 *
 *	Build a request structure.
 */
static int
dmio_usrreq_init(struct file *fp, struct dmio_usrreq_state *dus,
    struct dmio_usrreq *req, struct dmover_request *dreq)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmover_session *dses = ds->ds_session;
	struct uio *uio_out = &dus->dus_uio_out;
	struct uio *uio_in;
	dmio_buffer inbuf;
	size_t len;
	int i, error;
	u_int j;

	/* XXX How should malloc interact w/ FNONBLOCK? */

	error = RUN_ONCE(&dmio_cleaner_control, dmio_cleaner_init);
	if (error) {
		return error;
	}

	error = proc_vmspace_getref(curproc, &dus->dus_vmspace);
	if (error) {
		return error;
	}

	if (req->req_outbuf.dmbuf_iovcnt != 0) {
		if (req->req_outbuf.dmbuf_iovcnt > IOV_MAX) {
			/* Drop the vmspace reference taken above, as the
			 * "bad" path below does. */
			uvmspace_free(dus->dus_vmspace);
			return (EINVAL);
		}
		len = sizeof(struct iovec) * req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_iov = malloc(len, M_TEMP, M_WAITOK);
		error = copyin(req->req_outbuf.dmbuf_iov, uio_out->uio_iov,
		    len);
		if (error) {
			free(uio_out->uio_iov, M_TEMP);
			uvmspace_free(dus->dus_vmspace);
			return (error);
		}

		for (j = 0, len = 0; j < req->req_outbuf.dmbuf_iovcnt; j++) {
			len += uio_out->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_out->uio_iov, M_TEMP);
				uvmspace_free(dus->dus_vmspace);
				return (EINVAL);
			}
		}

		uio_out->uio_iovcnt = req->req_outbuf.dmbuf_iovcnt;
		uio_out->uio_resid = len;
		uio_out->uio_rw = UIO_READ;
		uio_out->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_outbuf_type = DMOVER_BUF_UIO;
		dreq->dreq_outbuf.dmbuf_uio = uio_out;
	} else {
		uio_out->uio_iov = NULL;
		uio_out = NULL;
		dreq->dreq_outbuf_type = DMOVER_BUF_NONE;
	}

	memcpy(dreq->dreq_immediate, req->req_immediate,
	    sizeof(dreq->dreq_immediate));

	if (dses->dses_ninputs == 0) {
		/* No inputs; all done. */
		return (0);
	}

	dreq->dreq_inbuf_type = DMOVER_BUF_UIO;

	dus->dus_uio_in = malloc(sizeof(struct uio) * dses->dses_ninputs,
	    M_TEMP, M_WAITOK);
	memset(dus->dus_uio_in, 0, sizeof(struct uio) * dses->dses_ninputs);

	for (i = 0; i < dses->dses_ninputs; i++) {
		uio_in = &dus->dus_uio_in[i];

		error = copyin(&req->req_inbuf[i], &inbuf, sizeof(inbuf));
		if (error)
			goto bad;

		if (inbuf.dmbuf_iovcnt > IOV_MAX) {
			error = EINVAL;
			goto bad;
		}
		len = sizeof(struct iovec) * inbuf.dmbuf_iovcnt;
		if (len == 0) {
			error = EINVAL;
			goto bad;
		}
		uio_in->uio_iov = malloc(len, M_TEMP, M_WAITOK);

		error = copyin(inbuf.dmbuf_iov, uio_in->uio_iov, len);
		if (error) {
			free(uio_in->uio_iov, M_TEMP);
			goto bad;
		}

		for (j = 0, len = 0; j < inbuf.dmbuf_iovcnt; j++) {
			len += uio_in->uio_iov[j].iov_len;
			if (len > SSIZE_MAX) {
				free(uio_in->uio_iov, M_TEMP);
				error = EINVAL;
				goto bad;
			}
		}

		if (uio_out != NULL && len != uio_out->uio_resid) {
			free(uio_in->uio_iov, M_TEMP);
			error = EINVAL;
			goto bad;
		}

		uio_in->uio_iovcnt = inbuf.dmbuf_iovcnt;
		uio_in->uio_resid = len;
		uio_in->uio_rw = UIO_WRITE;
		uio_in->uio_vmspace = dus->dus_vmspace;

		dreq->dreq_inbuf[i].dmbuf_uio = uio_in;
	}

	return (0);

 bad:
	if (i > 0) {
		for (--i; i >= 0; i--) {
			uio_in = &dus->dus_uio_in[i];
			free(uio_in->uio_iov, M_TEMP);
		}
	}
	free(dus->dus_uio_in, M_TEMP);
	if (uio_out != NULL)
		free(uio_out->uio_iov, M_TEMP);
	uvmspace_free(dus->dus_vmspace);
	return (error);
}
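
The per-iovec length check above (a running total, rejected as soon as it
passes SSIZE_MAX) is a reusable pattern. A standalone sketch of the same
validation for a userspace iovec array:

#include <sys/types.h>
#include <sys/uio.h>
#include <limits.h>
#include <stdio.h>

/* Sum iovec lengths, rejecting empty or oversized arrays and totals
 * beyond SSIZE_MAX, mirroring the checks in dmio_usrreq_init(). */
static ssize_t
iov_total_len(const struct iovec *iov, unsigned int iovcnt)
{
	size_t len = 0;
	unsigned int i;

	if (iovcnt == 0 || iovcnt > IOV_MAX)
		return -1;
	for (i = 0; i < iovcnt; i++) {
		len += iov[i].iov_len;
		if (len > SSIZE_MAX)	/* checked after every addition */
			return -1;
	}
	return (ssize_t)len;
}

int
main(void)
{
	char a[8], b[8];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};

	printf("total = %zd\n", iov_total_len(iov, 2));	/* 16 */
	return 0;
}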