Example #1
int
procfs_doprocfpregs(PFS_FILL_ARGS)
{
	int error;
	struct fpreg r;
	struct thread *td2;
#ifdef COMPAT_FREEBSD32
	struct fpreg32 r32;
	int wrap32 = 0;
#endif

	if (uio->uio_offset != 0)
		return (0);

	PROC_LOCK(p);
	PROC_ASSERT_HELD(p);
	if (p_candebug(td, p)) {
		PROC_UNLOCK(p);
		return (EPERM);
	}
	if (!P_SHOULDSTOP(p)) {
		PROC_UNLOCK(p);
		return (EBUSY);
	}

	td2 = FIRST_THREAD_IN_PROC(p);
#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32) == 0) {
			PROC_UNLOCK(p);
			return (EINVAL);
		}
		wrap32 = 1;
		memset(&r32, 0, sizeof(r32));
	} else
#endif
		memset(&r, 0, sizeof(r));
	error = PROC(read, fpregs, td2, &r);
	if (error == 0) {
		PROC_UNLOCK(p);
		error = UIOMOVE_FROMBUF(r, uio);
		PROC_LOCK(p);
	}
	if (error == 0 && uio->uio_rw == UIO_WRITE) {
		if (!P_SHOULDSTOP(p))
			error = EBUSY;
		else
			/* XXXKSE: */
			error = PROC(write, fpregs, td2, &r);
	}
	PROC_UNLOCK(p);

	return (error);
}
Example #2
int
procfs_doprocdbregs(PFS_FILL_ARGS)
{
	int error;
	struct dbreg r;
	struct thread *td2;
#ifdef COMPAT_FREEBSD32
	struct dbreg32 r32;
	int wrap32 = 0;
#endif

	if (uio->uio_offset != 0)
		return (0);

	PROC_LOCK(p);
	KASSERT(p->p_lock > 0, ("proc not held"));
	if (p_candebug(td, p) != 0) {
		PROC_UNLOCK(p);
		return (EPERM);
	}

	td2 = FIRST_THREAD_IN_PROC(p);
#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32)) {
		if (SV_PROC_FLAG(td2->td_proc, SV_ILP32) == 0) {
			PROC_UNLOCK(p);
			return (EINVAL);
		}
		wrap32 = 1;
	}
#endif
	error = PROC(read, dbregs, td2, &r);
	if (error == 0) {
		PROC_UNLOCK(p);
		error = UIOMOVE_FROMBUF(r, uio);
		PROC_LOCK(p);
	}
	if (error == 0 && uio->uio_rw == UIO_WRITE) {
		if (!P_SHOULDSTOP(p)) /* XXXKSE should be P_TRACED? */
			error = EBUSY;
		else
			/* XXXKSE: */
			error = PROC(write, dbregs, td2, &r);
	}
	PROC_UNLOCK(p);

	return (error);
}
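
A note for readers coming to Examples #1 and #2 from outside the procfs sources: PROC() and UIOMOVE_FROMBUF() are convenience macros defined near the top of the same files; they dispatch either to the native register helpers or to the 32-bit compat variants, depending on wrap32. A rough sketch of their shape (an approximation for illustration, not a verbatim copy of any particular FreeBSD revision):

#ifdef COMPAT_FREEBSD32
/* Pick the 32-bit compat helper and buffer when tracing a 32-bit binary. */
#define	PROC(d, w, t, r)	wrap32 ?				\
	proc_ ## d ## _ ## w ## 32(t, r ## 32) :			\
	proc_ ## d ## _ ## w(t, r)
#define	UIOMOVE_FROMBUF(k, u)	wrap32 ?				\
	uiomove_frombuf(&k ## 32, sizeof(k ## 32), u) :			\
	uiomove_frombuf(&k, sizeof(k), u)
#else
#define	PROC(d, w, t, r)	proc_ ## d ## _ ## w(t, r)
#define	UIOMOVE_FROMBUF(k, u)	uiomove_frombuf(&k, sizeof(k), u)
#endif

Under these (assumed) definitions, PROC(read, fpregs, td2, &r) expands to proc_read_fpregs(td2, &r) for a native target, or to proc_read_fpregs32(td2, &r32) when the traced process is a 32-bit binary, which is why both r and r32 are declared in the examples above.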
Example #3
static int
linux_clone_thread(struct thread *td, struct linux_clone_args *args)
{
	struct linux_emuldata *em;
	struct thread *newtd;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "thread: flags %x, stack %p, parent tid: %p, "
		    "child tid: %p"), (unsigned)args->flags,
		    args->stack, args->parent_tidptr, args->child_tidptr);
	}
#endif

	LINUX_CTR4(clone_thread, "thread(%d) flags %x ptid %p ctid %p",
	    td->td_tid, (unsigned)args->flags,
	    args->parent_tidptr, args->child_tidptr);

	if (args->flags & LINUX_CLONE_PARENT_SETTID)
		if (args->parent_tidptr == NULL)
			return (EINVAL);

	/* Threads should be created with their own stack */
	if (args->stack == NULL)
		return (EINVAL);

	p = td->td_proc;

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our td */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_copy_thread(newtd, td);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	newtd->td_proc = p;
	thread_cow_get(newtd, td);

	/* create the emuldata */
	linux_proc_init(td, newtd, args->flags);

	em = em_find(newtd);
	KASSERT(em != NULL, ("clone_thread: emuldata not found.\n"));

	if (args->flags & LINUX_CLONE_SETTLS)
		linux_set_cloned_tls(newtd, args->tls);

	if (args->flags & LINUX_CLONE_CHILD_SETTID)
		em->child_set_tid = args->child_tidptr;
	else
	   	em->child_set_tid = NULL;

	if (args->flags & LINUX_CLONE_CHILD_CLEARTID)
		em->child_clear_tid = args->child_tidptr;
	else
	   	em->child_clear_tid = NULL;

	cpu_thread_clean(newtd);
	
	linux_set_upcall_kse(newtd, PTROUT(args->stack));

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));

	if (args->flags & LINUX_CLONE_PARENT)
		thread_link(newtd, p->p_pptr);
	else
		thread_link(newtd, p);

	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

#ifdef DEBUG
	if (ldebug(clone))
		printf(ARGS(clone, "successful clone to %d, stack %p"),
		(int)newtd->td_tid, args->stack);
#endif

	LINUX_CTR2(clone_thread, "thread(%d) successful clone to %d",
	    td->td_tid, newtd->td_tid);

	if (args->flags & LINUX_CLONE_PARENT_SETTID) {
		error = copyout(&newtd->td_tid, args->parent_tidptr,
		    sizeof(newtd->td_tid));
		if (error)
			printf(LMSG("clone_thread: copyout failed!"));
	}

	/*
	 * Make this runnable after we are finished with it.
	 */
	thread_lock(newtd);
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	td->td_retval[0] = newtd->td_tid;

	return (0);

fail:
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}
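
The thread-creation paths in this listing (this example and Examples #5, #8 and #11) follow the same RACCT accounting discipline: charge RACCT_NTHR before allocating anything, and return the charge on every failure path. A distilled skeleton of that symmetry (an editor's sketch, not code from the FreeBSD tree; create_body() is a hypothetical stand-in for the allocation, linking and scheduling work done by the real functions):

static int
thread_create_skeleton(struct thread *td)
{
	struct proc *p = td->td_proc;
	int error;

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);	/* charge one thread */
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);	/* over the thread limit */
	}
#endif

	error = create_body(td);	/* hypothetical: alloc, link, run */
	if (error == 0)
		return (0);

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);	/* give the charge back */
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}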
Example #4
/*
 * Layout:
 * - column counts
 * - header
 * - single-threaded process
 * - multi-threaded process
 * - thread in a MT process
 *
 *          1         2         3         4         5         6         7
 * 1234567890123456789012345678901234567890123456789012345678901234567890
 *   pid  ppid  pgrp   uid   state   wmesg     wchan    cmd
 * <pid> <ppi> <pgi> <uid>  <stat> < wmesg > < wchan  > <name>
 * <pid> <ppi> <pgi> <uid>  <stat>  (threaded)          <command>
 * <tid >                   <stat> < wmesg > < wchan  > <name>
 *
 * For machines with 64-bit pointers, we expand the wchan field 8 more
 * characters.
 */
void
db_ps(db_expr_t addr, boolean_t hasaddr, db_expr_t count, char *modif)
{
	volatile struct proc *p, *pp;
	volatile struct thread *td;
	struct ucred *cred;
	struct pgrp *pgrp;
	char state[9];
	int np, rflag, sflag, dflag, lflag, wflag;

	np = nprocs;

	if (!LIST_EMPTY(&allproc))
		p = LIST_FIRST(&allproc);
	else
		p = &proc0;

#ifdef __LP64__
	db_printf("  pid  ppid  pgrp   uid   state   wmesg         wchan        cmd\n");
#else
	db_printf("  pid  ppid  pgrp   uid   state   wmesg     wchan    cmd\n");
#endif
	while (--np >= 0 && !db_pager_quit) {
		if (p == NULL) {
			db_printf("oops, ran out of processes early!\n");
			break;
		}
		pp = p->p_pptr;
		if (pp == NULL)
			pp = p;

		cred = p->p_ucred;
		pgrp = p->p_pgrp;
		db_printf("%5d %5d %5d %5d ", p->p_pid, pp->p_pid,
		    pgrp != NULL ? pgrp->pg_id : 0,
		    cred != NULL ? cred->cr_ruid : 0);

		/* Determine our primary process state. */
		switch (p->p_state) {
		case PRS_NORMAL:
			if (P_SHOULDSTOP(p))
				state[0] = 'T';
			else {
				/*
				 * One of D, L, R, S, W.  For a
				 * multithreaded process we will use
				 * the state of the thread with the
				 * highest precedence.  The
				 * precedence order from high to low
				 * is R, L, D, S, W.  If no thread is
				 * in a sane state we use '?' for our
				 * primary state.
				 */
				rflag = sflag = dflag = lflag = wflag = 0;
				FOREACH_THREAD_IN_PROC(p, td) {
					if (td->td_state == TDS_RUNNING ||
					    td->td_state == TDS_RUNQ ||
					    td->td_state == TDS_CAN_RUN)
						rflag++;
					if (TD_ON_LOCK(td))
						lflag++;
					if (TD_IS_SLEEPING(td)) {
						if (!(td->td_flags & TDF_SINTR))
							dflag++;
						else
							sflag++;
					}
					if (TD_AWAITING_INTR(td))
						wflag++;
				}
				if (rflag)
					state[0] = 'R';
				else if (lflag)
					state[0] = 'L';
				else if (dflag)
					state[0] = 'D';
				else if (sflag)
					state[0] = 'S';
				else if (wflag)
					state[0] = 'W';
				else
					state[0] = '?';				
			}
			break;
		case PRS_NEW:
			state[0] = 'N';
			break;
		case PRS_ZOMBIE:
			state[0] = 'Z';
			break;
		default:
			state[0] = 'U';
			break;
		}
		state[1] = '\0';

		/* Additional process state flags. */
		if (!(p->p_flag & P_INMEM))
			strlcat(state, "W", sizeof(state));
		if (p->p_flag & P_TRACED)
			strlcat(state, "X", sizeof(state));
		if (p->p_flag & P_WEXIT && p->p_state != PRS_ZOMBIE)
			strlcat(state, "E", sizeof(state));
		if (p->p_flag & P_PPWAIT)
			strlcat(state, "V", sizeof(state));
		if (p->p_flag & P_SYSTEM || p->p_lock > 0)
			strlcat(state, "L", sizeof(state));
		if (p->p_session != NULL && SESS_LEADER(p))
			strlcat(state, "s", sizeof(state));
		/* Cheated here and didn't compare pgid's. */
		if (p->p_flag & P_CONTROLT)
			strlcat(state, "+", sizeof(state));
		if (cred != NULL && jailed(cred))
			strlcat(state, "J", sizeof(state));
		db_printf(" %-6.6s ", state);
		if (p->p_flag & P_HADTHREADS) {
#ifdef __LP64__
			db_printf(" (threaded)                  ");
#else
			db_printf(" (threaded)          ");
#endif
			if (p->p_flag & P_SYSTEM)
				db_printf("[");
			db_printf("%s", p->p_comm);
			if (p->p_flag & P_SYSTEM)
				db_printf("]");
			db_printf("\n");
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			dumpthread(p, td, p->p_flag & P_HADTHREADS);
			if (db_pager_quit)
				break;
		}

		p = LIST_NEXT(p, p_list);
		if (p == NULL && np > 0)
			p = LIST_FIRST(&zombproc);
	}
}
Example #5
static int
create_thread(struct thread *td, mcontext_t *ctx,
	    void (*start_func)(void *), void *arg,
	    char *stack_base, size_t stack_size,
	    char *tls_base,
	    long *child_tid, long *parent_tid,
	    int flags, struct rtprio *rtp)
{
	stack_t stack;
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	/* Have race condition but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	PROC_LOCK(td->td_proc);
	error = racct_add(p, RACCT_NTHR, 1);
	PROC_UNLOCK(td->td_proc);
	if (error != 0)
		return (EPROCLIM);
#endif

	/* Initialize our td */
	newtd = thread_alloc(0);
	if (newtd == NULL) {
		error = ENOMEM;
		goto fail;
	}

	cpu_set_upcall(newtd, td);

	/*
	 * Try the copyout as soon as we allocate the td so we don't
	 * have to tear things down in a failure case below.
	 * Here we copy out the tid to two places, one for the child and
	 * one for the parent, because pthread can create a detached
	 * thread; if the parent wants to safely access the child tid, it
	 * has to provide its own storage, because the child thread may
	 * exit quickly and the memory is freed before the parent thread
	 * can access it.
	 */
	if ((child_tid != NULL &&
	    suword_lwpid(child_tid, newtd->td_tid)) ||
	    (parent_tid != NULL &&
	    suword_lwpid(parent_tid, newtd->td_tid))) {
		thread_free(newtd);
		error = EFAULT;
		goto fail;
	}

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_ucred = crhold(td->td_ucred);

	if (ctx != NULL) { /* old way to set user context */
		error = set_mcontext(newtd, ctx);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	} else {
		/* Set up our machine context. */
		stack.ss_sp = stack_base;
		stack.ss_size = stack_size;
		/* Set upcall address to user thread entry function. */
		cpu_set_upcall_kse(newtd, start_func, arg, &stack);
		/* Setup user TLS address and TLS pointer register. */
		error = cpu_set_user_tls(newtd, tls_base);
		if (error != 0) {
			thread_free(newtd);
			crfree(td->td_ucred);
			goto fail;
		}
	}

	PROC_LOCK(td->td_proc);
	td->td_proc->p_flag |= P_HADTHREADS;
	thread_link(newtd, p); 
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	PROC_LOCK(p);
	racct_sub(p, RACCT_NTHR, 1);
	PROC_UNLOCK(p);
#endif
	return (error);
}
Example #6
int
linux_ptrace(struct thread *td, struct linux_ptrace_args *uap)
{
	union {
		struct linux_pt_reg	reg;
		struct linux_pt_fpreg	fpreg;
		struct linux_pt_fpxreg	fpxreg;
	} r;
	union {
		struct reg		bsd_reg;
		struct fpreg		bsd_fpreg;
		struct dbreg		bsd_dbreg;
	} u;
	void *addr;
	pid_t pid;
	int error, req;

	error = 0;

	/* by default, just copy data intact */
	req  = uap->req;
	pid  = (pid_t)uap->pid;
	addr = (void *)uap->addr;

	switch (req) {
	case PTRACE_TRACEME:
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
	case PTRACE_KILL:
		error = kern_ptrace(td, req, pid, addr, uap->data);
		break;
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA: {
		/* need to preserve return value */
		int rval = td->td_retval[0];
		error = kern_ptrace(td, req, pid, addr, 0);
		if (error == 0)
			error = copyout(td->td_retval, (void *)uap->data,
			    sizeof(l_int));
		td->td_retval[0] = rval;
		break;
	}
	case PTRACE_DETACH:
		error = kern_ptrace(td, PT_DETACH, pid, (void *)1,
		     map_signum(uap->data));
		break;
	case PTRACE_SINGLESTEP:
	case PTRACE_CONT:
		error = kern_ptrace(td, req, pid, (void *)1,
		     map_signum(uap->data));
		break;
	case PTRACE_ATTACH:
		error = kern_ptrace(td, PT_ATTACH, pid, addr, uap->data);
		break;
	case PTRACE_GETREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = kern_ptrace(td, PT_GETREGS, pid, &u.bsd_reg, 0);
		if (error == 0) {
			map_regs_to_linux(&u.bsd_reg, &r.reg);
			error = copyout(&r.reg, (void *)uap->data,
			    sizeof(r.reg));
		}
		break;
	case PTRACE_SETREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = copyin((void *)uap->data, &r.reg, sizeof(r.reg));
		if (error == 0) {
			map_regs_from_linux(&u.bsd_reg, &r.reg);
			error = kern_ptrace(td, PT_SETREGS, pid, &u.bsd_reg, 0);
		}
		break;
	case PTRACE_GETFPREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = kern_ptrace(td, PT_GETFPREGS, pid, &u.bsd_fpreg, 0);
		if (error == 0) {
			map_fpregs_to_linux(&u.bsd_fpreg, &r.fpreg);
			error = copyout(&r.fpreg, (void *)uap->data,
			    sizeof(r.fpreg));
		}
		break;
	case PTRACE_SETFPREGS:
		/* Linux is using data where FreeBSD is using addr */
		error = copyin((void *)uap->data, &r.fpreg, sizeof(r.fpreg));
		if (error == 0) {
			map_fpregs_from_linux(&u.bsd_fpreg, &r.fpreg);
			error = kern_ptrace(td, PT_SETFPREGS, pid,
			    &u.bsd_fpreg, 0);
		}
		break;
	case PTRACE_SETFPXREGS:
#ifdef CPU_ENABLE_SSE
		error = copyin((void *)uap->data, &r.fpxreg, sizeof(r.fpxreg));
		if (error)
			break;
#endif
		/* FALL THROUGH */
	case PTRACE_GETFPXREGS: {
#ifdef CPU_ENABLE_SSE
		struct proc *p;
		struct thread *td2;

		if (sizeof(struct linux_pt_fpxreg) != sizeof(struct savexmm)) {
			static int once = 0;
			if (!once) {
				printf("linux: savexmm != linux_pt_fpxreg\n");
				once = 1;
			}
			error = EIO;
			break;
		}

		if ((p = pfind(uap->pid)) == NULL) {
			error = ESRCH;
			break;
		}

		/* Exiting processes can't be debugged. */
		if ((p->p_flag & P_WEXIT) != 0) {
			error = ESRCH;
			goto fail;
		}

		if ((error = p_candebug(td, p)) != 0)
			goto fail;

		/* System processes can't be debugged. */
		if ((p->p_flag & P_SYSTEM) != 0) {
			error = EINVAL;
			goto fail;
		}

		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if (req == PTRACE_GETFPXREGS) {
			_PHOLD(p);	/* may block */
			td2 = FIRST_THREAD_IN_PROC(p);
			error = linux_proc_read_fpxregs(td2, &r.fpxreg);
			_PRELE(p);
			PROC_UNLOCK(p);
			if (error == 0)
				error = copyout(&r.fpxreg, (void *)uap->data,
				    sizeof(r.fpxreg));
		} else {
			/* clear dangerous bits exactly as Linux does */
			r.fpxreg.mxcsr &= 0xffbf;
			_PHOLD(p);	/* may block */
			td2 = FIRST_THREAD_IN_PROC(p);
			error = linux_proc_write_fpxregs(td2, &r.fpxreg);
			_PRELE(p);
			PROC_UNLOCK(p);
		}
		break;

	fail:
		PROC_UNLOCK(p);
#else
		error = EIO;
#endif
		break;
	}
	case PTRACE_PEEKUSR:
	case PTRACE_POKEUSR: {
		error = EIO;

		/* check addr for alignment */
		if (uap->addr < 0 || uap->addr & (sizeof(l_int) - 1))
			break;
		/*
		 * Allow linux programs to access register values in
		 * user struct. We simulate this through PT_GET/SETREGS
		 * as necessary.
		 */
		if (uap->addr < sizeof(struct linux_pt_reg)) {
			error = kern_ptrace(td, PT_GETREGS, pid, &u.bsd_reg, 0);
			if (error != 0)
				break;

			map_regs_to_linux(&u.bsd_reg, &r.reg);
			if (req == PTRACE_PEEKUSR) {
				error = copyout((char *)&r.reg + uap->addr,
				    (void *)uap->data, sizeof(l_int));
				break;
			}

			*(l_int *)((char *)&r.reg + uap->addr) =
			    (l_int)uap->data;

			map_regs_from_linux(&u.bsd_reg, &r.reg);
			error = kern_ptrace(td, PT_SETREGS, pid, &u.bsd_reg, 0);
		}

		/*
		 * Simulate debug registers access
		 */
		if (uap->addr >= LINUX_DBREG_OFFSET &&
		    uap->addr <= LINUX_DBREG_OFFSET + LINUX_DBREG_SIZE) {
			error = kern_ptrace(td, PT_GETDBREGS, pid, &u.bsd_dbreg,
			    0);
			if (error != 0)
				break;

			uap->addr -= LINUX_DBREG_OFFSET;
			if (req == PTRACE_PEEKUSR) {
				error = copyout((char *)&u.bsd_dbreg +
				    uap->addr, (void *)uap->data,
				    sizeof(l_int));
				break;
			}

			*(l_int *)((char *)&u.bsd_dbreg + uap->addr) =
			     uap->data;
			error = kern_ptrace(td, PT_SETDBREGS, pid,
			    &u.bsd_dbreg, 0);
		}

		break;
	}
	case PTRACE_SYSCALL:
		/* fall through */
	default:
		printf("linux: ptrace(%u, ...) not implemented\n",
		    (unsigned int)uap->req);
		error = EINVAL;
		break;
	}

	return (error);
}
Example #7
/*
 * Exit: deallocate address space and other resources, change proc state to
 * zombie, and unlink proc from allproc and parent's lists.  Save exit status
 * and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct thread *td, int rv)
{
	struct proc *p, *nq, *q;
	struct vnode *vtmp;
	struct vnode *ttyvp = NULL;
	struct plimit *plim;

	mtx_assert(&Giant, MA_NOTOWNED);

	p = td->td_proc;
	/*
	 * XXX in case we're rebooting we just let init die in order to
	 * work around an unsolved stack overflow seen very late during
	 * shutdown on sparc64 when the gmirror worker process exits.
	 */
	if (p == initproc && rebooting == 0) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	while (p->p_flag & P_HADTHREADS) {
		/*
		 * First check if some other thread got here before us.
		 * If so, act appropriately: exit or suspend.
		 */
		thread_suspend_check(0);

		/*
		 * Kill off the other threads. This requires
		 * some co-operation from other parts of the kernel
		 * so it may not be instantaneous.  With this state set
		 * any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediately with EINTR or EWOULDBLOCK
		 * which will hopefully force them to back out to userland
		 * freeing resources as they go.  Any thread attempting
		 * to return to userland will thread_exit() from userret().
		 * thread_exit() will unsuspend us when the last of the
		 * other threads exits.
		 * If there is already a thread singler after resumption,
		 * calling thread_single will fail; in that case, we just
		 * re-check all suspension requests, and the thread should
		 * either be suspended there or exit.
		 */
		if (!thread_single(SINGLE_EXIT))
			break;

		/*
		 * All other activity in this process is now stopped.
		 * Threading support has been turned off.
		 */
	}
	KASSERT(p->p_numthreads == 1,
	    ("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
	racct_sub(p, RACCT_NTHR, 1);
	/*
	 * Wakeup anyone in procfs' PIOCWAIT.  They should have a hold
	 * on our vmspace, so we should block below until they have
	 * released their reference to us.  Note that if they have
	 * requested S_EXIT stops we will block here until they ack
	 * via PIOCCONT.
	 */
	_STOPEVENT(p, S_EXIT, rv);

	/*
	 * Ignore any pending request to stop due to a stop signal.
	 * Once P_WEXIT is set, future requests will be ignored as
	 * well.
	 */
	p->p_flag &= ~P_STOPPED_SIG;
	KASSERT(!P_SHOULDSTOP(p), ("exiting process is stopped"));

	/*
	 * Note that we are exiting and do another wakeup of anyone in
	 * PIOCWAIT in case they aren't listening for S_EXIT stops or
	 * decided to wait again after we told them we are exiting.
	 */
	p->p_flag |= P_WEXIT;
	wakeup(&p->p_stype);

	/*
	 * Wait for any processes that have a hold on our vmspace to
	 * release their reference.
	 */
	while (p->p_lock > 0)
		msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);

	p->p_xstat = rv;	/* Let event handler change exit status */
	PROC_UNLOCK(p);
	/* Drain the limit callout while we don't have the proc locked */
	callout_drain(&p->p_limco);

#ifdef AUDIT
	/*
	 * The Sun BSM exit token contains two components: an exit status as
	 * passed to exit(), and a return value to indicate what sort of exit
	 * it was.  The exit status is WEXITSTATUS(rv), but it's not clear
	 * what the return value is.
	 */
	AUDIT_ARG_EXIT(WEXITSTATUS(rv), 0);
	AUDIT_SYSCALL_EXIT(0, td);
#endif

	/* Are we a task leader? */
	if (p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			kern_psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * E.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	rv = p->p_xstat;	/* Event handler could change exit status */
	stopprofclock(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT | P_PPTRACE);

	/*
	 * Stop the real interval timer.  If the handler is currently
	 * executing, prevent it from rearming itself and let it finish.
	 */
	if (timevalisset(&p->p_realtimer.it_value) &&
	    callout_stop(&p->p_itcallout) == 0) {
		timevalclear(&p->p_realtimer.it_interval);
		msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0);
		KASSERT(!timevalisset(&p->p_realtimer.it_value),
		    ("realtime timer is still armed"));
	}
	PROC_UNLOCK(p);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * If this process has an nlminfo data area (for lockd), release it
	 */
	if (nlminfo_release_p != NULL && p->p_nlminfo != NULL)
		(*nlminfo_release_p)(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdescfree(td);

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland
	 */
	if (td->td_pflags & TDP_GEOM)
		g_waitidle();

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	mtx_lock(&ppeers_lock);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	mtx_unlock(&ppeers_lock);

	vmspace_exit(td);

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;
		struct tty *tp;

		/*
		 * s_ttyp is not zero'd; we use this to indicate that
		 * the session once had a controlling terminal. (for
		 * logging and informational purposes)
		 */
		SESS_LOCK(sp);
		ttyvp = sp->s_ttyvp;
		tp = sp->s_ttyp;
		sp->s_ttyvp = NULL;
		sp->s_ttydp = NULL;
		sp->s_leader = NULL;
		SESS_UNLOCK(sp);

		/*
		 * Signal foreground pgrp and revoke access to
		 * controlling terminal if it has not been revoked
		 * already.
		 *
		 * Because the TTY may have been revoked in the mean
		 * time and could already have a new session associated
		 * with it, make sure we don't send a SIGHUP to a
		 * foreground process group that does not belong to this
		 * session.
		 */

		if (tp != NULL) {
			tty_lock(tp);
			if (tp->t_session == sp)
				tty_signal_pgrp(tp, SIGHUP);
			tty_unlock(tp);
		}

		if (ttyvp != NULL) {
			sx_xunlock(&proctree_lock);
			if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) {
				VOP_REVOKE(ttyvp, REVOKEALL);
				VOP_UNLOCK(ttyvp, 0);
			}
			sx_xlock(&proctree_lock);
		}
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);

	/* Release the TTY now we've unlocked everything. */
	if (ttyvp != NULL)
		vrele(ttyvp);
#ifdef KTRACE
	ktrprocexit(td);
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Release our limits structure.
	 */
	plim = p->p_limit;
	p->p_limit = NULL;
	lim_free(plim);

	tidhash_remove(td);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Call machine-dependent code to release any
	 * machine-dependent resources other than the address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid);

	/*
	 * Reparent all of our children to init.
	 */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			struct thread *temp;

			/*
			 * Since q was found on our children list, the
			 * proc_reparent() call moved q to the orphan
			 * list due to present P_TRACED flag. Clear
			 * orphan link for q now while q is locked.
			 */
			clear_orphan(q);
			q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
			FOREACH_THREAD_IN_PROC(q, temp)
				temp->td_dbgflags &= ~TDB_SUSPEND;
			kern_psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Also get rid of our orphans.
	 */
	while ((q = LIST_FIRST(&p->p_orphans)) != NULL) {
		PROC_LOCK(q);
		clear_orphan(q);
		PROC_UNLOCK(q);
	}

	/* Save exit status. */
	PROC_LOCK(p);
	p->p_xthread = td;

	/* Tell the prison that we are gone. */
	prison_proc_free(p->p_ucred->cr_prison);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exit if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exit)
		dtrace_fasttrap_exit(p);
#endif

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);

#ifdef KDTRACE_HOOKS
	int reason = CLD_EXITED;
	if (WCOREDUMP(rv))
		reason = CLD_DUMPED;
	else if (WIFSIGNALED(rv))
		reason = CLD_KILLED;
	SDT_PROBE(proc, kernel, , exit, reason, 0, 0, 0, 0);
#endif

	/*
	 * Just delete all entries in the p_klist. At this point we won't
	 * report any more events, and there are nasty race conditions that
	 * can beat us if we don't.
	 */
	knlist_clear(&p->p_klist, 1);

	/*
	 * If this is a process with a descriptor, we may not need to deliver
	 * a signal to the parent.  proctree_lock is held over
	 * procdesc_exit() to serialize concurrent calls to close() and
	 * exit().
	 */
	if (p->p_procdesc == NULL || procdesc_exit(p)) {
		/*
		 * Notify parent that we're gone.  If parent has the
		 * PS_NOCLDWAIT flag set, or if the handler is set to SIG_IGN,
		 * notify process 1 instead (and hope it will handle this
		 * situation).
		 */
		PROC_LOCK(p->p_pptr);
		mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
		if (p->p_pptr->p_sigacts->ps_flag &
		    (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
			struct proc *pp;

			mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
			pp = p->p_pptr;
			PROC_UNLOCK(pp);
			proc_reparent(p, initproc);
			p->p_sigparent = SIGCHLD;
			PROC_LOCK(p->p_pptr);

			/*
			 * Notify parent, so in case he was wait(2)ing or
			 * executing waitpid(2) with our pid, he will
			 * continue.
			 */
			wakeup(pp);
		} else
			mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);

		if (p->p_pptr == initproc)
			kern_psignal(p->p_pptr, SIGCHLD);
		else if (p->p_sigparent != 0) {
			if (p->p_sigparent == SIGCHLD)
				childproc_exited(p);
			else	/* LINUX thread */
				kern_psignal(p->p_pptr, p->p_sigparent);
		}
	} else
		PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);

	/*
	 * The state PRS_ZOMBIE prevents other processes from sending
	 * signals to the process; to avoid a memory leak, we free the
	 * signal queue memory at the time the state is set.
	 */
	sigqueue_flush(&p->p_sigqueue);
	sigqueue_flush(&td->td_sigqueue);

	/*
	 * We have to wait until after acquiring all locks before
	 * changing p_state.  We need to avoid all possible context
	 * switches (including ones from blocking on a mutex) while
	 * marked as a zombie.  We also have to set the zombie state
	 * before we release the parent process' proc lock to avoid
	 * a lost wakeup.  So, we first call wakeup, then we grab the
	 * sched lock, update the state, and release the parent process'
	 * proc lock.
	 */
	wakeup(p->p_pptr);
	cv_broadcast(&p->p_pwait);
	sched_exit(p->p_pptr, td);
	PROC_SLOCK(p);
	p->p_state = PRS_ZOMBIE;
	PROC_UNLOCK(p->p_pptr);

	/*
	 * Hopefully no one will try to deliver a signal to the process this
	 * late in the game.
	 */
	knlist_destroy(&p->p_klist);

	/*
	 * Save our children's rusage information in our exit rusage.
	 */
	ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);

	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
}
Example #8
int
thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk)
{
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our td */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_set_upcall(newtd, td);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	thread_cow_get(newtd, td);

	error = initialize_thread(newtd, thunk);
	if (error != 0) {
		thread_cow_free(newtd);
		thread_free(newtd);
		goto fail;
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	newtd->td_pax = p->p_pax;
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	if (p->p_flag2 & P2_LWP_EVENTS)
		newtd->td_dbgflags |= TDB_BORN;

	/*
	 * Copy the existing thread VM policy into the new thread.
	 */
	vm_domain_policy_localcopy(&newtd->td_vm_dom_policy,
	    &td->td_vm_dom_policy);

	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}
Example #9
static int
procfs_control(struct thread *td, struct proc *p, int op)
{
	int error = 0;
	struct thread *temp;

	/*
	 * Attach - attaches the target process for debugging
	 * by the calling process.
	 */
	if (op == PROCFS_CTL_ATTACH) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p);
		if ((error = p_candebug(td, p)) != 0)
			goto out;
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto out;
		}

		/* Can't trace yourself! */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto out;
		}

		/*
		 * Go ahead and set the trace flag.
		 * Save the old parent (it's reset in
		 *   _DETACH, and also in kern_exit.c:wait4()).
		 * Reparent the process so that the tracing
		 *   proc gets to see all the action.
		 * Stop the target.
		 */
		p->p_flag |= P_TRACED;
		faultin(p);
		p->p_xstat = 0;		/* XXX ? */
		if (p->p_pptr != td->td_proc) {
			p->p_oppid = p->p_pptr->p_pid;
			proc_reparent(p, td->td_proc);
		}
		psignal(p, SIGSTOP);
out:
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (error);
	}

	/*
	 * Authorization check: rely on normal debugging protection, except
	 * allow processes to disengage debugging on a process onto which
	 * they have previously attached, but no longer have permission to
	 * debug.
	 */
	PROC_LOCK(p);
	if (op != PROCFS_CTL_DETACH &&
	    ((error = p_candebug(td, p)))) {
		PROC_UNLOCK(p);
		return (error);
	}

	/*
	 * Target process must be stopped, owned by (td) and
	 * be set up for tracing (P_TRACED flag set).
	 * Allow DETACH to take place at any time for sanity.
	 * Allow WAIT any time, of course.
	 */
	switch (op) {
	case PROCFS_CTL_DETACH:
	case PROCFS_CTL_WAIT:
		break;

	default:
		if (!TRACE_WAIT_P(td->td_proc, p)) {
			PROC_UNLOCK(p);
			return (EBUSY);
		}
	}


#ifdef FIX_SSTEP
	/*
	 * do single-step fixup if needed
	 */
	FIX_SSTEP(FIRST_THREAD_IN_PROC(p));
#endif

	/*
	 * Don't deliver any signal by default.
	 * To continue with a signal, just send
	 * the signal name to the ctl file
	 */
	p->p_xstat = 0;

	switch (op) {
	/*
	 * Detach.  Cleans up the target process, reparent it if possible
	 * and set it running once more.
	 */
	case PROCFS_CTL_DETACH:
		/* if not being traced, then this is a painless no-op */
		if ((p->p_flag & P_TRACED) == 0) {
			PROC_UNLOCK(p);
			return (0);
		}

		/* not being traced any more */
		p->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);

		/* remove pending SIGTRAP, else the process will die */
		sigqueue_delete_proc(p, SIGTRAP);
		FOREACH_THREAD_IN_PROC(p, temp)
			temp->td_dbgflags &= ~TDB_SUSPEND;
		PROC_UNLOCK(p);

		/* give process back to original parent */
		sx_xlock(&proctree_lock);
		if (p->p_oppid != p->p_pptr->p_pid) {
			struct proc *pp;

			pp = pfind(p->p_oppid);
			PROC_LOCK(p);
			if (pp) {
				PROC_UNLOCK(pp);
				proc_reparent(p, pp);
			}
		} else
			PROC_LOCK(p);
		p->p_oppid = 0;
		p->p_flag &= ~P_WAITED;	/* XXX ? */
		sx_xunlock(&proctree_lock);

		wakeup(td->td_proc);	/* XXX for CTL_WAIT below ? */

		break;

	/*
	 * Step.  Let the target process execute a single instruction.
	 * What does it mean to single step a threaded program?
	 */
	case PROCFS_CTL_STEP:
		error = proc_sstep(FIRST_THREAD_IN_PROC(p));
		if (error) {
			PROC_UNLOCK(p);
			return (error);
		}
		break;

	/*
	 * Run.  Let the target process continue running until a breakpoint
	 * or some other trap.
	 */
	case PROCFS_CTL_RUN:
		p->p_flag &= ~P_STOPPED_SIG;	/* this uses SIGSTOP */
		break;

	/*
	 * Wait for the target process to stop.
	 * If the target is not being traced then just wait
	 * to enter
	 */
	case PROCFS_CTL_WAIT:
		if (p->p_flag & P_TRACED) {
			while (error == 0 &&
					(P_SHOULDSTOP(p)) &&
					(p->p_flag & P_TRACED) &&
					(p->p_pptr == td->td_proc))
				error = msleep(p, &p->p_mtx,
						PWAIT|PCATCH, "procfsx", 0);
			if (error == 0 && !TRACE_WAIT_P(td->td_proc, p))
				error = EBUSY;
		} else {
			while (error == 0 && P_SHOULDSTOP(p))
				error = msleep(p, &p->p_mtx,
						PWAIT|PCATCH, "procfs", 0);
		}
		PROC_UNLOCK(p);
		return (error);
	default:
		panic("procfs_control");
	}

	PROC_SLOCK(p);
	thread_unsuspend(p); /* If it can run, let it do so. */
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
	return (0);
}
Example #10
/*
 * Process ioctls
 */
int
procfs_ioctl(PFS_IOCTL_ARGS)
{
	struct procfs_status *ps;
#ifdef COMPAT_FREEBSD32
	struct procfs_status32 *ps32;
#endif
	int error, flags, sig;
#ifdef COMPAT_FREEBSD6
	int ival;
#endif

	KASSERT(p != NULL,
	    ("%s() called without a process", __func__));
	PROC_LOCK_ASSERT(p, MA_OWNED);

	error = 0;
	switch (cmd) {
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IOC(IOC_IN, 'p', 1, 0):
#endif
#ifdef COMPAT_FREEBSD6
	case _IO('p', 1):
		ival = IOCPARM_IVAL(data);
		data = &ival;
#endif
	case PIOCBIS:
		p->p_stops |= *(unsigned int *)data;
		break;
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IOC(IOC_IN, 'p', 2, 0):
#endif
#ifdef COMPAT_FREEBSD6
	case _IO('p', 2):
		ival = IOCPARM_IVAL(data);
		data = &ival;
#endif
	case PIOCBIC:
		p->p_stops &= ~*(unsigned int *)data;
		break;
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IOC(IOC_IN, 'p', 3, 0):
#endif
#ifdef COMPAT_FREEBSD6
	case _IO('p', 3):
		ival = IOCPARM_IVAL(data);
		data = &ival;
#endif
	case PIOCSFL:
		flags = *(unsigned int *)data;
		if (flags & PF_ISUGID) {
			/*
			 * XXXRW: Is this specific check required here, as
			 * p_candebug() should implement it, or are other
			 * checks missing?
			 */
			error = priv_check(td, PRIV_DEBUG_SUGID);
			if (error)
				break;
		}
		p->p_pfsflags = flags;
		break;
	case PIOCGFL:
		*(unsigned int *)data = p->p_pfsflags;
		break;
	case PIOCWAIT:
		while (p->p_step == 0 && (p->p_flag & P_WEXIT) == 0) {
			/* sleep until p stops */
			_PHOLD(p);
			error = msleep(&p->p_stype, &p->p_mtx,
			    PWAIT|PCATCH, "pioctl", 0);
			_PRELE(p);
			if (error != 0)
				break;
		}
		/* fall through to PIOCSTATUS */
	case PIOCSTATUS:
		ps = (struct procfs_status *)data;
		ps->state = (p->p_step == 0);
		ps->flags = 0; /* nope */
		ps->events = p->p_stops;
		ps->why = p->p_step ? p->p_stype : 0;
		ps->val = p->p_step ? p->p_xstat : 0;
		break;
#ifdef COMPAT_FREEBSD32
	case PIOCWAIT32:
		while (p->p_step == 0 && (p->p_flag & P_WEXIT) == 0) {
			/* sleep until p stops */
			_PHOLD(p);
			error = msleep(&p->p_stype, &p->p_mtx,
			    PWAIT|PCATCH, "pioctl", 0);
			_PRELE(p);
			if (error != 0)
				break;
		}
		/* fall through to PIOCSTATUS32 */
	case PIOCSTATUS32:
		ps32 = (struct procfs_status32 *)data;
		ps32->state = (p->p_step == 0);
		ps32->flags = 0; /* nope */
		ps32->events = p->p_stops;
		ps32->why = p->p_step ? p->p_stype : 0;
		ps32->val = p->p_step ? p->p_xstat : 0;
		break;
#endif
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IOC(IOC_IN, 'p', 5, 0):
#endif
#ifdef COMPAT_FREEBSD6
	case _IO('p', 5):
		ival = IOCPARM_IVAL(data);
		data = &ival;
#endif
	case PIOCCONT:
		if (p->p_step == 0)
			break;
		sig = *(unsigned int *)data;
		if (sig != 0 && !_SIG_VALID(sig)) {
			error = EINVAL;
			break;
		}
#if 0
		p->p_step = 0;
		if (P_SHOULDSTOP(p)) {
			p->p_xstat = sig;
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			PROC_SLOCK(p);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		} else if (sig)
			kern_psignal(p, sig);
#else
		if (sig)
			kern_psignal(p, sig);
		p->p_step = 0;
		wakeup(&p->p_step);
#endif
		break;
	default:
		error = (ENOTTY);
	}

	return (error);
}
Example #11
int
thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk)
{
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	if (rtp != NULL) {
		switch(rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set scheduler policy */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our td */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_copy_thread(newtd, td);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_rb_list = newtd->td_rbp_list = newtd->td_rb_inact = 0;
	thread_cow_get(newtd, td);

	error = initialize_thread(newtd, thunk);
	if (error != 0) {
		thread_cow_free(newtd);
		thread_free(newtd);
		goto fail;
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	if (p->p_ptevents & PTRACE_LWP)
		newtd->td_dbgflags |= TDB_BORN;

	PROC_UNLOCK(p);
#ifdef	HWPMC_HOOKS
	if (PMC_PROC_IS_USING_PMCS(p))
		PMC_CALL_HOOK(newtd, PMC_FN_THR_CREATE, NULL);
	else if (PMC_SYSTEM_SAMPLING_ACTIVE())
		PMC_CALL_HOOK_UNLOCKED(newtd, PMC_FN_THR_CREATE_LOG, NULL);
#endif

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* ignore timesharing class */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}
Example #12
/*
 * Read proc's from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
    struct kinfo_proc *bp, int maxcnt)
{
	int cnt = 0;
	struct kinfo_proc kinfo_proc, *kp;
	struct pgrp pgrp;
	struct session sess;
	struct cdev t_cdev;
	struct tty tty;
	struct vmspace vmspace;
	struct sigacts sigacts;
#if 0
	struct pstats pstats;
#endif
	struct ucred ucred;
	struct prison pr;
	struct thread mtd;
	struct proc proc;
	struct proc pproc;
	struct sysentvec sysent;
	char svname[KI_EMULNAMELEN];

	kp = &kinfo_proc;
	kp->ki_structsize = sizeof(kinfo_proc);
	/*
	 * Loop on the processes.  This is completely broken because we
	 * need to be able to loop on the threads and merge the ones
	 * that are the same process somehow.
	 */
	for (; cnt < maxcnt && p != NULL; p = LIST_NEXT(&proc, p_list)) {
		memset(kp, 0, sizeof *kp);
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (proc.p_state == PRS_NEW)
			continue;
		if (proc.p_state != PRS_ZOMBIE) {
			if (KREAD(kd, (u_long)TAILQ_FIRST(&proc.p_threads),
			    &mtd)) {
				_kvm_err(kd, kd->program,
				    "can't read thread at %p",
				    TAILQ_FIRST(&proc.p_threads));
				return (-1);
			}
		}
		if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) {
			kp->ki_ruid = ucred.cr_ruid;
			kp->ki_svuid = ucred.cr_svuid;
			kp->ki_rgid = ucred.cr_rgid;
			kp->ki_svgid = ucred.cr_svgid;
			kp->ki_cr_flags = ucred.cr_flags;
			if (ucred.cr_ngroups > KI_NGROUPS) {
				kp->ki_ngroups = KI_NGROUPS;
				kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
			} else
				kp->ki_ngroups = ucred.cr_ngroups;
			kvm_read(kd, (u_long)ucred.cr_groups, kp->ki_groups,
			    kp->ki_ngroups * sizeof(gid_t));
			kp->ki_uid = ucred.cr_uid;
			if (ucred.cr_prison != NULL) {
				if (KREAD(kd, (u_long)ucred.cr_prison, &pr)) {
					_kvm_err(kd, kd->program,
					    "can't read prison at %p",
					    ucred.cr_prison);
					return (-1);
				}
				kp->ki_jid = pr.pr_id;
			}
		}

		switch(what & ~KERN_PROC_INC_THREAD) {

		case KERN_PROC_GID:
			if (kp->ki_groups[0] != (gid_t)arg)
				continue;
			break;

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_RGID:
			if (kp->ki_rgid != (gid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (kp->ki_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (kp->ki_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather kinfo_proc
		 */
		kp->ki_paddr = p;
		kp->ki_addr = 0;	/* XXX uarea */
		/* kp->ki_kstack = proc.p_thread.td_kstack; XXXKSE */
		kp->ki_args = proc.p_args;
		kp->ki_tracep = proc.p_tracevp;
		kp->ki_textvp = proc.p_textvp;
		kp->ki_fd = proc.p_fd;
		kp->ki_vmspace = proc.p_vmspace;
		if (proc.p_sigacts != NULL) {
			if (KREAD(kd, (u_long)proc.p_sigacts, &sigacts)) {
				_kvm_err(kd, kd->program,
				    "can't read sigacts at %p", proc.p_sigacts);
				return (-1);
			}
			kp->ki_sigignore = sigacts.ps_sigignore;
			kp->ki_sigcatch = sigacts.ps_sigcatch;
		}
#if 0
		if ((proc.p_flag & P_INMEM) && proc.p_stats != NULL) {
			if (KREAD(kd, (u_long)proc.p_stats, &pstats)) {
				_kvm_err(kd, kd->program,
				    "can't read stats at %x", proc.p_stats);
				return (-1);
			}
			kp->ki_start = pstats.p_start;

			/*
			 * XXX: The times here are probably zero and need
			 * to be calculated from the raw data in p_rux and
			 * p_crux.
			 */
			kp->ki_rusage = pstats.p_ru;
			kp->ki_childstime = pstats.p_cru.ru_stime;
			kp->ki_childutime = pstats.p_cru.ru_utime;
			/* Some callers want child-times in a single value */
			timeradd(&kp->ki_childstime, &kp->ki_childutime,
			    &kp->ki_childtime);
		}
#endif
		if (proc.p_oppid)
			kp->ki_ppid = proc.p_oppid;
		else if (proc.p_pptr) {
			if (KREAD(kd, (u_long)proc.p_pptr, &pproc)) {
				_kvm_err(kd, kd->program,
				    "can't read pproc at %p", proc.p_pptr);
				return (-1);
			}
			kp->ki_ppid = pproc.p_pid;
		} else
			kp->ki_ppid = 0;
		if (proc.p_pgrp == NULL)
			goto nopgrp;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
				 proc.p_pgrp);
			return (-1);
		}
		kp->ki_pgid = pgrp.pg_id;
		kp->ki_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
				pgrp.pg_session);
			return (-1);
		}
		kp->ki_sid = sess.s_sid;
		(void)memcpy(kp->ki_login, sess.s_login,
						sizeof(kp->ki_login));
		kp->ki_kiflag = sess.s_ttyvp ? KI_CTTY : 0;
		if (sess.s_leader == p)
			kp->ki_kiflag |= KI_SLEADER;
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
					 "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			if (tty.t_dev != NULL) {
				if (KREAD(kd, (u_long)tty.t_dev, &t_cdev)) {
					_kvm_err(kd, kd->program,
						 "can't read cdev at %p",
						tty.t_dev);
					return (-1);
				}
#if 0
				kp->ki_tdev = t_cdev.si_udev;
#else
				kp->ki_tdev = NODEV;
#endif
			}
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
						 "can't read tpgrp at %p",
						tty.t_pgrp);
					return (-1);
				}
				kp->ki_tpgid = pgrp.pg_id;
			} else
				kp->ki_tpgid = -1;
			if (tty.t_session != NULL) {
				if (KREAD(kd, (u_long)tty.t_session, &sess)) {
					_kvm_err(kd, kd->program,
					    "can't read session at %p",
					    tty.t_session);
					return (-1);
				}
				kp->ki_tsid = sess.s_sid;
			}
		} else {
nopgrp:
			kp->ki_tdev = NODEV;
		}
		if ((proc.p_state != PRS_ZOMBIE) && mtd.td_wmesg)
			(void)kvm_read(kd, (u_long)mtd.td_wmesg,
			    kp->ki_wmesg, WMESGLEN);

		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&vmspace, sizeof(vmspace));
		kp->ki_size = vmspace.vm_map.size;
		/*
		 * Approximate the kernel's method of calculating
		 * this field.
		 */
#define		pmap_resident_count(pm) ((pm)->pm_stats.resident_count)
		kp->ki_rssize = pmap_resident_count(&vmspace.vm_pmap);
		kp->ki_swrss = vmspace.vm_swrss;
		kp->ki_tsize = vmspace.vm_tsize;
		kp->ki_dsize = vmspace.vm_dsize;
		kp->ki_ssize = vmspace.vm_ssize;

		switch (what & ~KERN_PROC_INC_THREAD) {

		case KERN_PROC_PGRP:
			if (kp->ki_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_SESSION:
			if (kp->ki_sid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			     kp->ki_tdev != (dev_t)arg)
				continue;
			break;
		}
		if (proc.p_comm[0] != 0)
			strlcpy(kp->ki_comm, proc.p_comm, MAXCOMLEN);
		(void)kvm_read(kd, (u_long)proc.p_sysent, (char *)&sysent,
		    sizeof(sysent));
		(void)kvm_read(kd, (u_long)sysent.sv_name, (char *)&svname,
		    sizeof(svname));
		if (svname[0] != 0)
			strlcpy(kp->ki_emul, svname, KI_EMULNAMELEN);
		if ((proc.p_state != PRS_ZOMBIE) &&
		    (mtd.td_blocked != 0)) {
			kp->ki_kiflag |= KI_LOCKBLOCK;
			if (mtd.td_lockname)
				(void)kvm_read(kd,
				    (u_long)mtd.td_lockname,
				    kp->ki_lockname, LOCKNAMELEN);
			kp->ki_lockname[LOCKNAMELEN] = 0;
		}
		kp->ki_runtime = cputick2usec(proc.p_rux.rux_runtime);
		kp->ki_pid = proc.p_pid;
		kp->ki_siglist = proc.p_siglist;
		SIGSETOR(kp->ki_siglist, mtd.td_siglist);
		kp->ki_sigmask = mtd.td_sigmask;
		kp->ki_xstat = KW_EXITCODE(proc.p_xexit, proc.p_xsig);
		kp->ki_acflag = proc.p_acflag;
		kp->ki_lock = proc.p_lock;
		if (proc.p_state != PRS_ZOMBIE) {
			kp->ki_swtime = (ticks - proc.p_swtick) / hz;
			kp->ki_flag = proc.p_flag;
			kp->ki_sflag = 0;
			kp->ki_nice = proc.p_nice;
			kp->ki_traceflag = proc.p_traceflag;
			if (proc.p_state == PRS_NORMAL) {
				if (TD_ON_RUNQ(&mtd) ||
				    TD_CAN_RUN(&mtd) ||
				    TD_IS_RUNNING(&mtd)) {
					kp->ki_stat = SRUN;
				} else if (mtd.td_state ==
				    TDS_INHIBITED) {
					if (P_SHOULDSTOP(&proc)) {
						kp->ki_stat = SSTOP;
					} else if (
					    TD_IS_SLEEPING(&mtd)) {
						kp->ki_stat = SSLEEP;
					} else if (TD_ON_LOCK(&mtd)) {
						kp->ki_stat = SLOCK;
					} else {
						kp->ki_stat = SWAIT;
					}
				}
			} else {
				kp->ki_stat = SIDL;
			}
			/* Stuff from the thread */
			kp->ki_pri.pri_level = mtd.td_priority;
			kp->ki_pri.pri_native = mtd.td_base_pri;
			kp->ki_lastcpu = mtd.td_lastcpu;
			kp->ki_wchan = mtd.td_wchan;
			kp->ki_oncpu = mtd.td_oncpu;
			if (mtd.td_name[0] != '\0')
				strlcpy(kp->ki_tdname, mtd.td_name, sizeof(kp->ki_tdname));
			kp->ki_pctcpu = 0;
			kp->ki_rqindex = 0;

			/*
			 * Note: legacy fields; wraps at NOCPU_OLD or the
			 * old max CPU value as appropriate
			 */
			if (mtd.td_lastcpu == NOCPU)
				kp->ki_lastcpu_old = NOCPU_OLD;
			else if (mtd.td_lastcpu > MAXCPU_OLD)
				kp->ki_lastcpu_old = MAXCPU_OLD;
			else
				kp->ki_lastcpu_old = mtd.td_lastcpu;

			if (mtd.td_oncpu == NOCPU)
				kp->ki_oncpu_old = NOCPU_OLD;
			else if (mtd.td_oncpu > MAXCPU_OLD)
				kp->ki_oncpu_old = MAXCPU_OLD;
			else
				kp->ki_oncpu_old = mtd.td_oncpu;
		} else {
			kp->ki_stat = SZOMB;
		}
		kp->ki_tdev_freebsd11 = kp->ki_tdev; /* truncate */
		bcopy(&kinfo_proc, bp, sizeof(kinfo_proc));
		++bp;
		++cnt;
	}
	return (cnt);
}
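
For context, a small user-space sketch (an illustrative assumption, not part of the library code above) of how kvm_proclist()'s results are normally consumed: through kvm_getprocs() on a live kernel or a crash dump. Build with -lkvm.

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <paths.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc *kp;
	kvm_t *kd;
	int i, cnt;

	/* NULL execfile + /dev/null corefile means "the running kernel". */
	kd = kvm_openfiles(NULL, _PATH_DEVNULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	kp = kvm_getprocs(kd, KERN_PROC_PROC, 0, &cnt);
	if (kp == NULL) {
		fprintf(stderr, "kvm_getprocs: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	for (i = 0; i < cnt; i++)
		printf("%5d %s\n", (int)kp[i].ki_pid, kp[i].ki_comm);
	kvm_close(kd);
	return (0);
}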
Example #13
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2;
	struct ptrace_io_desc *piod;
	int error, write, tmp;
	int proctree_locked = 0;

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}
		
	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if ((p = pfind(pid)) == NULL) {
			if (proctree_locked)
				sx_xunlock(&proctree_lock);
			return (ESRCH);
		}
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}
	
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
	case PT_GETREGS:
	case PT_SETREGS:
	case PT_GETFPREGS:
	case PT_SETFPREGS:
	case PT_GETDBREGS:
	case PT_SETDBREGS:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;

	default:
		error = EINVAL;
		goto fail;
	}

	td2 = FIRST_THREAD_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* XXX data is used even in the PT_STEP case. */
		if (req != PT_STEP && (unsigned)data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		_PHOLD(p);

		if (req == PT_STEP) {
			error = ptrace_single_step(td2);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}

		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		if (P_SHOULDSTOP(p)) {
			p->p_xstat = data;
			mtx_lock_spin(&sched_lock);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			thread_unsuspend(p);
			setrunnable(td2);	/* XXXKSE */
			/* Need foreach kse in proc, ... make_kse_queued(). */
			mtx_unlock_spin(&sched_lock);
		} else if (data)
			psignal(p, data);
		PROC_UNLOCK(p);
		
		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

	case PT_IO:
		PROC_UNLOCK(p);
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		error = proc_rwmem(p, &uio);
		piod->piod_len -= uio.uio_resid;
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	default:
		KASSERT(0, ("unreachable code\n"));
		break;
	}

	KASSERT(0, ("unreachable code\n"));
	return (0);

fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
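
Finally, a minimal user-space sketch (a typical-usage assumption, not taken from the kernel sources above) of the caller side serviced by kern_ptrace() in Example #13 and emulated by linux_ptrace() in Example #6: attach to a target, wait for it to stop, read its registers, and detach. Error handling is abbreviated.

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <machine/reg.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	struct reg regs;
	pid_t pid;
	int status;

	if (argc != 2)
		errx(1, "usage: %s pid", argv[0]);
	pid = (pid_t)atoi(argv[1]);

	/* PT_ATTACH stops the target; wait until the stop is reported. */
	if (ptrace(PT_ATTACH, pid, (caddr_t)0, 0) == -1)
		err(1, "PT_ATTACH");
	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");

	/* Registers can only be read while the target is stopped. */
	if (ptrace(PT_GETREGS, pid, (caddr_t)&regs, 0) == -1)
		err(1, "PT_GETREGS");
	printf("read %zu bytes of registers from pid %d\n",
	    sizeof(regs), (int)pid);

	/* addr == (caddr_t)1 means "resume at the current PC". */
	if (ptrace(PT_DETACH, pid, (caddr_t)1, 0) == -1)
		err(1, "PT_DETACH");
	return (0);
}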