Example #1
/* take rwlock.  prefer writers over readers (see rw_tryenter and rw_exit) */
void
rw_enter(krwlock_t *rw, const krw_t op)
{
	UPRW(rw);
	struct rumpuser_cv *rucv;
	uint16_t *wp;

	if (rw_tryenter(rw, op))
		return;

	/* lag path: wait on the scheduler lock condvar and retry */
	if (op == RW_READER) {
		rucv = uprw->uprw_rucv_reader;
		wp = &uprw->uprw_rwant;
	} else {
		rucv = uprw->uprw_rucv_writer;
		wp = &uprw->uprw_wwant;
	}

	(*wp)++;
	while (!rw_tryenter(rw, op)) {
		rump_schedlock_cv_wait(rucv);
	}
	(*wp)--;
}
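The function above falls back from rw_tryenter() to a wait-and-retry loop on a condition variable. The same shape can be reproduced with ordinary userland primitives; below is a minimal writer-only sketch, assuming POSIX threads (write_enter()/write_exit() and the wait objects are illustrative, not part of the rump code, and the reader/writer "wanted" counters are omitted):

#include <pthread.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t waitmtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitcv = PTHREAD_COND_INITIALIZER;

/* Fast path: try the lock; slow path: sleep on the condvar and retry. */
static void
write_enter(void)
{
	if (pthread_rwlock_trywrlock(&lock) == 0)
		return;				/* uncontended fast path */

	pthread_mutex_lock(&waitmtx);
	while (pthread_rwlock_trywrlock(&lock) != 0)
		pthread_cond_wait(&waitcv, &waitmtx);	/* woken by write_exit() */
	pthread_mutex_unlock(&waitmtx);
}

static void
write_exit(void)
{
	pthread_rwlock_unlock(&lock);
	pthread_mutex_lock(&waitmtx);		/* serialize with waiters: no lost wakeups */
	pthread_cond_broadcast(&waitcv);
	pthread_mutex_unlock(&waitmtx);
}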
Example #2
/*
 * Walk the list of zstreams in the given zfetch, find an old one (by time), and
 * reclaim it for use by the caller.
 */
static zstream_t *
dmu_zfetch_stream_reclaim(zfetch_t *zf)
{
	zstream_t	*zs;

	if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
		return (0);

	for (zs = list_head(&zf->zf_stream); zs;
	    zs = list_next(&zf->zf_stream, zs)) {

		if (((lbolt - zs->zst_last) / hz) > zfetch_min_sec_reap)
			break;
	}

	if (zs) {
		dmu_zfetch_stream_remove(zf, zs);
		mutex_destroy(&zs->zst_lock);
		bzero(zs, sizeof (zstream_t));
	} else {
		zf->zf_alloc_fail++;
	}
	rw_exit(&zf->zf_rwlock);

	return (zs);
}
Example #3
/*
 * Remove a scheduling class module.
 *
 * we only null out the init func and the class functions because
 * once a class has been loaded it has that slot in the class
 * array until the next reboot. We don't decrement loaded_classes
 * because this keeps count of the number of classes that have
 * been loaded for this session. It will have to be this way until
 * we implement the class array as a linked list and do true
 * dynamic allocation.
 */
static int
mod_removesched(struct modlsched *modl, struct modlinkage *modlp)
{
	int status;
	sclass_t *clp;
	struct modctl *mcp;
	char *modname;
	id_t cid;

	status = getcidbyname(modl->sched_class->cl_name, &cid);
	if (status != 0) {
		mcp = mod_getctl(modlp);
		ASSERT(mcp != NULL);
		modname = mcp->mod_modname;
		cmn_err(CE_WARN, uninstall_err, modname);
		return (EINVAL);
	}
	clp = &sclass[cid];
	if (moddebug & MODDEBUG_NOAUL_SCHED ||
	    !rw_tryenter(clp->cl_lock, RW_WRITER))
		return (EBUSY);

	clp->cl_init = NULL;
	clp->cl_funcs = NULL;
	rw_exit(clp->cl_lock);
	return (0);
}
Example #4
static void
splat_rwlock_test4_func(void *arg)
{
	rw_priv_t *rwp = (rw_priv_t *)arg;
	ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);

	if (rw_tryenter(&rwp->rw_rwlock, rwp->rw_type)) {
		rwp->rw_rc = 0;
		rw_exit(&rwp->rw_rwlock);
	} else {
		rwp->rw_rc = -EBUSY;
	}
}
Example #5
/*
 * Remove a loadable system call entry from a sysent table.
 */
static int
mod_removesys_sysent(
	struct modlsys		*modl,
	struct modlinkage	*modlp,
	struct sysent		table[])
{
	struct sysent	*sysp;

	if ((sysp = mod_getsysent(modlp, table)) == NULL ||
	    (sysp->sy_flags & (SE_LOADABLE | SE_NOUNLOAD)) == 0 ||
	    sysp->sy_call != modl->sys_sysent->sy_call) {

		struct modctl *mcp = mod_getctl(modlp);
		char *modname = mcp->mod_modname;

		cmn_err(CE_WARN, uninstall_err, modname);
		return (EINVAL);
	}

	/* If we can't get the write lock, we can't unlink from the system */

	if (!(moddebug & MODDEBUG_NOAUL_SYS) &&
	    rw_tryenter(sysp->sy_lock, RW_WRITER)) {
		/*
		 * Check the flags to be sure the syscall is still
		 * (un)loadable.
		 * If SE_NOUNLOAD is set, SE_LOADABLE will not be.
		 */
		if ((sysp->sy_flags & (SE_LOADED | SE_LOADABLE)) ==
		    (SE_LOADED | SE_LOADABLE)) {
			sysp->sy_flags &= ~SE_LOADED;
			sysp->sy_callc = loadable_syscall;
			sysp->sy_call = (int (*)())nosys;
			rw_exit(sysp->sy_lock);
			return (0);
		}
		rw_exit(sysp->sy_lock);
	}
	return (EBUSY);
}
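mod_removesys_sysent() above shows the usual unload idiom: take the write lock with a non-blocking try, re-check the loaded/loadable flags while holding it, and report EBUSY in every other case. A minimal userland sketch of the same shape, assuming POSIX threads (the flag names and the entry type are illustrative only, not the real sysent):

#include <pthread.h>
#include <errno.h>
#include <stddef.h>

#define E_LOADED	0x1
#define E_LOADABLE	0x2

struct entry {
	pthread_rwlock_t	e_lock;
	unsigned		e_flags;
	void			(*e_handler)(void);
};

/* Unlink an entry only if the write lock is free and it is still unloadable. */
static int
entry_remove(struct entry *ep)
{
	if (pthread_rwlock_trywrlock(&ep->e_lock) != 0)
		return EBUSY;		/* someone is using it; do not wait */

	if ((ep->e_flags & (E_LOADED | E_LOADABLE)) != (E_LOADED | E_LOADABLE)) {
		pthread_rwlock_unlock(&ep->e_lock);
		return EBUSY;		/* state changed since the caller last looked */
	}

	ep->e_flags &= ~E_LOADED;
	ep->e_handler = NULL;		/* analogous to resetting sy_call */
	pthread_rwlock_unlock(&ep->e_lock);
	return 0;
}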
Example #6
/*
 * Remove an exec module.
 */
static int
mod_removeexec(struct modlexec *modl, struct modlinkage *modlp)
{
	struct execsw *eswp;
	struct modctl *mcp;
	char *modname;

	eswp = findexecsw(modl->exec_execsw->exec_magic);
	if (eswp == NULL) {
		mcp = mod_getctl(modlp);
		ASSERT(mcp != NULL);
		modname = mcp->mod_modname;
		cmn_err(CE_WARN, uninstall_err, modname);
		return (EINVAL);
	}
	if (moddebug & MODDEBUG_NOAUL_EXEC ||
	    !rw_tryenter(eswp->exec_lock, RW_WRITER))
		return (EBUSY);
	eswp->exec_func = NULL;
	eswp->exec_core = NULL;
	rw_exit(eswp->exec_lock);
	return (0);
}
Example #7
int
rumpuser_rw_tryenter(int enum_rumprwlock, struct rumpuser_rw *rw)
{

	return rw_tryenter(enum_rumprwlock, rw);
}
Example #8
/*
 * Given a zfetch structure and a zstream structure, determine whether the
 * blocks to be read are part of a co-linear pair of existing prefetch
 * streams.  If a set is found, coalesce the streams, removing one, and
 * configure the prefetch so it looks for a strided access pattern.
 *
 * In other words: if we find two sequential access streams that are
 * the same length and distance N apart, and this read is N from the
 * last stream, then we are probably in a strided access pattern.  So
 * combine the two sequential streams into a single strided stream.
 *
 * If no co-linear streams are found, 0 is returned.
 */
static int
dmu_zfetch_colinear(zfetch_t *zf, zstream_t *zh)
{
	zstream_t	*z_walk;
	zstream_t	*z_comp;

	if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
		return (0);

	if (zh == NULL) {
		rw_exit(&zf->zf_rwlock);
		return (0);
	}

	for (z_walk = list_head(&zf->zf_stream); z_walk;
	    z_walk = list_next(&zf->zf_stream, z_walk)) {
		for (z_comp = list_next(&zf->zf_stream, z_walk); z_comp;
		    z_comp = list_next(&zf->zf_stream, z_comp)) {
			int64_t		diff;

			if (z_walk->zst_len != z_walk->zst_stride ||
			    z_comp->zst_len != z_comp->zst_stride) {
				continue;
			}

			diff = z_comp->zst_offset - z_walk->zst_offset;
			if (z_comp->zst_offset + diff == zh->zst_offset) {
				z_walk->zst_offset = zh->zst_offset;
				z_walk->zst_direction = diff < 0 ? -1 : 1;
				z_walk->zst_stride =
				    diff * z_walk->zst_direction;
				z_walk->zst_ph_offset =
				    zh->zst_offset + z_walk->zst_stride;
				dmu_zfetch_stream_remove(zf, z_comp);
				mutex_destroy(&z_comp->zst_lock);
				kmem_free(z_comp, sizeof (zstream_t));

				dmu_zfetch_dofetch(zf, z_walk);

				rw_exit(&zf->zf_rwlock);
				return (1);
			}

			diff = z_walk->zst_offset - z_comp->zst_offset;
			if (z_walk->zst_offset + diff == zh->zst_offset) {
				z_walk->zst_offset = zh->zst_offset;
				z_walk->zst_direction = diff < 0 ? -1 : 1;
				z_walk->zst_stride =
				    diff * z_walk->zst_direction;
				z_walk->zst_ph_offset =
				    zh->zst_offset + z_walk->zst_stride;
				dmu_zfetch_stream_remove(zf, z_comp);
				mutex_destroy(&z_comp->zst_lock);
				kmem_free(z_comp, sizeof (zstream_t));

				dmu_zfetch_dofetch(zf, z_walk);

				rw_exit(&zf->zf_rwlock);
				return (1);
			}
		}
	}

	rw_exit(&zf->zf_rwlock);
	return (0);
}
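The stride detection described in the header comment is easier to follow with concrete numbers: two equal-length sequential streams a distance N apart, plus a new read another N further on, collapse into one strided stream. A small standalone sketch (the struct and the offsets are illustrative, not the real zstream_t fields):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct stream { int64_t offset, len, stride; };

int
main(void)
{
	struct stream a = { .offset = 100, .len = 8, .stride = 8 };	/* len == stride: sequential */
	struct stream b = { .offset = 164, .len = 8, .stride = 8 };
	int64_t new_read = 228;						/* N = 64 past stream b */

	int64_t diff = b.offset - a.offset;				/* 64 */
	if (b.offset + diff == new_read) {
		a.offset = new_read;
		a.stride = llabs(diff);		/* a becomes a single strided prefetch stream */
		printf("strided stream: offset %lld stride %lld\n",
		    (long long)a.offset, (long long)a.stride);
	}
	return 0;
}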
Example #9
/*
 * Process debugging system call.
 */
int
sys_ptrace(struct lwp *l, const struct sys_ptrace_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) req;
		syscallarg(pid_t) pid;
		syscallarg(void *) addr;
		syscallarg(int) data;
	} */
	struct proc *p = l->l_proc;
	struct lwp *lt;
	struct proc *t;				/* target process */
	struct uio uio;
	struct iovec iov;
	struct ptrace_io_desc piod;
	struct ptrace_lwpinfo pl;
	struct vmspace *vm;
	int error, write, tmp, req, pheld;
	int signo;
	ksiginfo_t ksi;
#ifdef COREDUMP
	char *path;
#endif

	error = 0;
	req = SCARG(uap, req);

	/*
	 * If attaching or detaching, we need to get a write hold on the
	 * proclist lock so that we can re-parent the target process.
	 */
	mutex_enter(proc_lock);

	/* "A foolish consistency..." XXX */
	if (req == PT_TRACE_ME) {
		t = p;
		mutex_enter(t->p_lock);
	} else {
		/* Find the process we're supposed to be operating on. */
		if ((t = p_find(SCARG(uap, pid), PFIND_LOCKED)) == NULL) {
			mutex_exit(proc_lock);
			return (ESRCH);
		}

		/* XXX-elad */
		mutex_enter(t->p_lock);
		error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
		    t, KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
		if (error) {
			mutex_exit(proc_lock);
			mutex_exit(t->p_lock);
			return (ESRCH);
		}
	}

	/*
	 * Grab a reference on the process to prevent it from execing or
	 * exiting.
	 */
	if (!rw_tryenter(&t->p_reflock, RW_READER)) {
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		return EBUSY;
	}

	/* Make sure we can operate on it. */
	switch (req) {
	case  PT_TRACE_ME:
		/* Saying that you're being traced is always legal. */
		break;

	case  PT_ATTACH:
		/*
		 * You can't attach to a process if:
		 *	(1) it's the process that's doing the attaching,
		 */
		if (t->p_pid == p->p_pid) {
			error = EINVAL;
			break;
		}

		/*
		 *  (2) it's a system process
		 */
		if (t->p_flag & PK_SYSTEM) {
			error = EPERM;
			break;
		}

		/*
		 *	(3) it's already being traced, or
		 */
		if (ISSET(t->p_slflag, PSL_TRACED)) {
			error = EBUSY;
			break;
		}

		/*
		 * 	(4) the tracer is chrooted, and its root directory is
		 * 	    not at or above the root directory of the tracee
		 */
		mutex_exit(t->p_lock);	/* XXXSMP */
		tmp = proc_isunder(t, l);
		mutex_enter(t->p_lock);	/* XXXSMP */
		if (!tmp) {
			error = EPERM;
			break;
		}
		break;

	case  PT_READ_I:
	case  PT_READ_D:
	case  PT_WRITE_I:
	case  PT_WRITE_D:
	case  PT_IO:
#ifdef PT_GETREGS
	case  PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case  PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case  PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case  PT_SETFPREGS:
#endif
#ifdef __HAVE_PTRACE_MACHDEP
	PTRACE_MACHDEP_REQUEST_CASES
#endif
		/*
		 * You can't read/write the memory or registers of a process
		 * if the tracer is chrooted, and its root directory is not at
		 * or above the root directory of the tracee.
		 */
		mutex_exit(t->p_lock);	/* XXXSMP */
		tmp = proc_isunder(t, l);
		mutex_enter(t->p_lock);	/* XXXSMP */
		if (!tmp) {
			error = EPERM;
			break;
		}
		/*FALLTHROUGH*/

	case  PT_CONTINUE:
	case  PT_KILL:
	case  PT_DETACH:
	case  PT_LWPINFO:
	case  PT_SYSCALL:
#ifdef COREDUMP
	case  PT_DUMPCORE:
#endif
#ifdef PT_STEP
	case  PT_STEP:
#endif
		/*
		 * You can't do what you want to the process if:
		 *	(1) It's not being traced at all,
		 */
		if (!ISSET(t->p_slflag, PSL_TRACED)) {
			error = EPERM;
			break;
		}

		/*
		 *	(2) it's being traced by procfs (which has
		 *	    different signal delivery semantics),
		 */
		if (ISSET(t->p_slflag, PSL_FSTRACE)) {
			uprintf("file system traced\n");
			error = EBUSY;
			break;
		}

		/*
		 *	(3) it's not being traced by _you_, or
		 */
		if (t->p_pptr != p) {
			uprintf("parent %d != %d\n", t->p_pptr->p_pid, p->p_pid);
			error = EBUSY;
			break;
		}

		/*
		 *	(4) it's not currently stopped.
		 */
		if (t->p_stat != SSTOP || !t->p_waited /* XXXSMP */) {
			uprintf("stat %d flag %d\n", t->p_stat,
			    !t->p_waited);
			error = EBUSY;
			break;
		}
		break;

	default:			/* It was not a legal request. */
		error = EINVAL;
		break;
	}

	if (error == 0)
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_PTRACE, t, KAUTH_ARG(req),
		    NULL, NULL);

	if (error != 0) {
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		rw_exit(&t->p_reflock);
		return error;
	}

	/* Do single-step fixup if needed. */
	FIX_SSTEP(t);

	/*
	 * XXX NJWLWP
	 *
	 * The entire ptrace interface needs work to be useful to a
	 * process with multiple LWPs. For the moment, we'll kluge
	 * this; memory access will be fine, but register access will
	 * be weird.
	 */
	lt = LIST_FIRST(&t->p_lwps);
	KASSERT(lt != NULL);
	lwp_addref(lt);

	/*
	 * Which locks do we need held? XXX Ugly.
	 */
	switch (req) {
#ifdef PT_STEP
	case PT_STEP:
#endif
	case PT_CONTINUE:
	case PT_DETACH:
	case PT_KILL:
	case PT_SYSCALL:
	case PT_ATTACH:
	case PT_TRACE_ME:
		pheld = 1;
		break;
	default:
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		pheld = 0;
		break;
	}

	/* Now do the operation. */
	write = 0;
	*retval = 0;
	tmp = 0;

	switch (req) {
	case  PT_TRACE_ME:
		/* Just set the trace flag. */
		SET(t->p_slflag, PSL_TRACED);
		t->p_opptr = t->p_pptr;
		break;

	case  PT_WRITE_I:		/* XXX no separate I and D spaces */
	case  PT_WRITE_D:
#if defined(__HAVE_RAS)
		/*
		 * Can't write to a RAS
		 */
		if (ras_lookup(t, SCARG(uap, addr)) != (void *)-1) {
			error = EACCES;
			break;
		}
#endif
		write = 1;
		tmp = SCARG(uap, data);
		/* FALLTHROUGH */

	case  PT_READ_I:		/* XXX no separate I and D spaces */
	case  PT_READ_D:
		/* write = 0 done above. */
		iov.iov_base = (void *)&tmp;
		iov.iov_len = sizeof(tmp);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(unsigned long)SCARG(uap, addr);
		uio.uio_resid = sizeof(tmp);
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		UIO_SETUP_SYSSPACE(&uio);

		error = process_domem(l, lt, &uio);
		if (!write)
			*retval = tmp;
		break;

	case  PT_IO:
		error = copyin(SCARG(uap, addr), &piod, sizeof(piod));
		if (error)
			break;
		switch (piod.piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			/*
			 * Can't write to a RAS
			 */
			if (ras_lookup(t, SCARG(uap, addr)) != (void *)-1) {
				return (EACCES);
			}
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			break;
		}
		if (error)
			break;
		error = proc_vmspace_getref(l->l_proc, &vm);
		if (error)
			break;
		iov.iov_base = piod.piod_addr;
		iov.iov_len = piod.piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(unsigned long)piod.piod_offs;
		uio.uio_resid = piod.piod_len;
		uio.uio_vmspace = vm;

		error = process_domem(l, lt, &uio);
		piod.piod_len -= uio.uio_resid;
		(void) copyout(&piod, SCARG(uap, addr), sizeof(piod));
		uvmspace_free(vm);
		break;

#ifdef COREDUMP
	case  PT_DUMPCORE:
		if ((path = SCARG(uap, addr)) != NULL) {
			char *dst;
			int len = SCARG(uap, data);
			if (len < 0 || len >= MAXPATHLEN) {
				error = EINVAL;
				break;
			}
			dst = malloc(len + 1, M_TEMP, M_WAITOK);
			if ((error = copyin(path, dst, len)) != 0) {
				free(dst, M_TEMP);
				break;
			}
			path = dst;
			path[len] = '\0';
		}
		error = coredump(lt, path);
		if (path)
			free(path, M_TEMP);
		break;
#endif

#ifdef PT_STEP
	case  PT_STEP:
		/*
		 * From the 4.4BSD PRM:
		 * "Execution continues as in request PT_CONTINUE; however
		 * as soon as possible after execution of at least one
		 * instruction, execution stops again. [ ... ]"
		 */
#endif
	case  PT_CONTINUE:
	case  PT_SYSCALL:
	case  PT_DETACH:
		if (req == PT_SYSCALL) {
			if (!ISSET(t->p_slflag, PSL_SYSCALL)) {
				SET(t->p_slflag, PSL_SYSCALL);
#ifdef __HAVE_SYSCALL_INTERN
				(*t->p_emul->e_syscall_intern)(t);
#endif
			}
		} else {
			if (ISSET(t->p_slflag, PSL_SYSCALL)) {
				CLR(t->p_slflag, PSL_SYSCALL);
#ifdef __HAVE_SYSCALL_INTERN
				(*t->p_emul->e_syscall_intern)(t);
#endif
			}
		}
		p->p_trace_enabled = trace_is_enabled(p);

		/*
		 * From the 4.4BSD PRM:
		 * "The data argument is taken as a signal number and the
		 * child's execution continues at location addr as if it
		 * incurred that signal.  Normally the signal number will
		 * be either 0 to indicate that the signal that caused the
		 * stop should be ignored, or that value fetched out of
		 * the process's image indicating which signal caused
		 * the stop.  If addr is (int *)1 then execution continues
		 * from where it stopped."
		 */

		/* Check that the data is a valid signal number or zero. */
		if (SCARG(uap, data) < 0 || SCARG(uap, data) >= NSIG) {
			error = EINVAL;
			break;
		}

		uvm_lwp_hold(lt);

		/* If the address parameter is not (int *)1, set the pc. */
		if ((int *)SCARG(uap, addr) != (int *)1)
			if ((error = process_set_pc(lt, SCARG(uap, addr))) != 0) {
				uvm_lwp_rele(lt);
				break;
			}

#ifdef PT_STEP
		/*
		 * Arrange for a single-step, if that's requested and possible.
		 */
		error = process_sstep(lt, req == PT_STEP);
		if (error) {
			uvm_lwp_rele(lt);
			break;
		}
#endif

		uvm_lwp_rele(lt);

		if (req == PT_DETACH) {
			CLR(t->p_slflag, PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL);

			/* give process back to original parent or init */
			if (t->p_opptr != t->p_pptr) {
				struct proc *pp = t->p_opptr;
				proc_reparent(t, pp ? pp : initproc);
			}

			/* not being traced any more */
			t->p_opptr = NULL;
		}

		signo = SCARG(uap, data);
	sendsig:
		/* Finally, deliver the requested signal (or none). */
		if (t->p_stat == SSTOP) {
			/*
			 * Unstop the process.  If it needs to take a
			 * signal, make all efforts to ensure that at
			 * an LWP runs to see it.
			 */
			t->p_xstat = signo;
			proc_unstop(t);
		} else if (signo != 0) {
			KSI_INIT_EMPTY(&ksi);
			ksi.ksi_signo = signo;
			kpsignal2(t, &ksi);
		}
		break;

	case  PT_KILL:
		/* just send the process a KILL signal. */
		signo = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE, above. */

	case  PT_ATTACH:
		/*
		 * Go ahead and set the trace flag.
		 * Save the old parent (it's reset in
		 *   _DETACH, and also in kern_exit.c:wait4())
		 * Reparent the process so that the tracing
		 *   proc gets to see all the action.
		 * Stop the target.
		 */
		t->p_opptr = t->p_pptr;
		if (t->p_pptr != p) {
			struct proc *parent = t->p_pptr;

			if (parent->p_lock < t->p_lock) {
				if (!mutex_tryenter(parent->p_lock)) {
					mutex_exit(t->p_lock);
					mutex_enter(parent->p_lock);
				}
			} else if (parent->p_lock > t->p_lock) {
				mutex_enter(parent->p_lock);
			}
			parent->p_slflag |= PSL_CHTRACED;
			proc_reparent(t, p);
			if (parent->p_lock != t->p_lock)
				mutex_exit(parent->p_lock);
		}
		SET(t->p_slflag, PSL_TRACED);
		signo = SIGSTOP;
		goto sendsig;

	case PT_LWPINFO:
		if (SCARG(uap, data) != sizeof(pl)) {
			error = EINVAL;
			break;
		}
		error = copyin(SCARG(uap, addr), &pl, sizeof(pl));
		if (error)
			break;
		tmp = pl.pl_lwpid;
		lwp_delref(lt);
		mutex_enter(t->p_lock);
		if (tmp == 0)
			lt = LIST_FIRST(&t->p_lwps);
		else {
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lt = LIST_NEXT(lt, l_sibling);
		}
		while (lt != NULL && lt->l_stat == LSZOMB)
			lt = LIST_NEXT(lt, l_sibling);
		pl.pl_lwpid = 0;
		pl.pl_event = 0;
		if (lt) {
			lwp_addref(lt);
			pl.pl_lwpid = lt->l_lid;
			if (lt->l_lid == t->p_sigctx.ps_lwp)
				pl.pl_event = PL_EVENT_SIGNAL;
		}
		mutex_exit(t->p_lock);

		error = copyout(&pl, SCARG(uap, addr), sizeof(pl));
		break;

#ifdef PT_SETREGS
	case  PT_SETREGS:
		write = 1;
#endif
#ifdef PT_GETREGS
	case  PT_GETREGS:
		/* write = 0 done above. */
#endif
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		tmp = SCARG(uap, data);
		if (tmp != 0 && t->p_nlwps > 1) {
			lwp_delref(lt);
			mutex_enter(t->p_lock);
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lwp_addref(lt);
			mutex_exit(t->p_lock);
		}
		if (!process_validregs(lt))
			error = EINVAL;
		else {
			error = proc_vmspace_getref(l->l_proc, &vm);
			if (error)
				break;
			iov.iov_base = SCARG(uap, addr);
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_vmspace = vm;

			error = process_doregs(l, lt, &uio);
			uvmspace_free(vm);
		}
		break;
#endif

#ifdef PT_SETFPREGS
	case  PT_SETFPREGS:
		write = 1;
#endif
#ifdef PT_GETFPREGS
	case  PT_GETFPREGS:
		/* write = 0 done above. */
#endif
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		tmp = SCARG(uap, data);
		if (tmp != 0 && t->p_nlwps > 1) {
			lwp_delref(lt);
			mutex_enter(t->p_lock);
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lwp_addref(lt);
			mutex_exit(t->p_lock);
		}
		if (!process_validfpregs(lt))
			error = EINVAL;
		else {
			error = proc_vmspace_getref(l->l_proc, &vm);
			if (error)
				break;
			iov.iov_base = SCARG(uap, addr);
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_vmspace = vm;

			error = process_dofpregs(l, lt, &uio);
			uvmspace_free(vm);
		}
		break;
#endif

#ifdef __HAVE_PTRACE_MACHDEP
	PTRACE_MACHDEP_REQUEST_CASES
		error = ptrace_machdep_dorequest(l, lt,
		    req, SCARG(uap, addr), SCARG(uap, data));
		break;
#endif
	}

	if (pheld) {
		mutex_exit(t->p_lock);
		mutex_exit(proc_lock);
	}
	if (lt != NULL)
		lwp_delref(lt);
	rw_exit(&t->p_reflock);

	return error;
}
Example #10
/*
 * Sync quota information records to disk for the specified file system
 * or all file systems with quotas if ufsvfsp == NULL.  Grabs a reader
 * lock on vfs_dqrwlock if it is needed.
 *
 * Currently, if ufsvfsp is NULL, then do_lock is always true, but the
 * routine is coded to account for either do_lock value.  This seemed
 * to be the safer thing to do.
 */
int
quotasync(struct ufsvfs *ufsvfsp, int do_lock)
{
	struct dquot *dqp;

	rw_enter(&dq_rwlock, RW_READER);
	if (!quotas_initialized) {
		rw_exit(&dq_rwlock);
		return (ESRCH);
	}
	rw_exit(&dq_rwlock);

	/*
	 * The operation applies to a specific file system only.
	 */
	if (ufsvfsp) {
		if (do_lock) {
			rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
		}

		/*
		 * Quotas are not enabled on this file system so bail.
		 */
		if ((ufsvfsp->vfs_qflags & MQ_ENABLED) == 0) {
			if (do_lock) {
				rw_exit(&ufsvfsp->vfs_dqrwlock);
			}
			return (ESRCH);
		}

		/*
		 * This operation is a no-op on a logging file system because
		 * quota information is treated as metadata and is in the log.
		 * This code path treats quota information as user data which
		 * is not necessary on a logging file system.
		 */
		if (TRANS_ISTRANS(ufsvfsp)) {
			if (do_lock) {
				rw_exit(&ufsvfsp->vfs_dqrwlock);
			}
			return (0);
		}

		/*
		 * Try to sync all the quota info records for this
		 * file system:
		 */
		for (dqp = dquot; dqp < dquotNDQUOT; dqp++) {
			/*
			 * If someone else has it, then ignore it.
			 */
			if (!mutex_tryenter(&dqp->dq_lock)) {
				continue;
			}

			/*
			 * The quota info record is for this file system
			 * and it has changes.
			 */
			if (dqp->dq_ufsvfsp == ufsvfsp &&
			    (dqp->dq_flags & DQ_MOD)) {
				ASSERT(ufsvfsp->vfs_qflags & MQ_ENABLED);
				dqupdate(dqp);
			}

			mutex_exit(&dqp->dq_lock);
		}
		if (do_lock) {
			rw_exit(&ufsvfsp->vfs_dqrwlock);
		}

		return (0);
	}

	/*
	 * Try to sync all the quota info records for *all* file systems
	 * for which quotas are enabled.
	 */
	for (dqp = dquot; dqp < dquotNDQUOT; dqp++) {
		/*
		 * If someone else has it, then ignore it.
		 */
		if (!mutex_tryenter(&dqp->dq_lock)) {
			continue;
		}

		ufsvfsp = dqp->dq_ufsvfsp;	/* shorthand */

		/*
		 * This quota info record has no changes or is
		 * not a valid quota info record yet.
		 */
		if ((dqp->dq_flags & DQ_MOD) == 0 || ufsvfsp == NULL) {
			mutex_exit(&dqp->dq_lock);
			continue;
		}

		/*
		 * Now we have a potential lock order problem:
		 *
		 *	vfs_dqrwlock > dq_lock
		 *
		 * so if we have to get vfs_dqrwlock, then go thru hoops
		 * to avoid deadlock.  If we cannot get the order right,
		 * then we ignore this quota info record.
		 */
		if (do_lock) {
			/*
			 * If we can't grab vfs_dqrwlock, then we don't
			 * want to wait to avoid deadlock.
			 */
			if (rw_tryenter(&ufsvfsp->vfs_dqrwlock,
			    RW_READER) == 0) {
				mutex_exit(&dqp->dq_lock);
				continue;
			}
			/*
			 * Okay, now we have both dq_lock and vfs_dqrwlock.
			 * We should not deadlock for the following reasons:
			 * - If another thread has a reader lock on
			 *   vfs_dqrwlock and is waiting for dq_lock,
			 *   there is no conflict because we can also have
			 *   a reader lock on vfs_dqrwlock.
			 * - If another thread has a writer lock on
			 *   vfs_dqrwlock and is waiting for dq_lock,
			 *   we would have failed the rw_tryenter() above
			 *   and given up dq_lock.
			 * - Since we have dq_lock another thread cannot
			 *   have it and be waiting for vfs_dqrwlock.
			 */
		}

		/*
		 * Since we got to this file system via a quota info
		 * record and we have vfs_dqrwlock this is paranoia
		 * to make sure that quotas are enabled.
		 */
		ASSERT(ufsvfsp->vfs_qflags & MQ_ENABLED);

		/*
		 * We are not logging.  See above logging file system
		 * comment.
		 */
		if (!TRANS_ISTRANS(ufsvfsp)) {
			dqupdate(dqp);
		}

		/*
		 * Since we have a private copy of dqp->dq_ufsvfsp,
		 * we can drop dq_lock now.
		 */
		mutex_exit(&dqp->dq_lock);

		if (do_lock) {
			rw_exit(&ufsvfsp->vfs_dqrwlock);
		}
	}

	return (0);
}
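The long comment inside quotasync() above describes the classic trylock escape from a lock-ordering problem: the thread already holds dq_lock, but the documented order is vfs_dqrwlock before dq_lock, so it uses rw_tryenter() and simply skips the record when the lock is unavailable instead of blocking. A minimal userland sketch of the same idea, assuming POSIX threads (the lock names and the helper are illustrative only):

#include <pthread.h>
#include <stdbool.h>

/* Documented order: outer_rwlock before inner_mtx. */
static pthread_rwlock_t outer_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t inner_mtx = PTHREAD_MUTEX_INITIALIZER;

/*
 * Called with inner_mtx already held, i.e. in the "wrong" order.
 * Blocking on outer_rwlock here could deadlock against a thread that
 * follows the documented order, so only try it and back off on failure.
 */
static bool
sync_one_record_locked(void)
{
	if (pthread_rwlock_tryrdlock(&outer_rwlock) != 0)
		return false;		/* caller drops inner_mtx and moves on */

	/* ... update the record while holding both locks ... */

	pthread_rwlock_unlock(&outer_rwlock);
	return true;
}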
Example #11
int
memsegs_trylock(int writer)
{
	return (rw_tryenter(&memsegslock, writer ? RW_WRITER : RW_READER));
}