Example no. 1
/*
 * Variant of page_unlock() specifically for the page freelist
 * code. This code exists only because of the backwards locking
 * order of the page freelist manager; it is a vile hack, so
 * please don't call it.
 */
void
page_unlock_nocapture(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock_nocapture: page %p is deleted", pp);
	} else if (old < 0) {
		THREAD_KPRI_RELEASE();
		pp->p_selock &= SE_EWANTED;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock_nocapture: page %p is not locked", pp);
	}

	mutex_exit(pse);
}
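
page_unlock_nocapture() decodes the usual p_selock convention: zero means unlocked, positive values count shared holders in units of SE_READER, any negative value is an exclusive (writer) hold, SE_DELETED is a distinguished negative sentinel, and the SE_EWANTED bit is preserved across the unlock so a queued writer is not forgotten. The decoder below is only a sketch that restates that branch logic; it is not a routine from the source.

/*
 * Illustrative decoder for the p_selock convention used above.
 * Not part of the source; it just restates the unlock branch logic.
 */
static const char *
selock_state_sketch(selock_t old)
{
	if ((old & ~SE_EWANTED) == 0)
		return ("unlocked");
	if ((old & ~SE_EWANTED) == SE_DELETED)
		return ("deleted; unlocking such a page panics");
	if (old < 0)
		return ("held exclusively by a writer");
	return ("held shared; the value counts readers in SE_READER units");
}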
Example no. 2
/*ARGSUSED*/
int64_t
loadable_syscall(
    long a0, long a1, long a2, long a3,
    long a4, long a5, long a6, long a7)
{
	klwp_t *lwp = ttolwp(curthread);
	int64_t	rval;
	struct sysent *callp;
	struct sysent *se = LWP_GETSYSENT(lwp);
	krwlock_t *module_lock;
	int code, error = 0;
	int64_t (*sy_call)();

	code = curthread->t_sysnum;
	callp = se + code;

	/*
	 * Try to autoload the system call if necessary.
	 */
	module_lock = lock_syscall(se, code);
	THREAD_KPRI_RELEASE();	/* drop priority given by rw_enter */

	/*
	 * We've locked either the loaded syscall or nosys.
	 */

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		if (callp->sy_flags & SE_ARGC) {
			sy_call = (int64_t (*)())callp->sy_call;
			rval = (*sy_call)(a0, a1, a2, a3, a4, a5);
		} else
			rval = syscall_ap();
	} else {
#endif
		/*
		 * Now that it's loaded, make sure enough args were copied.
		 */
		if (COPYIN_ARGS32(lwptoregs(lwp), lwp->lwp_ap, callp->sy_narg))
			error = EFAULT;
		if (error) {
			rval = set_errno(error);
		} else if (callp->sy_flags & SE_ARGC) {
			sy_call = (int64_t (*)())callp->sy_call;
			rval = (*sy_call)(lwp->lwp_ap[0], lwp->lwp_ap[1],
			    lwp->lwp_ap[2], lwp->lwp_ap[3], lwp->lwp_ap[4],
			    lwp->lwp_ap[5]);
		} else
			rval = syscall_ap();
	}

	THREAD_KPRI_REQUEST();	/* regain priority from read lock */
	rw_exit(module_lock);
	return (rval);
}
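
In the SE_ARGC branch, the generic sy_call pointer is cast to a handler that takes up to six arguments directly, either the register arguments (native, LP64) or the copied-in lwp_ap[] values (32-bit caller); without SE_ARGC, syscall_ap() passes the saved argument array instead. Below is a minimal user-level sketch of that dispatch-by-cast idiom; demo_handler and the argument values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical handler that takes its arguments directly, the way an
 * SE_ARGC system call does in the LP64 branch above.
 */
static int64_t
demo_handler(long a0, long a1, long a2, long a3, long a4, long a5)
{
	return (a0 + a1 + a2 + a3 + a4 + a5);
}

int
main(void)
{
	/* Unprototyped pointer, mirroring sy_call in the code above. */
	int64_t (*sy_call)();

	sy_call = (int64_t (*)())demo_handler;
	printf("%lld\n", (long long)(*sy_call)(1L, 2L, 3L, 4L, 5L, 6L));
	return (0);
}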
Example no. 3
/*
 * lxpr_unlock()
 *
 * Unlock a locked process.
 */
void
lxpr_unlock(proc_t *p)
{
	ASSERT(p->p_proc_flag & P_PR_LOCK);
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(!MUTEX_HELD(&pidlock));

	cv_signal(&pr_pid_cv[p->p_slot]);
	p->p_proc_flag &= ~P_PR_LOCK;
	mutex_exit(&p->p_lock);
	THREAD_KPRI_RELEASE();
}
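
For context, this undoes a locking side that claims the per-process P_PR_LOCK flag under p_lock and takes the kernel-priority boost released here. The sketch below is a simplified assumption of that counterpart, not the actual lxpr_lock(), which among other things locates the process by PID.

/*
 * Simplified sketch of the lock side assumed by lxpr_unlock(): wait for
 * any other holder of the pseudo "PR" lock, claim it, and take the
 * kernel-priority boost that lxpr_unlock() later releases.  The caller
 * keeps p->p_lock held until lxpr_unlock(p).
 */
static void
lxpr_lock_sketch(proc_t *p)
{
	mutex_enter(&p->p_lock);
	while (p->p_proc_flag & P_PR_LOCK)
		cv_wait(&pr_pid_cv[p->p_slot], &p->p_lock);
	p->p_proc_flag |= P_PR_LOCK;
	THREAD_KPRI_REQUEST();
}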
Example no. 4
/*
 * Release the page's "shared/exclusive" lock and wake up anyone
 * who might be waiting for it.
 */
void
page_unlock(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock: page %p is deleted", pp);
	} else if (old < 0) {
		THREAD_KPRI_RELEASE();
		pp->p_selock &= SE_EWANTED;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock: page %p is not locked", pp);
	}

	if (pp->p_selock == 0) {
		/*
		 * If the T_CAPTURING bit is set, we must not try to capture
		 * the page again: we could recurse, which could lead to a
		 * stack overflow panic or to spending a relatively long time
		 * in the kernel making no progress.
		 */
		if ((pp->p_toxic & PR_CAPTURE) &&
		    !(curthread->t_flag & T_CAPTURING) &&
		    !PP_RETIRED(pp)) {
			THREAD_KPRI_REQUEST();
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			page_unlock_capture(pp);
		} else {
			mutex_exit(pse);
		}
	} else {
		mutex_exit(pse);
	}
}
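
A typical caller pairs this with page_lock(). The sketch below assumes the usual illumos page_lock(pp, se, lock, reclaim) interface with SE_SHARED and P_NO_RECLAIM; treat the exact signature as an assumption rather than a quotation from this file.

/*
 * Sketch: take a shared hold on a page, use it read-only, drop it.
 * page_unlock() then wakes waiters and, if PR_CAPTURE is set on the
 * page, upgrades to SE_WRITER and calls page_unlock_capture() for us.
 */
static void
inspect_page_sketch(page_t *pp)
{
	if (!page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_NO_RECLAIM))
		return;		/* could not get the shared lock */

	/* ... read-only use of the page ... */

	page_unlock(pp);
}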
Example no. 5
void
page_lock_delete(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_vnode == NULL);
	ASSERT(pp->p_offset == (u_offset_t)-1);
	ASSERT(!PP_ISFREE(pp));

	mutex_enter(pse);
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_DELETED;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}
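
page_lock_delete() is also the cleanest instance of the wake-up idiom every routine in this set follows: publish the new lock state under the per-page mutex, then broadcast only if someone is actually sleeping on the page's condition variable. Distilled as a sketch (not a routine from the source):

/*
 * Sketch of the shared idiom: update p_selock under PAGE_SE_MUTEX(pp)
 * and wake waiters only when the condition variable actually has any.
 */
static void
publish_and_wake_sketch(page_t *pp, selock_t newval)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	pp->p_selock = newval;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}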
Example no. 6
/*
 * Downgrade the "exclusive" lock on the page to a "shared" lock
 * while holding the mutex protecting this page's p_selock field.
 */
void
page_downgrade(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	int excl_waiting;

	ASSERT((pp->p_selock & ~SE_EWANTED) != SE_DELETED);
	ASSERT(PAGE_EXCL(pp));

	mutex_enter(pse);
	excl_waiting = pp->p_selock & SE_EWANTED;
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_READER | excl_waiting;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}
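
A common use of a downgrade is to do setup that needs the exclusive hold and then keep only a shared hold for the remaining read-only work. The sketch below reuses the page_lock() signature assumed earlier; it is illustrative, not a caller taken from the source.

/*
 * Sketch: exclusive hold for modification, downgrade to shared for the
 * rest.  A waiting writer's SE_EWANTED bit survives the downgrade.
 */
static void
setup_then_share_sketch(page_t *pp)
{
	if (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_NO_RECLAIM))
		return;

	/* ... changes that require the exclusive hold ... */

	page_downgrade(pp);

	/* ... read-only work under the shared hold ... */

	page_unlock(pp);
}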
Example no. 7
/*ARGSUSED*/
int64_t
loadable_syscall(
    long a0, long a1, long a2, long a3,
    long a4, long a5, long a6, long a7)
{
	int64_t		rval;
	struct sysent	*callp;
	struct sysent	*se = LWP_GETSYSENT(ttolwp(curthread));
	krwlock_t	*module_lock;
	int		code;

	code = curthread->t_sysnum;
	callp = se + code;

	/*
	 * Try to autoload the system call if necessary.
	 */
	module_lock = lock_syscall(se, code);
	THREAD_KPRI_RELEASE();	/* drop priority given by rw_enter */

	/*
	 * We've locked either the loaded syscall or nosys.
	 */
	if (callp->sy_flags & SE_ARGC) {
		int64_t (*sy_call)();

		sy_call = (int64_t (*)())callp->sy_call;
		rval = (*sy_call)(a0, a1, a2, a3, a4, a5);
	} else {
		rval = syscall_ap();
	}

	THREAD_KPRI_REQUEST();	/* regain priority from read lock */
	rw_exit(module_lock);
	return (rval);
}
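
Both versions of loadable_syscall() bracket the dispatch the same way: lock_syscall() returns with the module's rwlock read-held (and with the kernel-priority boost rw_enter() took), the boost is dropped for the duration of the potentially long system call, and it is re-requested just before rw_exit(), which expects to release it. Condensed into a sketch, with do_call() standing in for the SE_ARGC / syscall_ap() dispatch above:

/*
 * Sketch of the priority/rwlock bracket shared by both versions.
 * do_call() is a placeholder, not a function from the source.
 */
static int64_t
kpri_bracket_sketch(struct sysent *se, int code)
{
	krwlock_t *module_lock;
	int64_t rval;

	module_lock = lock_syscall(se, code);	/* returns read-held */
	THREAD_KPRI_RELEASE();	/* drop the boost while the call runs */

	rval = do_call();	/* placeholder for the real dispatch */

	THREAD_KPRI_REQUEST();	/* rw_exit() will drop a boost */
	rw_exit(module_lock);
	return (rval);
}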