Example #1
File: ast.c Project: MarginC/kame
void
userret(struct proc *p, u_int32_t pc, quad_t oticks)
{
	int sig;

	/* Take pending signals. */
	while ((sig = (CURSIG(p))) != 0)
		postsig(sig);

	p->p_priority = p->p_usrpri;

	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(NULL);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
	}

	curpriority = p->p_priority;
}
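
Several of the userret() variants above take the trapped pc and an oticks argument so that profiling can charge recent system time. As a hypothetical sketch (not code from the projects above): the machine-dependent trap or syscall handler typically snapshots p->p_sticks on kernel entry and passes that snapshot, together with the user pc from the trapframe, to userret() on the way back out. The function and field names below (trap_return_sketch, tf_pc) are illustrative only.

/*
 * Hypothetical caller sketch; trap_return_sketch and tf_pc are illustrative
 * names, not identifiers from the projects above.
 */
void
trap_return_sketch(struct proc *p, struct trapframe *frame)
{
	u_quad_t sticks = p->p_sticks;	/* snapshot system ticks at kernel entry */

	/* ... machine-dependent trap or syscall work happens here ... */

	/* charge kernel time since entry to the trapped pc and return to user mode */
	userret(p, frame->tf_pc, sticks);
}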
Example #2
/*
 * Same as above, but also handles writeback completion on 68040.
 */
void
wb_userret(struct proc *p, struct frame *fp)
{
	int sig;
	union sigval sv;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;

	/*
	 * Deal with user mode writebacks (from trap, or from sigreturn).
	 * If any writeback fails, go back and attempt signal delivery,
	 * unless we have already been here and attempted the writeback
	 * (e.g. bad address with user ignoring SIGSEGV).  In that case
	 * we just return to the user without successfully completing
	 * the writebacks.  Maybe we should just drop the sucker?
	 */
	if (mmutype == MMU_68040 && fp->f_format == FMT7) {
		if ((sig = writeback(fp)) != 0) {
			sv.sival_int = fp->f_fmt7.f_fa;
			trapsignal(p, sig, T_MMUFLT, SEGV_MAPERR, sv);

			while ((sig = CURSIG(p)) != 0)
				postsig(sig);
			p->p_priority = p->p_usrpri;
		}
	}
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
}
Example #3
File: trap.c Project: MarginC/kame
static __inline void
userret (struct lwp *l, register_t pc, u_quad_t oticks)
{
	struct proc *p = l->l_proc;
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(l)) != 0)
		postsig(sig);

	l->l_priority = l->l_usrpri;
	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(0);
		while ((sig = CURSIG(l)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (l->l_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
	}

	curcpu()->ci_schedstate.spc_curpriority = l->l_priority;
}
Example #4
File: trap.c Project: MarginC/kame
static inline void
userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;

	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(NULL);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, frame->tf_sxip & XIP_ADDR,
		    (int)(p->p_sticks - oticks) * psratio);
	}
	curpriority = p->p_priority;
}
Example #5
/*
 * Trap and syscall both need the following work done before returning
 * to user mode.
 */
void
userret(struct proc *p)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
Example #6
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 */
static inline void
userret(struct proc *p, int pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;
	if (want_ast) {
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we put ourselves on the run queue
		 * but before we switched, we might not be on the queue
		 * indicated by our priority.
		 */
		(void) splstatclock();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		(void) spl0();
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(p, pc, (int)(p->p_sticks - oticks));

	curpriority = p->p_priority;
}
Example #7
/*
 * userret:
 *
 *	Common code used by various exception handlers to
 *	return to usermode.
 */
static __inline void
userret(struct proc *p)
{
	int sig;

	/* Take pending signals. */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
Example #8
/*
 * Define the code needed before returning to user mode, for
 * trap and syscall.
 */
void
userret(struct proc *p)
{
	int sig;

	/* Do any deferred user pmap operations. */
	PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
Example #9
/*
 * trap and syscall both need the following work done before returning
 * to user mode.
 */
void
userret(struct proc *p)
{
	int sig;

#ifdef MAC
	if (p->p_flag & P_MACPEND)
		mac_proc_userret(p);
#endif

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
}
Example #10
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 */
static __inline void
userret(struct proc *p)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

#ifdef notyet
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
#else
	curpriority = p->p_priority = p->p_usrpri;
#endif
}
Example #11
void
userret(struct proc *p, register_t pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	p->p_priority = p->p_usrpri;
	if (astpending) {
		astpending = 0;
		if (p->p_flag & P_OWEUPC) {
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(NULL);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
	}

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority;
}
Example #12
/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	int flags;
	int sig;

	td = curthread;
	p = td->td_proc;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
	WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
	mtx_assert(&Giant, MA_NOTOWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	td->td_frame = framep;
	td->td_pticks = 0;

	/*
	 * This updates td_flags for the checks below in one
	 * "atomic" operation with turning off the astpending flag.
	 * If another AST is triggered while we are handling the
	 * AST's saved in flags, the astpending flag will be set and
	 * ast() will be called again.
	 */
	thread_lock(td);
	flags = td->td_flags;
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
	    TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND);
	thread_unlock(td);
	PCPU_INC(cnt.v_trap);

	if (td->td_ucred != p->p_ucred)
		cred_update_thread(td);
	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
		addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
		td->td_profil_ticks = 0;
		td->td_pflags &= ~TDP_OWEUPC;
	}
#ifdef HWPMC_HOOKS
	/* Handle Software PMC callchain capture. */
	if (PMC_IS_PENDING_CALLCHAIN(td))
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT, (void *) framep);
#endif
	if (flags & TDF_ALRMPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
	if (flags & TDF_PROFPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
#ifdef MAC
	if (flags & TDF_MACPEND)
		mac_thread_userret(td);
#endif
	if (flags & TDF_NEEDRESCHED) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(1, 1, __func__);
#endif
		thread_lock(td);
		sched_prio(td, td->td_user_pri);
		mi_switch(SW_INVOL | SWT_NEEDRESCHED, NULL);
		thread_unlock(td);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(0, 1, __func__);
#endif
	}

	/*
	 * Check for signals. Unlocked reads of p_pendingcnt or
	 * p_siglist might cause a process-directed signal to be handled
	 * later.
	 */
	if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
	}
	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	if (flags & TDF_NEEDSUSPCHK) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_OLDMASK) {
		td->td_pflags &= ~TDP_OLDMASK;
		kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
	}

	userret(td, framep);
}
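
ast() only runs when the low-level return-to-user path sees the per-thread "AST pending" flag. As a hypothetical sketch (not code from the project above): kernel code that wants work done on the next return to user mode sets one of the TDF_* flags tested in ast(), together with TDF_ASTPENDING, while holding the thread lock. The helper name below is illustrative, not a real kernel function.

/*
 * Hypothetical sketch: ask ast() to reschedule this thread the next time it
 * returns to user mode.  request_resched_sketch is an illustrative name.
 */
static void
request_resched_sketch(struct thread *td)
{
	thread_lock(td);
	td->td_flags |= TDF_NEEDRESCHED | TDF_ASTPENDING;
	thread_unlock(td);
}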
Example #13
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation, so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	int sig;
	int ptok;

	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_rip,
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * Specific on-return-to-usermode checks (LWP_MP_WEXIT,
	 * LWP_MP_VNLRU, etc).
	 */
	if (lp->lwp_mpflags & LWP_MP_URETMASK)
		lwpuserret(lp);

	/*
	 * Block here if we are in a stopped state.
	 */
	if (STOPLWP(p, lp)) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}
	while (dump_stop_usertds) {
		tsleep(&dump_stop_usertds, 0, "dumpstp", 0);
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_LCK_TRACE(lp, &ptok)) != 0) {
		postsig(sig, ptok);
		goto recheck;
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (lp->lwp_flags & LWP_OLDMASK) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
		goto recheck;
	}
}
Example #14
/*
 * Handle signals, upcalls, profiling, and other AST's and/or tasks that
 * must be completed before we can return to or try to return to userland.
 *
 * Note that td_sticks is a 64 bit quantity, but there's no point doing
 * 64 bit arithmetic on the delta calculation, so the absolute tick values
 * are truncated to an integer.
 */
static void
userret(struct lwp *lp, struct trapframe *frame, int sticks)
{
	struct proc *p = lp->lwp_proc;
	void (*hook)(void);
	int sig;

	if (p->p_userret != NULL) {
		hook = p->p_userret;
		p->p_userret = NULL;
		(*hook)();
	}
		
	/*
	 * Charge system time if profiling.  Note: times are in microseconds.
	 * This may do a copyout and block, so do it first even though it
	 * means some system time will be charged as user time.
	 */
	if (p->p_flags & P_PROFIL) {
		addupc_task(p, frame->tf_eip, 
			(u_int)((int)lp->lwp_thread->td_sticks - sticks));
	}

recheck:
	/*
	 * If the jungle wants us dead, so be it.
	 */
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}

	/*
	 * Block here if we are in a stopped state.
	 */
	if (p->p_stat == SSTOP || dump_stop_usertds) {
		lwkt_gettoken(&p->p_token);
		tstop();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending upcalls.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the upcall.
	 */
	if (p->p_flags & (P_SIGVTALRM | P_SIGPROF | P_UPCALLPEND)) {
		lwkt_gettoken(&p->p_token);
		if (p->p_flags & P_SIGVTALRM) {
			p->p_flags &= ~P_SIGVTALRM;
			ksignal(p, SIGVTALRM);
		}
		if (p->p_flags & P_SIGPROF) {
			p->p_flags &= ~P_SIGPROF;
			ksignal(p, SIGPROF);
		}
		if (p->p_flags & P_UPCALLPEND) {
			p->p_flags &= ~P_UPCALLPEND;
			postupcall(lp);
		}
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * Post any pending signals.  If running a virtual kernel be sure
	 * to restore the virtual kernel's vmspace before posting the signal.
	 *
	 * WARNING!  postsig() can exit and not return.
	 */
	if ((sig = CURSIG_TRACE(lp)) != 0) {
		lwkt_gettoken(&p->p_token);
		postsig(sig);
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * block here if we are swapped out, but still process signals
	 * (such as SIGKILL).  proc0 (the swapin scheduler) is already
	 * aware of our situation, we do not have to wake it up.
	 */
	if (p->p_flags & P_SWAPPEDOUT) {
		lwkt_gettoken(&p->p_token);
		get_mplock();
		p->p_flags |= P_SWAPWAIT;
		swapin_request();
		if (p->p_flags & P_SWAPWAIT)
			tsleep(p, PCATCH, "SWOUT", 0);
		p->p_flags &= ~P_SWAPWAIT;
		rel_mplock();
		lwkt_reltoken(&p->p_token);
		goto recheck;
	}

	/*
	 * In a multi-threaded program it is possible for a thread to change
	 * signal state during a system call which temporarily changes the
	 * signal mask.  In this case postsig() might not be run and we
	 * have to restore the mask ourselves.
	 */
	if (lp->lwp_flags & LWP_OLDMASK) {
		lp->lwp_flags &= ~LWP_OLDMASK;
		lp->lwp_sigmask = lp->lwp_oldsigmask;
		goto recheck;
	}
}