Example #1
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
	struct syscall *sysc = (struct syscall*)arg;
	/* We're in vcore context.  Regardless of what we do here, we'll pop back
	 * into vcore_entry, just like with any uthread_yield.  We don't have a 2LS,
	 * but we always have one uthread: the SCP's thread0.  Note that at this
	 * point, current_uthread is still set, but will be cleared as soon as the
	 * callback returns (and before we start over in vcore_entry).
	 *
	 * If notif_pending is already set (due to a concurrent signal), we'll fail
	 * to yield.  Once in VC ctx, we'll handle any other signals/events that
	 * arrived, then restart the uthread that issued the syscall, which if the
	 * syscall isn't done yet, will just blockon again.
	 *
	 * The one trick is that we don't want to register the evq twice.  The way
	 * register_evq currently works, if an SC completed (SC_DONE) while we were
	 * registering, we could end up clearing sysc->ev_q before the kernel sees
	 * it.  We'll use u_data to track whether we registered or not. */
	#define U_DATA_BLOB ((void*)0x55555555)
	if ((sysc->u_data == U_DATA_BLOB)
	    || register_evq(sysc, &__ros_scp_simple_evq)) {
		sysc->u_data = U_DATA_BLOB;
		/* Sending false for now - we want to signal proc code that we want to
		 * wait (piggybacking on the MCP meaning of this variable).  If
		 * notif_pending is set, the kernel will immediately return us. */
		__ros_syscall_noerrno(SYS_yield, FALSE, 0, 0, 0, 0, 0);
	}
}
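The U_DATA_BLOB guard above only works because register_evq reports whether the kernel will actually deliver an event.  A minimal sketch of that contract using C11 atomics; the flag values, field layout, and the _sketch-suffixed names are assumptions for illustration, not Akaros's real ABI (parlib's event code has the genuine version):

#include <stdatomic.h>
#include <stdbool.h>

#define SC_DONE   0x1	/* assumed: kernel sets this when the sysc completes */
#define SC_UEVENT 0x2	/* assumed: userspace wants an event on completion */

struct event_queue;	/* opaque for this sketch */

struct syscall_sketch {	/* hypothetical stand-in for struct syscall */
	atomic_int flags;
	struct event_queue *ev_q;
	void *u_data;
};

/* Returns true if the kernel will send an event on ev_q; false if the syscall
 * already completed, in which case no event is coming and the caller must
 * restart the thread itself. */
bool register_evq_sketch(struct syscall_sketch *sysc, struct event_queue *ev_q)
{
	int old_flags = atomic_load(&sysc->flags);

	sysc->ev_q = ev_q;
	/* Publish ev_q before setting SC_UEVENT, so the kernel never sees the
	 * flag without a valid pointer behind it. */
	atomic_thread_fence(memory_order_release);
	do {
		if (old_flags & SC_DONE) {
			sysc->ev_q = 0;	/* don't leave a dangling ev_q */
			return false;	/* lost the race; no event will come */
		}
	} while (!atomic_compare_exchange_weak(&sysc->flags, &old_flags,
	                                       old_flags | SC_UEVENT));
	return true;
}

Given that contract, the U_DATA_BLOB check keeps a second pass through blockon from re-registering and clearing sysc->ev_q behind the kernel's back.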
Example #2
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
	struct syscall *sysc = (struct syscall*)arg;
	thread0_thread_has_blocked(uthread, 0);
	if (!register_evq(sysc, sysc_evq))
		thread0_thread_runnable(uthread);
}
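This leaner variant leans on the thread0 scheduler's bookkeeping: an SCP has exactly one uthread, so "blocked" can be a single flag that vcore_entry consults.  A sketch of that shape; the names and bodies are assumptions patterned after Akaros's thread0 scheduler, not verbatim source:

#include <stdbool.h>

struct uthread;		/* opaque here */
struct event_msg;	/* opaque here */

static struct {
	bool is_blocked;	/* the one bit of scheduler state we need */
} thread0_info;

static void thread0_thread_has_blocked(struct uthread *uth, int flags)
{
	thread0_info.is_blocked = true;
}

static void thread0_thread_runnable(struct uthread *uth)
{
	/* vcore_entry (assumed) checks this and restarts thread0 when clear. */
	thread0_info.is_blocked = false;
}

/* When register_evq succeeds, the kernel's completion event lands in a
 * handler like this, which makes thread0 runnable again. */
static void thread0_handle_syscall(struct event_msg *ev_msg,
                                   unsigned int ev_type, void *data)
{
	thread0_info.is_blocked = false;
}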
Example #3
/* This will be called from vcore context, after the current thread has yielded
 * and is trying to block on sysc.  Need to put it somewhere where we can wake it
 * up when the sysc is done.  For now, we'll have the kernel send us an event
 * when the syscall is done. */
void pth_blockon_sysc(struct syscall *sysc)
{
	uint32_t vcoreid = vcore_id();

	assert(current_uthread->state == UT_BLOCKED);
	/* rip from the active queue */
	struct mcs_lock_qnode local_qn = {0};
	struct pthread_tcb *pthread = (struct pthread_tcb*)current_uthread;
	mcs_lock_notifsafe(&queue_lock, &local_qn);
	threads_active--;
	TAILQ_REMOVE(&active_queue, pthread, next);
	mcs_unlock_notifsafe(&queue_lock, &local_qn);

	/* Set things up so we can wake this thread up later */
	sysc->u_data = current_uthread;
	/* Put the uthread on the pending list.  Note the ordering.  We must be on
	 * the list before we register the ev_q.  All sysc's must be tracked before
	 * we tell the kernel to signal us. */
	TAILQ_INSERT_TAIL(&sysc_mgmt[vcoreid].pending_syscs, pthread, next);
	/* Safety: later we'll make sure we restart on the core we slept on */
	pthread->vcoreid = vcoreid;
	/* Register our vcore's syscall ev_q to hear about this syscall. */
	if (!register_evq(sysc, &sysc_mgmt[vcoreid].ev_q)) {
		/* Lost the race with the call being done.  The kernel won't send the
		 * event.  Just restart him. */
		restart_thread(sysc);
	}
	/* GIANT WARNING: do not touch the thread after this point. */
}
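The ordering rule in the comments (be on the pending list before register_evq) matters because the wake-up path walks back through sysc->u_data.  A hedged sketch of restart_thread, following this example's naming conventions but with an assumed body:

#include <assert.h>

static void restart_thread(struct syscall *sysc)
{
	struct uthread *restartee = (struct uthread*)sysc->u_data;

	assert(restartee);
	assert(restartee->state == UT_BLOCKED);
	assert(restartee->sysc == sysc);
	restartee->sysc = 0;	/* so a later blockon doesn't see a stale sysc */
	uthread_runnable(restartee);	/* hand it back to the scheduler */
}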
Example #4
/* Glibc initial blockon, usable before parlib code can init things (or if it
 * never can, like for RTLD).  As processes initialize further, they will use
 * different functions.
 *
 * In essence, we're in vcore context already.  For one, this function could be
 * called from a full SCP in vcore context.  For early processes, we are not
 * vcctx_ready.  Either way, we don't need to worry about the kernel forcing us
 * into vcore context and otherwise clearing notif_pending.  For those curious,
 * the old race was that the kernel sets notif pending after we register, then
 * we drop into VC ctx, clear notif pending, and yield. */
void __ros_early_syscall_blockon(struct syscall *sysc)
{
	/* For early SCPs, notif_pending will probably be false anyways.  For SCPs
	 * in VC ctx, it might be set.  Regardless, when we pop back up,
	 * notif_pending will be set (for a full SCP in VC ctx). */
	__procdata.vcore_preempt_data[0].notif_pending = FALSE;
	/* order register after clearing notif_pending, handled by register_evq */
	/* Ask for a SYSCALL event when the sysc is done.  We don't need a handler,
	 * we just need the kernel to restart us from proc_yield.  If register
	 * fails, we're already done. */
	if (register_evq(sysc, &__ros_scp_simple_evq)) {
		/* Sending false for now - we want to signal proc code that we want to
		 * wait (piggybacking on the MCP meaning of this variable).  If
		 * notif_pending is set, the kernel will immediately return us. */
		__ros_syscall_noerrno(SYS_yield, FALSE, 0, 0, 0, 0, 0);
	}
	/* For early SCPs, the kernel turns off notif_pending for us.  For SCPs in
	 * vcore context that blocked (should be rare!), it'll still be set.  Other
	 * VC ctx code must handle it later. (could have coalesced notifs) */
}
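Both glibc variants register on __ros_scp_simple_evq, an event queue that asks for nothing but a kick.  A plausible definition with pared-down types; the real struct event_queue and flag values live in Akaros's headers and differ:

#define EVENT_IPI   0x001	/* assumed value: notify (kick) the target vcore */
#define EVENT_NOMSG 0x002	/* assumed value: wakeup only, no payload */

struct event_queue_sketch {	/* pared down; not the real layout */
	int ev_flags;
	int ev_vcore;
};

struct event_queue_sketch __ros_scp_simple_evq = {
	.ev_flags = EVENT_IPI | EVENT_NOMSG,	/* just wake us up */
	.ev_vcore = 0,				/* an SCP only runs vcore 0 */
};

With no handler and no message, the only effect is that proc_yield returns, which is exactly what the blocked SCP needs.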
Example #5
/* Glibc initial blockon, usable before parlib code can init things (or if it
 * never can, like for RTLD).  MCPs will need the 'uthread-aware' blockon. */
void __ros_scp_syscall_blockon(struct syscall *sysc)
{
	/* Need to disable notifs before registering, so we don't take an __notify
	 * that drops us into VC ctx and forces us to eat the notif_pending that was
	 * meant to prevent us from yielding if the syscall completed early. */
	__procdata.vcore_preempt_data[0].notif_disabled = TRUE;
	/* Ask for a SYSCALL event when the sysc is done.  We don't need a handler,
	 * we just need the kernel to restart us from proc_yield.  If register
	 * fails, we're already done. */
	if (register_evq(sysc, &__ros_scp_simple_evq)) {
		/* Sending false for now - we want to signal proc code that we want to
		 * wait (piggybacking on the MCP meaning of this variable) */
		__ros_syscall_noerrno(SYS_yield, FALSE, 0, 0, 0, 0, 0);
	}
	/* Manually doing an enable_notifs for VC 0 */
	__procdata.vcore_preempt_data[0].notif_disabled = FALSE;
	wrmb();	/* need to read after the write that enabled notifs */
	if (__procdata.vcore_preempt_data[0].notif_pending)
		__ros_syscall_noerrno(SYS_self_notify, 0, EV_NONE, 0, TRUE, 0, 0);
}
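The tail of this function is the close-the-window pattern for re-enabling notifications: clear notif_disabled, fence, then re-check notif_pending, since a notif that arrived while disabled set the flag without delivering a __notify.  parlib wraps the same steps in enable_notifs(); a sketch of such a helper under the same assumptions as the code above:

static void scp_enable_notifs(void)	/* hypothetical helper */
{
	__procdata.vcore_preempt_data[0].notif_disabled = FALSE;
	wrmb();	/* the notif_pending read must come after that write */
	/* Anything that fired while we were disabled left notif_pending set
	 * but sent no __notify; self-notify so it gets handled now. */
	if (__procdata.vcore_preempt_data[0].notif_pending)
		__ros_syscall_noerrno(SYS_self_notify, 0, EV_NONE, 0, TRUE, 0, 0);
}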
Example #6
/* This will be called from vcore context, after the current thread has yielded
 * and is trying to block on sysc.  Need to put it somewhere where we can wake it
 * up when the sysc is done.  For now, we'll have the kernel send us an event
 * when the syscall is done. */
static void pth_thread_blockon_sysc(struct uthread *uthread, void *syscall)
{
	struct syscall *sysc = (struct syscall*)syscall;
	uint32_t vcoreid = vcore_id();
	/* rip from the active queue */
	struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
	pthread->state = PTH_BLK_SYSC;
	mcs_pdr_lock(&queue_lock);
	threads_active--;
	TAILQ_REMOVE(&active_queue, pthread, tq_next);
	mcs_pdr_unlock(&queue_lock);
	/* Set things up so we can wake this thread up later */
	sysc->u_data = uthread;
	/* Register our vcore's syscall ev_q to hear about this syscall. */
	if (!register_evq(sysc, sysc_mgmt[vcoreid].ev_q)) {
		/* Lost the race with the call being done.  The kernel won't send the
		 * event.  Just restart him. */
		restart_thread(sysc);
	}
	/* GIANT WARNING: do not touch the thread after this point. */
}
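sysc_mgmt here is a per-vcore array, so completion events land on the vcore that issued the call.  A sketch of how those ev_qs might be built at 2LS init; the includes, the get_eventq() helper, the mbox constant, and the flag combination are assumptions patterned after parlib, not verbatim source:

#include <stdlib.h>
#include <parlib/event.h>	/* assumed Akaros build environment */

struct sysc_mgmt_sketch {
	struct event_queue *ev_q;	/* one queue per vcore */
};
static struct sysc_mgmt_sketch *sysc_mgmt;

static void pth_syscall_evqs_init(int max_vcores)
{
	sysc_mgmt = calloc(max_vcores, sizeof(struct sysc_mgmt_sketch));
	for (int i = 0; i < max_vcores; i++) {
		sysc_mgmt[i].ev_q = get_eventq(EV_MBOX_UCQ);	/* assumed */
		/* IPI the owning vcore; spam an INDIR if it is offline, so a
		 * completion is never lost. */
		sysc_mgmt[i].ev_q->ev_flags = EVENT_IPI | EVENT_INDIR |
		                              EVENT_SPAM_INDIR;
		sysc_mgmt[i].ev_q->ev_vcore = i;
	}
}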
Example #7
static void handle_page_fault(struct uthread *uthread, unsigned int err,
                              unsigned long aux)
{
	struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
	if (!(err & PF_VMR_BACKED)) {
		__pthread_signal_and_restart(pthread, SIGSEGV, SEGV_MAPERR, (void*)aux);
	} else {
		/* stitching for the event handler.  sysc -> uth, uth -> sysc */
		uthread->local_sysc.u_data = uthread;
		uthread->sysc = &uthread->local_sysc;
		pthread->state = PTH_BLK_SYSC;
		/* One downside is that we'll never check the return val of the
		 * syscall.  If we errored out, we wouldn't know until we PF'd again
		 * and inspected the old retval/err and other sysc fields (make sure
		 * the PF is on the same addr, etc).  We could run into this issue on
		 * truncated files too. */
		syscall_async(&uthread->local_sysc, SYS_populate_va, aux, 1);
		if (!register_evq(&uthread->local_sysc, sysc_mgmt[vcore_id()].ev_q)) {
			/* Lost the race with the call being done.  The kernel won't send the
			 * event.  Just restart him. */
			restart_thread(&uthread->local_sysc);
		}
	}
}
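The "stitching" comment is the key: when SYS_populate_va completes, the SYSCALL event handler follows u_data back to the faulting uthread.  A sketch of that handler; the ev_arg3 convention matches how Akaros delivers the syscall pointer in an event_msg, but treat the body as an assumption:

static void pth_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type,
                               void *data)
{
	struct syscall *sysc;

	assert(ev_msg);
	sysc = ev_msg->ev_arg3;	/* the kernel stuffs the sysc pointer here */
	assert(sysc);
	restart_thread(sysc);	/* sysc->u_data leads back to the uthread */
}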