Example #1
/* Like smp_idle(), this will put the core in a state where it can only be woken
 * up by an IPI.  For now, this is a halt.  Maybe an mwait in the future.
 *
 * This will return if an event was pending (could be the one you were waiting
 * for) or if the halt failed for some reason, such as a concurrent RKM.  If
 * successful, this will not return at all, and the vcore will restart from the
 * top next time it wakes.  Any sort of IRQ will wake the core.
 *
 * Alternatively, I might make this so it never returns, if that's easier to
 * work with (similar issues with yield). */
void vcore_idle(void)
{
    uint32_t vcoreid = vcore_id();
    /* Once we enable notifs, the calling context will be treated like a uthread
     * (saved into the uth slot).  We don't want to ever run it again, so we
     * need to make sure there's no cur_uth. */
    assert(!current_uthread);
    /* This clears notif_pending (check, signal, check again pattern). */
    if (handle_events(vcoreid))
        return;
    /* This enables notifs, but also checks notif pending.  At this point, any
     * new notifs will restart the vcore from the top. */
    enable_notifs(vcoreid);
    /* From now, til we get into the kernel, any notifs will permanently destroy
     * this context and start the VC from the top.
     *
     * Once we're in the kernel, any messages (__notify, __preempt), will be
     * RKMs.  halt will need to check for those atomically.  Checking for
     * notif_pending in the kernel (sleep only if not set) is not enough, since
 * not all reasons for the kernel to stay awake set notif_pending (e.g.,
     * __preempts and __death).
     *
     * At this point, we're out of VC ctx, so anyone who sets notif_pending
     * should also send an IPI / __notify */
    sys_halt_core(0);
    /* in case halt returns without actually restarting the VC ctx. */
    disable_notifs(vcoreid);
}
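As a rough illustration of where vcore_idle() would be called, here is a minimal sketch (not taken from the source) of a 2LS vcore entry routine that parks the core when it finds nothing to run. pick_next_uthread() is a hypothetical scheduler hook; handle_events(), run_uthread(), and vcore_id() appear in the examples on this page, though exact signatures can differ between parlib versions.

void example_vcore_entry(void)
{
	while (1) {
		struct uthread *next;

		/* Drain pending events first (check, signal, check again). */
		handle_events(vcore_id());
		next = pick_next_uthread();	/* hypothetical 2LS run-queue pop */
		if (next)
			run_uthread(next);	/* does not return */
		/* Nothing runnable: park until an IPI / notif wakes the vcore.
		 * If vcore_idle() returns, an event arrived or the halt failed,
		 * so loop back into the scheduler. */
		vcore_idle();
	}
}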
Example #2
int main(int argc, char** argv)
{
	uint32_t vcoreid;
	int nr_vcores;

	if (argc < 2)
		nr_vcores = max_vcores();
	else
		nr_vcores = atoi(argv[1]);

	/* Inits a thread for us, though we won't use it.  Just a hack to get into
	 * _M mode.  Note this requests one vcore for us */
	struct uthread dummy = {0};
	uthread_2ls_init(&dummy, &ghetto_sched_ops);
	uthread_mcp_init();

	/* Reset the blockon to be the spinner...  This is really shitty.  Any
	 * blocking calls after we become an MCP and before this will fail.  This is
	 * just mhello showing its warts due to trying to work outside uthread.c */
	ros_syscall_blockon = __ros_syscall_spinon;

	vcore_request(nr_vcores - 1); /* since we already have 1 */

	while (1)
		sys_halt_core(0);

	return 0;
}
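For context on the ros_syscall_blockon swap in the example above: once the process is an MCP, the default blockon path can no longer be relied on, so mhello installs a handler that simply spins until the kernel completes the syscall. The snippet below is only a sketch of that idea, not the parlib definition; the SC_DONE flag and the atomic_read()/cpu_relax() helpers are assumptions about the environment.

/* Sketch of a spinning blockon: busy-wait until the kernel marks the syscall
 * complete.  SC_DONE is assumed to be the completion flag; the real spinner in
 * parlib may check additional flags. */
static void example_syscall_spinon(struct syscall *sysc)
{
	while (!(atomic_read(&sysc->flags) & SC_DONE))
		cpu_relax();
}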
Example #3
void ghetto_vcore_entry(void)
{
	/* Vcore 0 resumes the uthread it interrupted (main()'s context);
	 * run_current_uthread() does not return. */
	if (vcore_id() == 0)
		run_current_uthread();

	/* Every other vcore has nothing to run, so just halt in a loop. */
	while (1)
		sys_halt_core(0);
}
Example #4
/* This will keep a core from spinning forever, but will also allow it to still
 * schedule() and run _S processes.  arg1 is the number of loops (0 for
 * forever), and arg2 is how many usec to wait per loop. */
int main(int argc, char** argv)
{
	unsigned long nr_loops = 1;			/* default, 1 loop */
	unsigned long timeout = 5000000;	/* default, 5 sec */
	int i = 0;
	if (argc > 1)
		nr_loops = strtol(argv[1], 0, 10);
	if (argc > 2)
		timeout = strtol(argv[2], 0, 10);
	printf("Idling for %d usec for %d loops\n", timeout, nr_loops);
	while (!nr_loops || i++ < nr_loops) {
		sys_halt_core(timeout);
		sys_yield(0);
	}
	return 0;
}