Example #1
/* Helper, picks some sane defaults and changes the process into an MCP */
void vcore_change_to_m(void)
{
    int ret;
    __procdata.res_req[RES_CORES].amt_wanted = 1;
    __procdata.res_req[RES_CORES].amt_wanted_min = 1;	/* whatever */
    assert(!in_multi_mode());
    assert(!in_vcore_context());
    ret = sys_change_to_m();
    assert(!ret);
    assert(in_multi_mode());
    assert(!in_vcore_context());
}
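
A minimal usage sketch, assuming the parlib header location and that the process starts out as a single-core process (_S): once the helper returns, the process is an MCP with one vcore and can spawn uthreads or request more cores.

#include <parlib/vcore.h>	/* assumed header; older trees use <vcore.h> */

int main(void)
{
	/* Still _S here; the helper flips us to _M with one vcore granted. */
	vcore_change_to_m();
	/* From now on we are an MCP: safe to vcore_request() more cores. */
	return 0;
}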
Example #2
void ghetto_vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();
	static bool first_time = TRUE;

	temp = 0xcafebabe;
	/* vcore_context test (don't need to do this anywhere) */
	assert(in_vcore_context());

	/* old logic was moved to parlib code */
	if (current_uthread) {
		assert(vcoreid == 0);
		run_current_uthread();
	}
	/* unmask notifications once you can let go of the notif_tf and it is okay
	 * to clobber the transition stack.
	 * Check Documentation/processes.txt: 4.2.4.  In real code, you should be
	 * popping the tf of whatever user process you want (get off the x-stack) */
	enable_notifs(vcoreid);

/* end: stuff userspace needs to do to handle notifications */

	printf("Hello from vcore_entry in vcore %d with temp addr %p and temp %p\n",
	       vcoreid, &temp, temp);
	vcore_request(1);
	//mcs_barrier_wait(&b,vcore_id());
	udelay(vcoreid * 10000000);
	//exit(0);
	while(1);
}
Example #3
/**
 * Switch into vcore mode to run the scheduler code. 
 **/
void switch_to_vcore(void)
{
	uint32_t vcoreid = vcore_id();

	/* Disable notifications.  Once we do this, we might miss a notif_pending,
	 * so we need to enter vcore entry later.  Need to disable notifs so we
	 * don't get in weird loops */
	struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
	vcpd->notif_enabled = FALSE;

	/* Grab a reference to the currently running thread on this vcore */
	thread_t *t = current_thread; 

	/* Switch to the vcore's tls region */
	extern void** vcore_thread_control_blocks;
	set_tls_desc(vcore_thread_control_blocks[vcoreid], vcoreid);
	
	/* Verify that the thread the vcore thinks was running is the same as the thread
	 * that was actually running */
	assert(current_thread == t);

	/* Set the stack pointer to the stack of the vcore.
	 * We rely on this function always being inlined (via the always_inline
	 * attribute on its declaration, not shown in this snippet), so there will
	 * be no stack unwinding when this function "returns".
	 * After this call, make sure you don't use local variables. */
	set_stack_pointer((void*)vcpd->transition_stack);
	assert(in_vcore_context());

	/* Leave the current vcore completely */
	current_thread = NULL; 
	
	/* Restart the vcore and run the scheduler code */
	vcore_entry();
	assert(0);
}
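
For context, a hedged sketch of a caller: a cooperative yield in the threading library might save the current thread's registers and then drop into switch_to_vcore() so the scheduler picks the next runnable thread. save_context() is an illustrative setjmp-style helper, not part of the snippet above.

void thread_yield(void)
{
	/* Returns 0 when saving, nonzero when the thread is resumed later
	 * (setjmp-style; illustrative name). */
	if (save_context(current_thread->context))
		return;			/* we were rescheduled and resumed */
	switch_to_vcore();		/* never returns to this frame */
}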
Example #4
/* Check with the kernel to determine what vcore we are.  Normally, you should
 * never call this, since your vcoreid is stored in your TLS.  Also, if you call
 * it from a uthread, you could get migrated, so you should drop into some form
 * of vcore context (DONT_MIGRATE on) */
uint32_t get_vcoreid(void)
{
    if (!in_vcore_context()) {
        assert(current_uthread);
        assert(current_uthread->flags & UTHREAD_DONT_MIGRATE);
    }
    return __get_vcoreid();
}
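
If you must call this from uthread context, pin the uthread first so the answer cannot go stale mid-call. A sketch using parlib's uth_disable_notifs()/uth_enable_notifs(), which set and clear UTHREAD_DONT_MIGRATE for you (assumed helpers from modern parlib):

	uint32_t vcoreid;

	uth_disable_notifs();	/* sets UTHREAD_DONT_MIGRATE, disables notifs */
	vcoreid = get_vcoreid();
	printf("uthread running on vcore %d\n", vcoreid);
	uth_enable_notifs();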
Example #5
void *block_thread(void *arg)
{
	assert(!in_vcore_context());
	for (int i = 0; i < NUM_TEST_LOOPS; i++) {
		printf_safe("[A] pthread %d on vcore %d\n", pthread_self()->id, vcore_id());
		sys_block(5000 + pthread_self()->id);
	}
	return (void*)(long)pthread_self()->id;
}
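
Driving the test is ordinary pthread code; a sketch, with NUM_TEST_THREADS as an assumed constant:

	pthread_t threads[NUM_TEST_THREADS];
	void *retval;

	for (int i = 0; i < NUM_TEST_THREADS; i++)
		pthread_create(&threads[i], NULL, block_thread, NULL);
	for (int i = 0; i < NUM_TEST_THREADS; i++) {
		pthread_join(threads[i], &retval);
		printf("thread %ld done\n", (long)retval);
	}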
Example #6
void vcore_reenter(void (*entry_func)(void))
{
    assert(in_vcore_context());
    struct preempt_data *vcpd = vcpd_of(vcore_id());

    __vcore_reentry_func = entry_func;
    set_stack_pointer((void*)vcpd->vcore_stack);
    cmb();
    __vcore_reenter();
}
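
The usual reason to call it: you are deep in vcore context with nothing you still need on the current stack, and want to restart the vcore's main loop from the top of its stack. A hedged sketch; scheduler_loop() is an illustrative name, handle_events() is parlib's event pump, and run_next_thread() is the 2LS helper that appears in Example #8:

static void scheduler_loop(void)
{
	/* Fresh vcore stack: safe to clobber whatever was there before. */
	handle_events(vcore_id());
	run_next_thread();	/* 2LS helper (see Example #8); does not return */
}

/* ...from somewhere in vcore context: */
vcore_reenter(scheduler_loop);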
Example #7
/* If you are spinning in vcore context and it is likely that you don't know who
 * you are waiting on, call this.  It will spin for a bit before firing up the
 * potentially expensive __ensure_all_run().  Don't call this from uthread
 * context.  sys_change_vcore will probably mess you up. */
void cpu_relax_vc(uint32_t vcoreid)
{
    static __thread unsigned int __vc_relax_spun = 0;
    assert(in_vcore_context());
    if (__vc_relax_spun++ >= NR_RELAX_SPINS) {
        /* if vcoreid == vcore_id(), this might be expensive */
        ensure_vcore_runs(vcoreid);
        __vc_relax_spun = 0;
    }
    cpu_relax();
}
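
Typical use is a vcore-context spinloop on a flag that some other, unknown vcore will eventually set; a minimal sketch assuming an atomic_t ready flag:

	while (!atomic_read(&ready))
		cpu_relax_vc(vcore_id());	/* periodically ensures others run */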
Example #8
/**
 * Entry point for the vcore.  Basic job is to either resume the thread that
 * was interrupted in the case of a notification coming in, or to find a new
 * thread from the user level threading library and launch it.
 **/
void __attribute__((noreturn)) vcore_entry()
{
	/* Grab references to the current vcoreid, the vcore preemption data, and
	 * the vcoremap */
	assert(in_vcore_context());
	uint32_t vcoreid = vcore_id();
	struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
	struct vcore *vc = &__procinfo.vcoremap[vcoreid];

	tdebug("current=%s, vcore=%d\n",
	        current_thread ? current_thread->name : "NULL", vcoreid);

	/* Assert that notifications are disabled. Should always have notifications
	 * disabled when coming in here. */
	assert(vcpd->notif_enabled == FALSE);

	/* Put this in the loop that deals with notifications.  It will return if
	 * there is no preempt pending. */ 
	if (vc->preempt_pending)
		sys_yield(TRUE);

	/* When running vcore_entry(), we are using the TLS of the vcore, not any
	 * particular thread.  If current_thread is set in the vcore's TLS, then 
	 * that means the thread did not yield voluntarily, and was, instead, 
	 * interrupted by a notification.  We therefore need to restore the thread
	 * context from the notification trapframe, not the one stored in the 
	 * thread struct itself. */
	if (unlikely(current_thread)) {
		vcpd->notif_pending = 0;
		/* Do one last check for notifs after clearing pending */
		// TODO: call the handle_notif() here (first)

		/* Copy the notification trapframe into the current
		 * thread's trapframe */
		memcpy(&current_thread->context->utf, &vcpd->notif_tf,
		       sizeof(struct user_trapframe));

		/* Restore the context from the current_thread's trapframe */
		restore_context(current_thread->context);
		assert(0);
	}

	/* Otherwise either a vcore is coming up for the first time, or a thread
	 * has just yielded and vcore_entry() was called directly. In this case we 
	 * need to figure out which thread to schedule next on the vcore */
	run_next_thread();
	assert(0);
}
Example #9
void ghetto_vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();
	static bool first_time = TRUE;

	temp = 0xcafebabe;
	/* vcore_context test (don't need to do this anywhere) */
	assert(in_vcore_context());

	/* old logic was moved to parlib code */
	if (current_uthread) {
		assert(vcoreid == 0);
		run_current_uthread();
	}
	/* unmask notifications once you can let go of the uthread_ctx and it is
	 * okay to clobber the transition stack.
	 * Check Documentation/processes.txt: 4.2.4.  In real code, you should be
	 * popping the tf of whatever user process you want (get off the x-stack) */
	enable_notifs(vcoreid);

/* end: stuff userspace needs to do to handle notifications */

	printf("Hello from vcore_entry in vcore %d with temp addr %p and temp %p\n",
	       vcoreid, &temp, temp);

	#if 0
	/* Test sys change vcore.  Need to manually preempt the pcore vcore4 is
	 * mapped to from the monitor */
	udelay(20000000);
	if (vcoreid == 1) {
		disable_notifs(vcoreid);
		printf("VC1 changing to VC4\n");
		sys_change_vcore(4, TRUE);		/* try both of these manually */
		//sys_change_vcore(4, FALSE);		/* try both of these manually */
		printf("VC1 returned\n");
	}
	udelay(10000000);
	#endif

	vcore_request(1);
	//mcs_barrier_wait(&b,vcore_id());
	udelay(vcoreid * 10000000);
	//exit(0);
	while(1);
}
Example #10
/* This handler is usually run in vcore context, though I can imagine it being
 * called by a uthread in some other threading library. */
static void pth_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type,
                               bool overflow)
{
	struct syscall *sysc;
	assert(in_vcore_context());
	if (overflow) {
		handle_sysc_overflow();
	}
	if (!ev_msg) {
		/* Probably a bug somewhere if we had no ev_msg and no overflow */
		if (!overflow)
			printf("[pthread] crap, no ev_msg!!\n");
		return;
	}
	sysc = ev_msg->ev_arg3;
	assert(sysc);
	restart_thread(sysc);
}
Example #11
void __attribute__((constructor)) vcore_lib_init(void)
{
    uintptr_t mmap_block;

    /* Note this is racy, but okay.  The first time through, we are _S.
     * Also, this is the "lowest" level constructor for now, so we don't need
     * to call any other init functions after our run_once() call. This may
     * change in the future. */
    init_once_racy(return);

    /* Need to alloc vcore0's transition stuff here (technically, just the TLS)
     * so that schedulers can use vcore0's transition TLS before it comes up in
     * vcore_entry() */
    if (allocate_vcore_stack(0) || allocate_transition_tls(0))
        goto vcore_lib_init_fail;

    /* Initialize our VCPD event queues' ucqs, two pages per ucq, 4 per vcore */
    mmap_block = (uintptr_t)mmap(0, PGSIZE * 4 * max_vcores(),
                                 PROT_WRITE | PROT_READ,
                                 MAP_POPULATE | MAP_ANONYMOUS, -1, 0);
    /* Yeah, this doesn't fit in the error-handling scheme, but this whole
     * system doesn't really handle failure, and needs a rewrite involving less
     * mmaps/munmaps. */
    assert(mmap_block);
    /* Note we may end up doing vcore 0's elsewhere, for _Ss, or else have a
     * separate ev_q for that. */
    for (int i = 0; i < max_vcores(); i++) {
        /* four pages total for both ucqs from the big block (2 pages each) */
        ucq_init_raw(&vcpd_of(i)->ev_mbox_public.ev_msgs,
                     mmap_block + (4 * i    ) * PGSIZE,
                     mmap_block + (4 * i + 1) * PGSIZE);
        ucq_init_raw(&vcpd_of(i)->ev_mbox_private.ev_msgs,
                     mmap_block + (4 * i + 2) * PGSIZE,
                     mmap_block + (4 * i + 3) * PGSIZE);
        /* Set the lowest level entry point for each vcore. */
        vcpd_of(i)->vcore_entry = (uintptr_t)__kernel_vcore_entry;
    }
    atomic_init(&vc_req_being_handled, 0);
    assert(!in_vcore_context());
    vcore_libc_init();
    return;
vcore_lib_init_fail:
    assert(0);
}
Example #12
/* This handler is usually run in vcore context, though I can imagine it being
 * called by a uthread in some other threading library. */
static void pth_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type,
                               void *data)
{
	struct syscall *sysc;
	assert(in_vcore_context());
	/* if we just got a bit (not a msg), it should be because the process is
	 * still an SCP and hasn't started using the MCP ev_q yet (using the simple
	 * ev_q and glibc's blockon) or because the bit is still set from an old
	 * ev_q (blocking syscalls from before we could enter vcore ctx).  Either
	 * way, just return.  Note that if you screwed up the pth ev_q and made it
	 * NO_MSG, you'll never notice (we used to assert(ev_msg)). */
	if (!ev_msg)
		return;
	/* It's a bug if we don't have a msg (we're handling a syscall bit-event) */
	assert(ev_msg);
	/* Get the sysc from the message and just restart it */
	sysc = ev_msg->ev_arg3;
	assert(sysc);
	restart_thread(sysc);
}
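
For reference, the registration side is roughly the following; a sketch against the register_ev_handler() interface used elsewhere in these examples (the kernel only delivers EV_SYSCALL messages once an ev_q is also registered, as in the main() examples further down):

	/* Run pth_handle_syscall() whenever a syscall-completion event arrives;
	 * the third argument is the opaque 'data' pointer. */
	register_ev_handler(EV_SYSCALL, pth_handle_syscall, 0);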
Example #13
void vcore_init(void)
{
	uintptr_t mmap_block;
	/* Note this is racy, but okay.  The first time through, we are _S */
	init_once_racy(return);

	/* Need to alloc vcore0's transition stuff here (technically, just the TLS)
	 * so that schedulers can use vcore0's transition TLS before it comes up in
	 * vcore_entry() */
	if(allocate_transition_stack(0) || allocate_transition_tls(0))
		goto vcore_init_fail;

	/* Initialize our VCPD event queues' ucqs, two pages per ucq, 4 per vcore */
	mmap_block = (uintptr_t)mmap(0, PGSIZE * 4 * max_vcores(),
	                             PROT_WRITE | PROT_READ,
	                             MAP_POPULATE | MAP_ANONYMOUS, -1, 0);
	/* Yeah, this doesn't fit in the error-handling scheme, but this whole
	 * system doesn't really handle failure, and needs a rewrite involving less
	 * mmaps/munmaps. */
	assert(mmap_block);
	/* Note we may end up doing vcore 0's elsewhere, for _Ss, or else have a
	 * separate ev_q for that. */
	for (int i = 0; i < max_vcores(); i++) {
		/* four pages total for both ucqs from the big block (2 pages each) */
		ucq_init_raw(&vcpd_of(i)->ev_mbox_public.ev_msgs,
		             mmap_block + (4 * i    ) * PGSIZE,
		             mmap_block + (4 * i + 1) * PGSIZE);
		ucq_init_raw(&vcpd_of(i)->ev_mbox_private.ev_msgs,
		             mmap_block + (4 * i + 2) * PGSIZE,
		             mmap_block + (4 * i + 3) * PGSIZE);
	}
	atomic_init(&vc_req_being_handled, 0);
	assert(!in_vcore_context());
	/* no longer need to enable notifs on vcore 0; it is set like that by
	 * default (so you drop into vcore context immediately on transitioning
	 * to _M) */
	vc_initialized = TRUE;
	return;
vcore_init_fail:
	assert(0);
}
Example #14
/* Helper to disable notifs.  It simply checks to make sure we disabled uthread
 * migration, which is a common mistake. */
void disable_notifs(uint32_t vcoreid)
{
    if (!in_vcore_context() && current_uthread)
        assert(current_uthread->flags & UTHREAD_DONT_MIGRATE);
    __disable_notifs(vcoreid);
}
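
A sketch of the intended pairing from uthread context: mark the uthread DONT_MIGRATE before reading the vcoreid, disable notifs, and undo both in reverse order. This is essentially what parlib's uth_disable_notifs()/uth_enable_notifs() wrap up for you; cmb() is the compiler barrier used elsewhere in these examples.

	current_uthread->flags |= UTHREAD_DONT_MIGRATE;
	cmb();	/* keep the flag write ordered before vcore_id() */
	uint32_t vcoreid = vcore_id();
	disable_notifs(vcoreid);
	/* ...critical section: no notifs, no migration... */
	enable_notifs(vcoreid);
	current_uthread->flags &= ~UTHREAD_DONT_MIGRATE;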
Example #15
/* Note that if you make a global ctor (not static, like this used to be),
 * any shared objects that you load when the binary is built with -rdynamic will
 * run the global ctor from the binary, not the one from the .so. */
void vcore_lib_init(void)
{
	/* Note this is racy, but okay.  The first time through, we are _S.
	 * Also, this is the "lowest" level constructor for now, so we don't
	 * need to call any other init functions after our run_once() call. This
	 * may change in the future. */
	parlib_init_once_racy(return);
	/* Need to alloc vcore0's transition stuff here (technically, just the
	 * TLS) so that schedulers can use vcore0's transition TLS before it
	 * comes up in vcore_entry() */
	prep_vcore_0();
	assert(!in_vcore_context());
	vcore_libc_init();
}

static void __attribute__((constructor)) vcore_lib_ctor(void)
{
	if (__in_fake_parlib())
		return;
	vcore_lib_init();
}

/* Helper functions used to reenter at the top of a vcore's stack for an
 * arbitrary function */
static void __attribute__((noinline, noreturn)) __vcore_reenter(void)
{
	__vcore_reentry_func();
	assert(0);	/* noreturn: the reentry function must never return */
}
Example #16
/* to trick uthread_create() */
int main(int argc, char** argv)
{
	uint32_t vcoreid;
	int retval;

	/* Initialize our barrier. */
	mcs_barrier_init(&b, max_vcores());

	/* vcore_context test */
	assert(!in_vcore_context());
	
	/* prep indirect ev_q.  Note we grab a big one */
	indirect_q = get_eventq(EV_MBOX_UCQ);
	indirect_q->ev_flags = EVENT_IPI;
	indirect_q->ev_vcore = 1;			/* IPI core 1 */
	indirect_q->ev_handler = 0;
	printf("Registering %08p for event type %d\n", indirect_q,
	       EV_FREE_APPLE_PIE);
	register_kevent_q(indirect_q, EV_FREE_APPLE_PIE);

	/* handle events: just want to print out what we get.  This is just a
	 * quick set of handlers, not a registration for a kevent. */
	for (int i = 0; i < MAX_NR_EVENT; i++)
		register_ev_handler(i, handle_generic, 0);
	/* Want to use the default ev_ev (which we just overwrote) */
	register_ev_handler(EV_EVENT, handle_ev_ev, 0);
	/* vcore_lib_init() done in vcore_request() now. */
	/* Set up event reception.  For example, this will allow us to receive an
	 * event and IPI for USER_IPIs on vcore 0.  Check event.c for more stuff.
	 * Note you don't have to register for USER_IPIs to receive ones you send
	 * yourself with sys_self_notify(). */
	enable_kevent(EV_USER_IPI, 0, EVENT_IPI | EVENT_VCORE_PRIVATE);
	/* Receive pending preemption events.  (though there's no PP handler) */
	struct event_queue *ev_q = get_eventq_vcpd(0, EVENT_VCORE_PRIVATE);
	ev_q->ev_flags = EVENT_IPI | EVENT_VCORE_APPRO;
	register_kevent_q(ev_q, EV_PREEMPT_PENDING);
	/* We also receive preemption events, it is set up in uthread.c */

	/* Inits a thread for us, though we won't use it.  Just a hack to get into
	 * _M mode.  Note this requests one vcore for us */
	struct uthread dummy = {0};
	uthread_2ls_init(&dummy, &ghetto_sched_ops);
	uthread_mcp_init();
	/* Reset the blockon to be the spinner...  This is really shitty.  Any
	 * blocking calls after we become an MCP and before this will fail.  This is
	 * just mhello showing its warts due to trying to work outside uthread.c */
	ros_syscall_blockon = __ros_syscall_spinon;

	if ((vcoreid = vcore_id())) {
		printf("Should never see me! (from vcore %d)\n", vcoreid);
	} else { // core 0
		temp = 0xdeadbeef;
		printf("Hello from vcore %d with temp addr = %p and temp = %p\n",
		       vcoreid, &temp, temp);
		printf("Multi-Goodbye, world, from PID: %d!\n", sys_getpid());
		printf("Requesting %d vcores\n", max_vcores() - 1);
		retval = vcore_request(max_vcores() - 1); /* since we already have 1 */
		//retval = vcore_request(5);
		printf("This is vcore0, right after vcore_request, retval=%d\n", retval);
		/* vcore_context test */
		assert(!in_vcore_context());
	}

	//#if 0
	/* test notifying my vcore2 */
	udelay(5000000);
	printf("Vcore 0 self-notifying vcore 2 with notif 4!\n");
	struct event_msg msg;
	msg.ev_type = 4;
	sys_self_notify(2, 4, &msg, TRUE);
	udelay(5000000);
	printf("Vcore 0 notifying itself with notif 6!\n");
	msg.ev_type = 6;
	sys_notify(sys_getpid(), 6, &msg);
	udelay(1000000);
	//#endif

	/* test loop for restarting a uthread_ctx */
	if (vcoreid == 0) {
		int ctr = 0;
		while(1) {
			printf("Vcore %d Spinning (%d), temp = %08x!\n", vcoreid, ctr++, temp);
			udelay(5000000);
			//exit(0);
		}
	}

	printf("Vcore %d Done!\n", vcoreid);
	//mcs_barrier_wait(&b,vcore_id());

	printf("All Cores Done!\n", vcoreid);
	while(1); // manually kill from the monitor
	/* since everyone should cleanup their uthreads, even if they don't plan on
	 * calling their code or want uthreads in the first place. <3 */
	uthread_cleanup(&dummy);
	return 0;
}
Example #17
int main(int argc, char** argv)
{
	uint32_t vcoreid;
	int retval;

	mcs_barrier_init(&b, max_vcores());

	/* vcore_context test */
	assert(!in_vcore_context());
	
	/* prep indirect ev_q.  Note we grab a big one */
	indirect_q = get_big_event_q();
	indirect_q->ev_flags = EVENT_IPI;
	indirect_q->ev_vcore = 1;			/* IPI core 1 */
	indirect_q->ev_handler = 0;
	printf("Registering %08p for event type %d\n", indirect_q,
	       EV_FREE_APPLE_PIE);
	register_kevent_q(indirect_q, EV_FREE_APPLE_PIE);

	/* handle events: just want to print out what we get.  This is just a
	 * quick set of handlers, not a registration for a kevent. */
	for (int i = 0; i < MAX_NR_EVENT; i++)
		ev_handlers[i] = handle_generic;
	/* Want to use the default ev_ev (which we just overwrote) */
	ev_handlers[EV_EVENT] = handle_ev_ev;
	/* vcore_init() done in vcore_request() now. */
	/* Set up event reception.  For example, this will allow us to receive an
	 * event and IPI for USER_IPIs on vcore 0.  Check event.c for more stuff.
	 * Note you don't have to register for USER_IPIs to receive ones you send
	 * yourself with sys_self_notify(). */
	enable_kevent(EV_USER_IPI, 0, EVENT_IPI);
	/* Receive pending preemption events.  Can also get a MSG if you want. */
	struct event_queue *ev_q = get_event_q();
	ev_q->ev_flags = EVENT_IPI | EVENT_NOMSG | EVENT_VCORE_APPRO;
	register_kevent_q(ev_q, EV_PREEMPT_PENDING);

	/* Makes a thread for us, though we won't use it.  Just a hack to get into
	 * _M mode.  Note this requests one vcore for us */
	uthread_create(dummy, 0);

	if ((vcoreid = vcore_id())) {
		printf("Should never see me! (from vcore %d)\n", vcoreid);
	} else { // core 0
		temp = 0xdeadbeef;
		printf("Hello from vcore %d with temp addr = %p and temp = %p\n",
		       vcoreid, &temp, temp);
		printf("Multi-Goodbye, world, from PID: %d!\n", sys_getpid());
		//retval = sys_resource_req(RES_CORES, 2, 0);
		printf("Requesting %d vcores\n", max_vcores() - 1);
		retval = vcore_request(max_vcores() - 1); /* since we already have 1 */
		//retval = vcore_request(5);
		printf("This is vcore0, right after vcore_request, retval=%d\n", retval);
		/* vcore_context test */
		assert(!in_vcore_context());
	}

	/* test notifying my vcore2 */
	udelay(5000000);
	printf("Vcore 0 self-notifying vcore 2 with notif 4!\n");
	struct event_msg msg;
	msg.ev_type = 4;
	sys_self_notify(2, 4, &msg);
	udelay(5000000);
	printf("Vcore 0 notifying itself with notif 3!\n");
	msg.ev_type = 3;
	sys_notify(sys_getpid(), 3, &msg);
	udelay(1000000);

	/* test loop for restarting a notif_tf */
	if (vcoreid == 0) {
		int ctr = 0;
		while(1) {
			printf("Vcore %d Spinning (%d), temp = %08x!\n", vcoreid, ctr++, temp);
			udelay(5000000);
			//exit(0);
		}
	}

	printf("Vcore %d Done!\n", vcoreid);
	//mcs_barrier_wait(&b,vcore_id());

	printf("All Cores Done!\n", vcoreid);
	while(1); // manually kill from the monitor
	return 0;
}