Example #1
/* Called from vcore entry.  Options usually include restarting whoever was
 * running there before or running a new thread.  Events are handled out of
 * event.c (table of function pointers, stuff like that). */
static void __attribute__((noreturn)) pth_sched_entry(void)
{
	uint32_t vcoreid = vcore_id();
	if (current_uthread) {
		/* Prep the pthread to run any pending posix signal handlers registered
		 * via pthread_kill once it is restored. */
		__pthread_prep_for_pending_posix_signals((pthread_t)current_uthread);
		/* Run the thread itself */
		run_current_uthread();
		assert(0);
	}
	/* no one currently running, so lets get someone from the ready queue */
	struct pthread_tcb *new_thread = NULL;
	/* Try to get a thread.  If we get one, we'll break out and run it.  If not,
	 * we'll try to yield.  vcore_yield() might return, if we lost a race and
	 * had a new event come in, one that may make us able to get a new_thread */
	do {
		handle_events(vcoreid);
		__check_preempt_pending(vcoreid);
		mcs_pdr_lock(&queue_lock);
		new_thread = TAILQ_FIRST(&ready_queue);
		if (new_thread) {
			TAILQ_REMOVE(&ready_queue, new_thread, tq_next);
			TAILQ_INSERT_TAIL(&active_queue, new_thread, tq_next);
			threads_active++;
			threads_ready--;
			mcs_pdr_unlock(&queue_lock);
			/* If you see what looks like the same uthread running in multiple
			 * places, your list might be jacked up.  Turn this on. */
			printd("[P] got uthread %08p on vc %d state %08p flags %08p\n",
			       new_thread, vcoreid,
			       ((struct uthread*)new_thread)->state,
			       ((struct uthread*)new_thread)->flags);
			break;
		}
		mcs_pdr_unlock(&queue_lock);
		/* no new thread, try to yield */
		printd("[P] No threads, vcore %d is yielding\n", vcore_id());
		/* TODO: you can imagine having something smarter here, like spin for a
		 * bit before yielding (or not at all if you want to be greedy). */
		if (can_adjust_vcores)
			vcore_yield(FALSE);
		if (!parlib_wants_to_be_mcp)
			sys_yield(FALSE);
	} while (1);
	assert(new_thread->state == PTH_RUNNABLE);
	/* Prep the pthread to run any pending posix signal handlers registered
	 * via pthread_kill once it is restored. */
	__pthread_prep_for_pending_posix_signals(new_thread);
	/* Run the thread itself */
	run_uthread((struct uthread*)new_thread);
	assert(0);
}
Example #2
int bthread_cond_wait(bthread_cond_t *c, bthread_mutex_t *m)
{
  /* Mark this vcore as a waiter before dropping the mutex, so a signal
   * can't slip in between the unlock and the spin below. */
  c->waiters[vcore_id()] = 1;
  bthread_mutex_unlock(m);

  /* Spin until a signaler clears our per-vcore flag. */
  volatile int *poll = &c->waiters[vcore_id()];
  while (*poll)
    cpu_relax();

  bthread_mutex_lock(m);

  return 0;
}
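For context, a minimal sketch of the signal side that this spin loop pairs with, assuming the per-vcore waiters[] convention above and the max_vcores() helper seen in other examples; the real bthread implementation may differ.

int bthread_cond_signal(bthread_cond_t *c)
{
  /* Clear the first waiter flag we find; that vcore's spin loop in
   * bthread_cond_wait() will then fall through and re-take the mutex. */
  for (int i = 0; i < max_vcores(); i++) {
    if (c->waiters[i]) {
      c->waiters[i] = 0;
      break;
    }
  }
  return 0;
}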
Example #3
void vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();

/* begin: stuff userspace needs to do to handle notifications */
	struct vcore *vc = &__procinfo.vcoremap[vcoreid];
	struct preempt_data *vcpd;
	vcpd = &__procdata.vcore_preempt_data[vcoreid];
	
	/* Let's try to restart vcore0's context.  Note this doesn't do anything to
	 * set the appropriate TLS.  On x86, this will involve changing the LDT
	 * entry for this vcore to point to the TCB of the new user-thread. */
	if (vcoreid == 0) {
		handle_events(vcoreid);
		set_tls_desc(core0_tls, 0);
		assert(__vcoreid == 0); /* in case anyone uses this */
		/* Load silly state (Floating point) too */
		pop_user_ctx(&vcpd->uthread_ctx, vcoreid);
		panic("should never see me!");
	}	
/* end: stuff userspace needs to do to handle notifications */

	/* all other vcores are down here */
	core1_up = TRUE;

	while (core1_up)
		cpu_relax();
	printf("Proc %d's vcore %d is yielding\n", getpid(), vcoreid);
	sys_yield(0);

	while(1);
}
Example #4
File: ceq.c Project: anandab/akaros
/* Helper, returns an index into the events array from the ceq ring.  -1 if the
 * ring was empty when we looked (could be filled right after we looked).  This
 * is the same algorithm used with BCQs, but with a magic value (-1) instead of
 * a bool to track whether or not the slot is ready for consumption. */
static int32_t get_ring_idx(struct ceq *ceq)
{
	long pvt_idx, prod_idx;
	int32_t ret;
	do {
		prod_idx = atomic_read(&ceq->prod_idx);
		pvt_idx = atomic_read(&ceq->cons_pvt_idx);
		if (__ring_empty(prod_idx, pvt_idx))
			return -1;
	} while (!atomic_cas(&ceq->cons_pvt_idx, pvt_idx, pvt_idx + 1));
	/* We claimed our slot, which is pvt_idx.  The new cons_pvt_idx is advanced
	 * by 1 for the next consumer.  Now we need to wait on the kernel to fill
	 * the value: */
	while ((ret = ceq->ring[pvt_idx & (ceq->ring_sz - 1)]) == -1)
		cpu_relax();
	/* Set the value back to -1 for the next time the slot is used */
	ceq->ring[pvt_idx & (ceq->ring_sz - 1)] = -1;
	/* We now have our entry.  We need to make sure the pub_idx is updated.  All
	 * consumers are doing this.  We can just wait on all of them to update the
	 * cons_pub to our location, then we update it to the next.
	 *
	 * We're waiting on other vcores, but we don't know which one(s). */
	while (atomic_read(&ceq->cons_pub_idx) != pvt_idx)
		cpu_relax_vc(vcore_id());	/* wait on all of them */
	/* This is the only time we update cons_pub.  We also know no one else is
	 * updating it at this moment; the while loop acts as a lock, such that
	 * no one gets to this point until pub == their pvt_idx, all of which are
	 * unique. */
	/* No rwmb needed, it's the same variable (cons_pub) */
	atomic_set(&ceq->cons_pub_idx, pvt_idx + 1);
	return ret;
}
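For reference, a sketch of the __ring_empty() check used above, under the assumption that prod_idx and cons_pvt_idx are free-running counters that are equal exactly when there is nothing left to claim; the actual helper in ceq.c may be defined differently.

static inline bool __ring_empty(long prod_idx, long pvt_idx)
{
	/* Empty when the consumer's private index has caught up to the
	 * producer's index. */
	return prod_idx == pvt_idx;
}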
Example #5
/* Prep a pthread to run a signal handler.  The original context of the pthread
 * is saved, and a new context with a new stack is set up to run the signal
 * handler the next time the pthread is run. */
static void __pthread_prep_sighandler(struct pthread_tcb *pthread,
                                      void (*entry)(void),
                                      struct siginfo *info)
{
	struct user_context *ctx;

	pthread->sigdata = alloc_sigdata();
	if (info != NULL)
		pthread->sigdata->info = *info;
	init_user_ctx(&pthread->sigdata->u_ctx,
	              (uintptr_t)entry,
	              (uintptr_t)pthread->sigdata->stack);
	if (pthread->uthread.flags & UTHREAD_SAVED) {
		ctx = &pthread->uthread.u_ctx;
		if (pthread->uthread.flags & UTHREAD_FPSAVED) {
			pthread->sigdata->as = pthread->uthread.as;
			pthread->uthread.flags &= ~UTHREAD_FPSAVED;
		}
	} else {
		assert(current_uthread == &pthread->uthread);
		ctx = &vcpd_of(vcore_id())->uthread_ctx;
		save_fp_state(&pthread->sigdata->as);
	}
	swap_user_contexts(ctx, &pthread->sigdata->u_ctx);
}
Example #6
/* This will be called from vcore context, after the current thread has yielded
 * and is trying to block on sysc.  Need to put it somewhere where we can wake it
 * up when the sysc is done.  For now, we'll have the kernel send us an event
 * when the syscall is done. */
void pth_blockon_sysc(struct syscall *sysc)
{
	int old_flags;
	bool need_to_restart = FALSE;
	uint32_t vcoreid = vcore_id();

	assert(current_uthread->state == UT_BLOCKED);
	/* rip from the active queue */
	struct mcs_lock_qnode local_qn = {0};
	struct pthread_tcb *pthread = (struct pthread_tcb*)current_uthread;
	mcs_lock_notifsafe(&queue_lock, &local_qn);
	threads_active--;
	TAILQ_REMOVE(&active_queue, pthread, next);
	mcs_unlock_notifsafe(&queue_lock, &local_qn);

	/* Set things up so we can wake this thread up later */
	sysc->u_data = current_uthread;
	/* Put the uthread on the pending list.  Note the ordering.  We must be on
	 * the list before we register the ev_q.  All sysc's must be tracked before
	 * we tell the kernel to signal us. */
	TAILQ_INSERT_TAIL(&sysc_mgmt[vcoreid].pending_syscs, pthread, next);
	/* Safety: later we'll make sure we restart on the core we slept on */
	pthread->vcoreid = vcoreid;
	/* Register our vcore's syscall ev_q to hear about this syscall. */
	if (!register_evq(sysc, &sysc_mgmt[vcoreid].ev_q)) {
		/* Lost the race with the call being done.  The kernel won't send the
		 * event.  Just restart him. */
		restart_thread(sysc);
	}
	/* GIANT WARNING: do not touch the thread after this point. */
}
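A sketch of the restart path taken when register_evq() loses the race with the syscall completing. It assumes the uthread was stashed in sysc->u_data (as done above) and that the 2LS exposes a uthread_runnable()-style hook; the real restart_thread() likely does more bookkeeping.

static void restart_thread(struct syscall *sysc)
{
	struct uthread *restartee = (struct uthread*)sysc->u_data;

	/* The blocking path stored the uthread here before registering. */
	assert(restartee);
	sysc->u_data = 0;
	/* Hand it back to the scheduler; uthread_runnable() is assumed here. */
	uthread_runnable(restartee);
}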
Example #7
void vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();

	if (vcoreid) {
		mcs_barrier_wait(&b, vcoreid);
		udelay(5000000);
		if (vcoreid == 1)
			printf("Proc %d's vcores are yielding\n", getpid());
		sys_yield(0);
	} else {
		/* trip the barrier here, all future times are in the loop */
		mcs_barrier_wait(&b, vcoreid);
		while (1) {
			udelay(15000000);
			printf("Proc %d requesting its cores again\n", getpid());
			begin = read_tsc();
			sys_resource_req(RES_CORES, max_vcores(), 1, REQ_SOFT);
			mcs_barrier_wait(&b, vcoreid);
			end = read_tsc();
			printf("Took %llu usec (%llu nsec) to get my yielded cores back.\n",
			       udiff(begin, end), ndiff(begin, end));
			printf("[T]:010:%llu:%llu\n",
			       udiff(begin, end), ndiff(begin, end));
		}
	}
	printf("We're screwed!\n");
	exit(-1);
}
Example #8
/* Like smp_idle(), this will put the core in a state that it can only be woken
 * up by an IPI.  For now, this is a halt.  Maybe an mwait in the future.
 *
 * This will return if an event was pending (could be the one you were waiting
 * for) or if the halt failed for some reason, such as a concurrent RKM.  If
 * successful, this will not return at all, and the vcore will restart from the
 * top next time it wakes.  Any sort of IRQ will wake the core.
 *
 * Alternatively, I might make this so it never returns, if that's easier to
 * work with (similar issues with yield). */
void vcore_idle(void)
{
    uint32_t vcoreid = vcore_id();
    /* Once we enable notifs, the calling context will be treated like a uthread
     * (saved into the uth slot).  We don't want to ever run it again, so we
     * need to make sure there's no cur_uth. */
    assert(!current_uthread);
    /* This clears notif_pending (check, signal, check again pattern). */
    if (handle_events(vcoreid))
        return;
    /* This enables notifs, but also checks notif pending.  At this point, any
     * new notifs will restart the vcore from the top. */
    enable_notifs(vcoreid);
    /* From now, til we get into the kernel, any notifs will permanently destroy
     * this context and start the VC from the top.
     *
     * Once we're in the kernel, any messages (__notify, __preempt), will be
     * RKMs.  halt will need to check for those atomically.  Checking for
     * notif_pending in the kernel (sleep only if not set) is not enough, since
     * not all reasons for the kernel to stay awake set notif_pending (e.g.,
     * __preempts and __death).
     *
     * At this point, we're out of VC ctx, so anyone who sets notif_pending
     * should also send an IPI / __notify */
    sys_halt_core(0);
    /* in case halt returns without actually restarting the VC ctx. */
    disable_notifs(vcoreid);
}
Example #9
// What if someone calls waiton_syscall() on the same desc several times?
int waiton_syscall(syscall_desc_t* desc)
{
	int retval = 0;
	if (desc == NULL || desc->channel == NULL){
		errno = EFAIL;
		return -1;
	}
	// Make sure we were given a desc with a non-NULL frontring.  This could
	// happen if someone forgot to check the error code on the paired syscall.
	syscall_front_ring_t *fr =  &desc->channel->sysfr;
	
	if (!fr){
		errno = EFAIL;
		return -1;
	}
	printf("waiting %d\n", vcore_id());
	syscall_rsp_t* rsp = RING_GET_RESPONSE(fr, desc->idx);

	// ignoring the ring push response from the kernel side now
	while (atomic_read(&rsp->sc->flags) != SC_DONE)
		cpu_relax();
	// memcpy(rsp, rsp_inring, sizeof(*rsp));
	
	// run a cleanup function for this desc, if available
	if (rsp->cleanup)
		rsp->cleanup(rsp->data);
	if (RSP_ERRNO(rsp)) {
		errno = RSP_ERRNO(rsp);
		retval = -1;
	} else
		retval = RSP_RESULT(rsp);
	atomic_inc((atomic_t*) &(fr->rsp_cons));
	return retval;
}
Example #10
/**
 * Switch into vcore mode to run the scheduler code. 
 **/
void switch_to_vcore() {

	uint32_t vcoreid = vcore_id();

	/* Disable notifications.  Once we do this, we might miss a notif_pending,
	 * so we need to enter vcore entry later.  Need to disable notifs so we
	 * don't get in weird loops */
	struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
	vcpd->notif_enabled = FALSE;

	/* Grab a reference to the currently running thread on this vcore */
	thread_t *t = current_thread; 

	/* Switch to the vcore's tls region */
	extern void** vcore_thread_control_blocks;
	set_tls_desc(vcore_thread_control_blocks[vcoreid], vcoreid);
	
	/* Verify that the thread the vcore thinks was running is the same as the thread
	 * that was actually running */
	assert(current_thread == t);

	/* Set the stack pointer to the stack of the vcore. 
	 * We know this function is always inlined because of the attribute we set
	 * on it, so there will be no stack unwinding when this function "returns".
	 * After this call, make sure you don't use local variables. */
	set_stack_pointer((void*)vcpd->transition_stack);
	assert(in_vcore_context());

	/* Leave the current vcore completely */
	current_thread = NULL; 
	
	/* Restart the vcore and run the scheduler code */
	vcore_entry();
	assert(0);
}
Example #11
void vcore_entry(void)
{
	uint32_t vcoreid;
	static int first_time = 1; // used by vcore2

	vcoreid = vcore_id();
	printf("Hello from vcore_entry in vcore %d\n", vcoreid);

	if ((vcoreid == 2) && first_time) {
		first_time = 0;
		switch (test) {
			case TEST_INCREMENTAL_CHANGES:
				// Testing asking for less than we already have
				udelay(1000000);
				printf("Asking for too few:\n");
				vcore_request_more(2);
				// Testing getting more while running
				printf("Asking for more while running:\n");
				udelay(1000000);
				vcore_request_more(5);
				break;
			case TEST_YIELD_OUT_OF_ORDER:
				printf("Core %d yielding\n", vcoreid);
				sys_yield(0);
				break;
			case TEST_YIELD_0_OUT_OF_ORDER:
				udelay(7500000);
				printf("Core 0 should have yielded, asking for another\n");
				vcore_request_more(5);
		}
	}
	global_tests(vcoreid);
	printf("Vcore %d Done!\n", vcoreid);
}
Example #12
void ghetto_vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();
	static bool first_time = TRUE;

	temp = 0xcafebabe;
	/* vcore_context test (don't need to do this anywhere) */
	assert(in_vcore_context());

	/* old logic was moved to parlib code */
	if (current_uthread) {
		assert(vcoreid == 0);
		run_current_uthread();
	}
	/* unmask notifications once you can let go of the notif_tf and it is okay
	 * to clobber the transition stack.
	 * Check Documentation/processes.txt: 4.2.4.  In real code, you should be
	 * popping the tf of whatever user process you want (get off the x-stack) */
	enable_notifs(vcoreid);

/* end: stuff userspace needs to do to handle notifications */

	printf("Hello from vcore_entry in vcore %d with temp addr %p and temp %p\n",
	       vcoreid, &temp, temp);
	vcore_request(1);
	//mcs_barrier_wait(&b,vcore_id());
	udelay(vcoreid * 10000000);
	//exit(0);
	while(1);
}
Example #13
File: mcs.c Project: 7perl/akaros
/* Actual MCS-PDRO locks.  Don't worry about initializing any fields of qnode.
 * We'll do vcoreid here, and the locking code deals with the other fields */
void mcs_pdro_lock(struct mcs_pdro_lock *lock, struct mcs_pdro_qnode *qnode)
{
	/* Disable notifs, if we're an _M uthread */
	uth_disable_notifs();
	cmb();	/* in the off-chance the compiler wants to read vcoreid early */
	qnode->vcoreid = vcore_id();
	__mcs_pdro_lock(lock, qnode);
}
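For symmetry, a sketch of the matching unlock, assuming an __mcs_pdro_unlock() helper that mirrors __mcs_pdro_lock() and a uth_enable_notifs() counterpart to uth_disable_notifs(); the real mcs.c may differ slightly.

void mcs_pdro_unlock(struct mcs_pdro_lock *lock, struct mcs_pdro_qnode *qnode)
{
	/* Pass the lock to our successor (or clear it), then let notifs back in. */
	__mcs_pdro_unlock(lock, qnode);
	uth_enable_notifs();
}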
Example #14
void ghetto_vcore_entry(void)
{
	if (vcore_id() == 0)
		run_current_uthread();

	while (1)
		sys_halt_core(0);
}
Example #15
int main(int argc, char** argv)
{
	uint32_t vcoreid = vcore_id();
	int retval = 0;

	mcs_barrier_init(&b, max_vcores());

/* begin: stuff userspace needs to do before switching to multi-mode */
	vcore_lib_init();
	#if 0
	/* tell the kernel where and how we want to receive notifications */
	struct notif_method *nm;
	for (int i = 0; i < MAX_NR_NOTIF; i++) {
		nm = &__procdata.notif_methods[i];
		nm->flags |= NOTIF_WANTED | NOTIF_MSG | NOTIF_IPI;
		nm->vcoreid = i % 2; // vcore0 or 1, keepin' it fresh.
	}
	#endif
	/* Need to save this somewhere that you can find it again when restarting
	 * core0 */
	core0_tls = get_tls_desc(0);
	/* Need to save our floating point state somewhere (like in the
	 * user_thread_tcb) so it can be restarted too */
/* end: stuff userspace needs to do before switching to multi-mode */

	/* get into multi mode */
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");

	printf("Proc %d requesting another vcore\n", getpid());
	begin = read_tsc();
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");
	while (!core1_up)
		cpu_relax();
	end = read_tsc();
	printf("Took %llu usec (%llu nsec) to receive 1 core (cold).\n",
	       udiff(begin, end), ndiff(begin, end));
	printf("[T]:002:%llu:%llu:1:C.\n",
	       udiff(begin, end), ndiff(begin, end));
	core1_up = FALSE;
	udelay(2000000);
	printf("Proc %d requesting the vcore again\n", getpid());
	begin = read_tsc();
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");
	while (!core1_up)
		cpu_relax();
	end = read_tsc();
	printf("Took %llu usec (%llu nsec) to receive 1 core (warm).\n",
	       udiff(begin, end), ndiff(begin, end));
	printf("[T]:002:%llu:%llu:1:W.\n",
	       udiff(begin, end), ndiff(begin, end));
	return 0;
}
Example #16
File: mcs.c Project: 7perl/akaros
/* Similar to the original PDR lock, this tracks the lockholder for better
 * recovery from preemptions.  Under heavy contention, changing to the
 * lockholder instead of pred makes it more likely to have a vcore outside the
 * MCS chain handle the preemption.  If that never happens, performance will
 * suffer.
 *
 * Simply checking the lockholder causes a lot of unnecessary traffic, so we
 * first look for signs of preemption in read-mostly locations (by comparison,
 * the lockholder changes on every lock/unlock).
 *
 * We also use the "qnodes are in the lock" style, which is slightly slower than
 * using the stack in regular MCS/MCSPDR locks, but it speeds PDR up a bit by
 * not having to read other qnodes' memory to determine their vcoreid.  The
 * slowdown may be due to some weird caching/prefetch settings (like Adjacent
 * Cacheline Prefetch).
 *
 * Note that these locks, like all PDR locks, have opportunities to accidentally
 * ensure some vcore runs that isn't in the chain.  Whenever we read lockholder
 * or even pred, that particular vcore might subsequently unlock and then get
 * preempted (or change_to someone else) before we ensure they run.  If this
 * happens and there is another VC in the MCS chain, it will make sure the right
 * cores run.  If there are no other vcores in the chain, it is up to the rest
 * of the vcore/event handling system to deal with this, which should happen
 * when one of the other vcores handles the preemption message generated by our
 * change_to. */
void __mcs_pdr_lock(struct mcs_pdr_lock *lock, struct mcs_pdr_qnode *qnode)
{
	struct mcs_pdr_qnode *predecessor;
	uint32_t pred_vcoreid;
	struct mcs_pdr_qnode *qnode0 = qnode - vcore_id();
	seq_ctr_t seq;
	qnode->next = 0;
	cmb();	/* swap provides a CPU mb() */
	predecessor = atomic_swap_ptr((void**)&lock->lock, qnode);
	if (predecessor) {
		qnode->locked = 1;
		pred_vcoreid = predecessor - qnode0;	/* can compute this whenever */
		wmb();	/* order the locked write before the next write */
		predecessor->next = qnode;
		seq = ACCESS_ONCE(__procinfo.coremap_seqctr);
		/* no need for a wrmb(), since this will only get unlocked after they
		 * read our pred->next write */
		while (qnode->locked) {
			/* Check to see if anything is amiss.  If someone in the chain is
			 * preempted, then someone will notice.  Simply checking our pred
			 * isn't that great of an indicator of preemption.  The reason is
			 * that the offline vcore is most likely the lockholder (under heavy
			 * lock contention), and we want someone farther back in the chain
			 * to notice (someone that will stay preempted long enough for a
			 * vcore outside the chain to recover them).  Checking the seqctr
			 * will tell us of any preempts since we started, so if a storm
			 * starts while we're spinning, we can join in and try to save the
			 * lockholder before its successor gets it.
			 *
			 * Also, if we're the lockholder, then we need to let our pred run
			 * so they can hand us the lock. */
			if (vcore_is_preempted(pred_vcoreid) ||
			    seq != __procinfo.coremap_seqctr) {
				if (lock->lockholder_vcoreid == MCSPDR_NO_LOCKHOLDER ||
				    lock->lockholder_vcoreid == vcore_id())
					ensure_vcore_runs(pred_vcoreid);
				else
					ensure_vcore_runs(lock->lockholder_vcoreid);
			}
			cpu_relax();
		}
	} else {
		lock->lockholder_vcoreid = vcore_id();
	}
}
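To illustrate the "qnodes are in the lock" style described in the comment, a sketch of the public wrapper, assuming the lock embeds one qnode per vcore in a qnodes[] array so each caller indexes by vcore_id(); the real wrapper may differ.

void mcs_pdr_lock(struct mcs_pdr_lock *lock)
{
	/* Notifs must stay off while we hold a PDR lock. */
	uth_disable_notifs();
	cmb();	/* don't let the compiler read vcore_id() before notifs are off */
	__mcs_pdr_lock(lock, &lock->qnodes[vcore_id()]);
}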
Example #17
void *block_thread(void* arg)
{	
	assert(!in_vcore_context());
	for (int i = 0; i < NUM_TEST_LOOPS; i++) {
		printf_safe("[A] pthread %d on vcore %d\n", pthread_self()->id, vcore_id());
		sys_block(5000 + pthread_self()->id);
	}
	return (void*)(long)pthread_self()->id;
}
Example #18
/* Makes sure a vcore is running.  If it is preempted, we'll switch to
 * it.  This will return, either immediately if the vcore is running, or later
 * when someone preempt-recovers us.
 *
 * If you pass in your own vcoreid, this will make sure all other preempted
 * vcores run. */
void ensure_vcore_runs(uint32_t vcoreid)
{
    /* if the vcoreid is ourselves, make sure everyone else is running */
    if (vcoreid == vcore_id()) {
        __ensure_all_run();
        return;
    }
    __ensure_vcore_runs(vcoreid);
}
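A sketch of the __ensure_all_run() helper referenced above, assuming it simply walks every possible vcore with max_vcores() and recovers any that are preempted; the real version may restrict itself to mapped vcores.

static void __ensure_all_run(void)
{
    /* __ensure_vcore_runs() is a no-op for vcores that are already running. */
    for (int i = 0; i < max_vcores(); i++)
        __ensure_vcore_runs(i);
}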
Example #19
void *yield_thread(void* arg)
{	
	/* Wait til all threads are created */
	while (!ready)
		cpu_relax();
	for (int i = 0; i < nr_yield_loops; i++) {
		printf_safe("[A] pthread %d %p on vcore %d, itr: %d\n", pthread_self()->id,
		       pthread_self(), vcore_id(), i);
		/* Fakes some work by spinning a bit.  Amount varies per uth/vcore,
		 * scaled by fake_work */
		if (amt_fake_work)
			udelay(amt_fake_work * (pthread_self()->id * (vcore_id() + 1)));
		pthread_yield();
		printf_safe("[A] pthread %p returned from yield on vcore %d, itr: %d\n",
		            pthread_self(), vcore_id(), i);
	}
	return (void*)(pthread_self());
}
Example #20
void *yield_thread(void* arg)
{	
	/* Wait til all threads are created */
	pthread_barrier_wait(&barrier);
	for (int i = 0; i < nr_yield_loops; i++) {
		printf_safe("[A] pthread %d %p on vcore %d, itr: %d\n",
			    pthread_id(), pthread_self(), vcore_id(), i);
		/* Fakes some work by spinning a bit.  Amount varies per
		 * uth/vcore, scaled by fake_work */
		if (amt_fake_work)
			udelay(amt_fake_work * (pthread_id() * (vcore_id() +
								2)));
		pthread_yield();
		printf_safe("[A] pthread %p returned from yield on vcore %d, itr: %d\n",
		            pthread_self(), vcore_id(), i);
	}
	return (void*)(pthread_self());
}
Example #21
void vcore_reenter(void (*entry_func)(void))
{
    assert(in_vcore_context());
    struct preempt_data *vcpd = vcpd_of(vcore_id());

    __vcore_reentry_func = entry_func;
    set_stack_pointer((void*)vcpd->vcore_stack);
    cmb();
    __vcore_reenter();
}
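A C-level sketch of what the arch-specific __vcore_reenter() stub does once vcore_reenter() has switched onto the fresh vcore stack: call the saved entry function and never return. Illustrative only; the real stub is assembly.

static void __attribute__((noreturn)) __vcore_reenter_sketch(void)
{
    __vcore_reentry_func();
    assert(0);    /* the entry function must not return */
}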
Example #22
File: vcore.c Project: 7perl/akaros
/* Like smp_idle(), this will put the core in a state that it can only be woken
 * up by an IPI.  In the future, we may halt or something.  This will return if
 * an event was pending (could be the one you were waiting for). */
void vcore_idle(void)
{
	uint32_t vcoreid = vcore_id();
	if (handle_events(vcoreid))
		return;
	enable_notifs(vcoreid);
	while (1) {
		cpu_relax();
	}
}
Example #23
/* Handles syscall overflow */
static void handle_sysc_overflow(void)
{
	struct sysc_mgmt *vc_sysc_mgmt = &sysc_mgmt[vcore_id()];
	/* if we're currently handling it on this vcore, bail out */
	if (vc_sysc_mgmt->handling_overflow)
		return;
	/* Actually handle stuff (TODO) */
	vc_sysc_mgmt->handling_overflow = TRUE;
	printf("FUUUUUUUUUUUUUUUUCK, OVERFLOW!!!!!!!\n");
}
Example #24
/* Called from vcore entry.  Options usually include restarting whoever was
 * running there before or running a new thread.  Events are handled out of
 * event.c (table of function pointers, stuff like that). */
void __attribute__((noreturn)) pth_sched_entry(void)
{
	uint32_t vcoreid = vcore_id();
	if (current_uthread) {
		run_current_uthread();
		assert(0);
	}
	/* no one currently running, so lets get someone from the ready queue */
	struct pthread_tcb *new_thread = NULL;
	struct mcs_lock_qnode local_qn = {0};
	/* For now, let's spin and handle events til we get a thread to run.  This
	 * will help catch races, instead of only having one core ever run a thread
	 * (if there is just one, etc).  Also, we don't need the EVENT_IPIs for this
	 * to work (since we poll handle_events()) */
	while (!new_thread) {
		handle_events(vcoreid);
		mcs_lock_notifsafe(&queue_lock, &local_qn);
		new_thread = TAILQ_FIRST(&ready_queue);
		if (new_thread) {
			TAILQ_REMOVE(&ready_queue, new_thread, next);
			TAILQ_INSERT_TAIL(&active_queue, new_thread, next);
			threads_active++;
			threads_ready--;
		}
		mcs_unlock_notifsafe(&queue_lock, &local_qn);
	}
	/* Instead of yielding, you could spin, turn off the core, set an alarm,
	 * whatever.  You want some logic to decide this.  Uthread code will have
	 * helpers for this (like how we provide run_uthread()) */
	if (!new_thread) {
		/* Note, we currently don't get here (due to the while loop) */
		printd("[P] No threads, vcore %d is yielding\n", vcore_id());
		/* Not actually yielding - just spin for now, so we can get syscall
		 * unblocking events */
		vcore_idle();
		//sys_yield(0);
		assert(0);
	}
	assert(((struct uthread*)new_thread)->state != UT_RUNNING);
	run_uthread((struct uthread*)new_thread);
	assert(0);
}
Example #25
/* Handle an mbox.  This is the receive-side processing of an event_queue.  It
 * takes an ev_mbox, since the vcpd mbox isn't a regular ev_q.  Returns 1 if we
 * handled something, 0 o/w. */
int handle_mbox(struct event_mbox *ev_mbox)
{
	int retval = 0;
	uint32_t vcoreid = vcore_id();
	void bit_handler(unsigned int bit) {
		printd("[event] Bit: ev_type: %d\n", bit);
		if (ev_handlers[bit])
			ev_handlers[bit](0, bit);
		retval = 1;
		/* Consider checking the queue for incoming messages while we're here */
	}
Example #26
/* This can return, if you failed to yield due to a concurrent event.  Note
 * we're atomically setting the CAN_RCV flag, and aren't bothering with CASing
 * (either with the kernel or uthread's handle_indirs()).  We don't particularly
 * care what other code does - we intend to set those flags no matter what. */
void vcore_yield(bool preempt_pending)
{
    unsigned long old_nr;
    uint32_t vcoreid = vcore_id();
    struct preempt_data *vcpd = vcpd_of(vcoreid);
    __sync_fetch_and_and(&vcpd->flags, ~VC_CAN_RCV_MSG);
    /* no wrmb() necessary, handle_events() has an mb() if it is checking */
    /* Clears notif pending and tries to handle events.  This is an optimization
     * to avoid the yield syscall if we have an event pending.  If there is one,
     * we want to unwind and return to the 2LS loop, where we may not want to
     * yield anymore.
     * Note that the kernel only cares about CAN_RCV_MSG for the desired vcore,
     * not for a FALLBACK.  */
    if (handle_events(vcoreid)) {
        __sync_fetch_and_or(&vcpd->flags, VC_CAN_RCV_MSG);
        return;
    }
    /* If we are yielding since we don't want the core, tell the kernel we want
     * one less vcore (vc_yield assumes a dumb 2LS).
     *
     * If yield fails (slight race), we may end up having more vcores than
     * amt_wanted for a while, and might lose one later on (after a
     * preempt/timeslicing) - the 2LS will have to notice eventually if it
     * actually needs more vcores (which it already needs to do).  amt_wanted
     * could even be 0.
     *
     * In general, any time userspace decrements or sets to 0, it could get
     * preempted, so the kernel will still give us at least one, until the last
     * vcore properly yields without missing a message (and becomes a WAITING
     * proc, which the ksched will not give cores to).
     *
     * I think it's possible for userspace to do this (lock, read amt_wanted,
     * check all message queues for all vcores, subtract amt_wanted (not set to
     * 0), unlock) so long as every event handler +1s the amt wanted, but that's
     * a huge pain, and we already have event handling code making sure a
     * process can't sleep (transition to WAITING) if a message arrives (can't
     * yield if notif_pending, can't go WAITING without yielding, and the event
     * posting the notif_pending will find the online VC or be delayed by
     * spinlock til the proc is WAITING). */
    if (!preempt_pending) {
        do {
            old_nr = __procdata.res_req[RES_CORES].amt_wanted;
            if (old_nr == 0)
                break;
        } while (!__sync_bool_compare_and_swap(
                     &__procdata.res_req[RES_CORES].amt_wanted,
                     old_nr, old_nr - 1));
    }
    /* We can probably yield.  This may pop back up if notif_pending became set
     * by the kernel after we cleared it and we lost the race. */
    sys_yield(preempt_pending);
    __sync_fetch_and_or(&vcpd->flags, VC_CAN_RCV_MSG);
}
Example #27
/* Internal version of the locking func, doesn't care if notifs are disabled */
void __spin_pdr_lock(struct spin_pdr_lock *pdr_lock)
{
	uint32_t vcoreid = vcore_id();
	uint32_t lock_val;
	do {
		while ((lock_val = pdr_lock->lock) != SPINPDR_UNLOCKED) {
			ensure_vcore_runs(lock_val);
			cmb();
		}
	} while (!atomic_cas_u32(&pdr_lock->lock, lock_val, vcoreid));
	cmb();	/* just need a cmb, the CAS handles the CPU wmb/wrmb() */
}
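The matching unlock, sketched under the assumption that storing SPINPDR_UNLOCKED is all that is needed to release the lock, with a write barrier so the critical section's stores become visible first; the real spin_pdr code may use different barrier macros.

void __spin_pdr_unlock(struct spin_pdr_lock *pdr_lock)
{
	wmb();	/* flush critical-section stores before releasing */
	pdr_lock->lock = SPINPDR_UNLOCKED;
}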
Example #28
/* Helper that actually makes sure a vcore is running.  Call this if you really
 * want vcoreid.  More often, you'll want to call the regular version. */
static void __ensure_vcore_runs(uint32_t vcoreid)
{
    if (vcore_is_preempted(vcoreid)) {
        printd("[vcore]: VC %d changing to VC %d\n", vcore_id(), vcoreid);
        /* Note that at this moment, the vcore could still be mapped (we're
         * racing with __preempt).  If that happens, we'll just fail the
         * sys_change_vcore(), and next time __ensure runs we'll get it. */
        /* We want to recover them from preemption.  Since we know they have
         * notifs disabled, they will need to be directly restarted, so we can
         * skip the other logic and cut straight to the sys_change_vcore() */
        sys_change_vcore(vcoreid, FALSE);
    }
}
Example #29
void __print_func_entry(const char *func, const char *file)
{
	if (!print)
		return;
	if (is_blacklisted(func))
		return;
	spinlock_lock(&lock);
	printd("Vcore %2d", vcore_id());	/* helps with multicore output */
	for (int i = 0; i < tab_depth; i++)
		printf("\t");
	printf("%s() in %s\n", func, file);
	spinlock_unlock(&lock);
	tab_depth++;
}
Example #30
void __print_func_exit(const char *func, const char *file)
{
	if (!print)
		return;
	if (is_blacklisted(func))
		return;
	tab_depth--;
	spinlock_lock(&lock);
	printd("Vcore %2d", vcore_id());
	for (int i = 0; i < tab_depth; i++)
		printf("\t");
	printf("---- %s()\n", func);
	spinlock_unlock(&lock);
}
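Typical call-site wrappers for the two tracing helpers above, assuming the usual __FUNCTION__/__FILE__ macro pattern; the real header may name these differently.

#define print_func_entry() __print_func_entry(__FUNCTION__, __FILE__)
#define print_func_exit()  __print_func_exit(__FUNCTION__, __FILE__)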