Example #1
/* Returns -1 with errno set on error, or 0 on success.  This does not return
 * the number of cores actually granted (though some parts of the kernel do
 * internally).
 *
 * This tries to get "more vcores", based on the number we currently have.
 * We'll probably need smarter 2LSs in the future that just directly set
 * amt_wanted.  What happens is we can have a bunch of 2LS vcore contexts
 * trying to get "another vcore", which currently means more than num_vcores().
 * If you have someone ask for two more, and then someone else ask for one more,
 * how many you ultimately ask for depends on if the kernel heard you and
 * adjusted num_vcores in between the two calls.  Or maybe your amt_wanted
 * already was num_vcores + 5, so neither call is telling the kernel anything
 * new.  It comes down to "one more than I have" vs "one more than I've already
 * asked for".
 *
 * So for now, this will keep the older behavior (one more than I have).  It
 * will try to accumulate any concurrent requests, and adjust amt_wanted up.
 * Interleaving, repetitive calls (everyone asking for one more) may get
 * ignored.
 *
 * Note this doesn't block or anything (despite the minimum number requested
 * being 1), since the kernel won't block the call.
 *
 * There are a few concurrency concerns.  We have _max_vcores_ever_wanted,
 * initialization of new vcore stacks/TLSs, making sure we don't ask for too
 * many (minor point), and most importantly not asking the kernel for too much
 * or otherwise miscommunicating our desires to the kernel.  Remember, the
 * kernel wants just one answer from the process about what it wants, and it is
 * up to the process to figure that out.
 *
 * So we basically have one thread do the submitting/prepping/bookkeeping, and
 * other threads come in and just update the number wanted and make sure someone
 * is sorting things out.  This will perform a bit better too, since only one
 * vcore makes syscalls (which hammer the proc_lock).  This essentially has
 * cores submit work, and one core does the work (like Eric's old delta
 * functions).
 *
 * There's a slight semantic change: this will return 0 (success) for the
 * non-submitters, and 0 if we submitted.  -1 only if the submitter had some
 * non-kernel failure.
 *
 * Also, beware that this (like the old version) doesn't protect against races on
 * num_vcores().  num_vcores() is how many you have now or very soon (accounting
 * for messages in flight that will take your cores), not how many you told the
 * kernel you want. */
int vcore_request(long nr_new_vcores)
{
	long nr_to_prep_now, nr_vcores_wanted;

	assert(vc_initialized);
	/* Early sanity checks */
	if ((nr_new_vcores < 0) || (nr_new_vcores + num_vcores() > max_vcores()))
		return -1;	/* consider ERRNO */
	/* Post our desires (ROS atomic_add() conflicts with glibc) */
	atomic_fetch_and_add(&nr_new_vcores_wanted, nr_new_vcores);
try_handle_it:
	cmb();	/* inc before swap.  the atomic is a CPU mb() */
	if (atomic_swap(&vc_req_being_handled, 1)) {
		/* We got a 1 back, so someone else is already working on it */
		return 0;
	}
	/* So now we're the ones supposed to handle things.  This does things in the
	 * "increment based on the number we have" style, vs "increment based on the
	 * number we said we want".
	 *
	 * Figure out how many we have, though this is racy.  Yields/preempts/grants
	 * will change this over time, and we may end up asking for less than we
	 * had. */
	nr_vcores_wanted = num_vcores();
	/* Pull all of the vcores wanted into our local variable, where we'll deal
	 * with prepping/requesting that many vcores.  Keep doing this til we think
	 * no more are wanted. */
	while ((nr_to_prep_now = atomic_swap(&nr_new_vcores_wanted, 0))) {
		nr_vcores_wanted += nr_to_prep_now;
		/* Don't bother prepping or asking for more than we can ever get */
		nr_vcores_wanted = MIN(nr_vcores_wanted, max_vcores());
		/* Make sure all we might ask for are prepped */
		for (long i = _max_vcores_ever_wanted; i < nr_vcores_wanted; i++) {
			if (allocate_transition_stack(i) || allocate_transition_tls(i)) {
				atomic_set(&vc_req_being_handled, 0);	/* unlock and bail out*/
				return -1;
			}
			_max_vcores_ever_wanted++;	/* done in the loop to handle failures*/
		}
	}
	cmb();	/* force a reread of num_vcores() */
	/* Update amt_wanted if we now want *more* than what the kernel already
	 * knows.  See notes in the func doc. */
	if (nr_vcores_wanted > __procdata.res_req[RES_CORES].amt_wanted)
		__procdata.res_req[RES_CORES].amt_wanted = nr_vcores_wanted;
	/* If num_vcores isn't what we want, we can poke the ksched.  Due to some
	 * races with yield, our desires may be old.  Not a big deal; any vcores
	 * that pop up will just end up yielding (or get preempt messages.)  */
	if (nr_vcores_wanted > num_vcores())
		sys_poke_ksched(0, RES_CORES);	/* 0 -> poke for ourselves */
	/* Unlock, (which lets someone else work), and check to see if more work
	 * needs to be done.  If so, we'll make sure it gets handled. */
	atomic_set(&vc_req_being_handled, 0);	/* unlock, to allow others to try */
	wrmb();
	/* check for any that might have come in while we were out */
	if (atomic_read(&nr_new_vcores_wanted))
		goto try_handle_it;
	return 0;
}
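/* A minimal usage sketch (not from the sources above): a hypothetical 2LS
 * helper that asks for "one more vcore than I have", matching the semantics
 * described in the comment block above.  The function name and the printf on
 * failure are illustrative only. */
static void example_2ls_want_another_vcore(void)
{
	/* Post the desire; if another vcore is already the submitter, this just
	 * accumulates into nr_new_vcores_wanted and returns 0, and that vcore
	 * will pick our request up. */
	if (vcore_request(1) < 0)
		printf("vcore_request() failed with a non-kernel error\n");
}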
Example #2
static void prep_remaining_vcores(void)
{
	uintptr_t mmap_block;

	mmap_block = (uintptr_t)mmap(0, PGSIZE * 4 * (max_vcores() - 1),
	                             PROT_WRITE | PROT_READ,
	                             MAP_POPULATE | MAP_ANONYMOUS | MAP_PRIVATE,
	                             -1, 0);
	assert((void*)mmap_block != MAP_FAILED);
	for (int i = 1; i < max_vcores(); i++)
		__prep_vcore(i, mmap_block + 4 * (i - 1) * PGSIZE);
}
Example #3
int main(int argc, char** argv)
{
	uint32_t vcoreid;
	int nr_vcores;

	if (argc < 2)
		nr_vcores = max_vcores();
	else
		nr_vcores = atoi(argv[1]);

	/* Inits a thread for us, though we won't use it.  Just a hack to get into
	 * _M mode.  Note this requests one vcore for us */
	struct uthread dummy = {0};
	uthread_2ls_init(&dummy, &ghetto_sched_ops);
	uthread_mcp_init();

	/* Reset the blockon to be the spinner...  This is really shitty.  Any
	 * blocking calls after we become an MCP and before this will fail.  This is
	 * just mhello showing its warts due to trying to work outside uthread.c */
	ros_syscall_blockon = __ros_syscall_spinon;

	vcore_request(nr_vcores - 1); /* since we already have 1 */

	while (1)
		sys_halt_core(0);

	return 0;
}
Example #4
libgomp_lithe_sched_t *libgomp_lithe_sched_alloc()
{
  /* Allocate all the scheduler data together. */
  struct sched_data {
    libgomp_lithe_sched_t sched;
    libgomp_lithe_context_t main_context;
    struct lithe_fork_join_vc_mgmt vc_mgmt[];
  };

  /* Use a zombie list to reuse old schedulers if available, otherwise, create
   * a new one. */
  struct sched_data *s = wfl_remove(&sched_zombie_list);
  if (!s) {
    s = parlib_aligned_alloc(PGSIZE,
            sizeof(*s) + sizeof(struct lithe_fork_join_vc_mgmt) * max_vcores());
    s->sched.sched.vc_mgmt = &s->vc_mgmt[0];
    s->sched.sched.sched.funcs = &libgomp_lithe_sched_funcs;
  }

  /* Initialize some libgomp specific fields before initializing the generic
   * underlying fjs. */
  s->sched.refcnt = 1;
  s->main_context.completed = false;
  lithe_fork_join_sched_init(&s->sched.sched, &s->main_context.context);
  return &s->sched;
}
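/* A hedged companion sketch (not in the source above): putting a scheduler
 * back on the zombie list so the next alloc can reuse it.  The function name
 * is hypothetical, and wfl_insert() is assumed to be the counterpart of the
 * wfl_remove() used above. */
void libgomp_lithe_sched_recycle(libgomp_lithe_sched_t *sched)
{
  /* The sched is the first member of the sched_data block allocated above,
   * so its pointer doubles as the block pointer. */
  wfl_insert(&sched_zombie_list, sched);
}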
Example #5
void vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();

	if (vcoreid) {
		mcs_barrier_wait(&b, vcoreid);
		udelay(5000000);
		if (vcoreid == 1)
			printf("Proc %d's vcores are yielding\n", getpid());
		sys_yield(0);
	} else {
		/* trip the barrier here, all future times are in the loop */
		mcs_barrier_wait(&b, vcoreid);
		while (1) {
			udelay(15000000);
			printf("Proc %d requesting its cores again\n", getpid());
			begin = read_tsc();
			sys_resource_req(RES_CORES, max_vcores(), 1, REQ_SOFT);
			mcs_barrier_wait(&b, vcoreid);
			end = read_tsc();
			printf("Took %llu usec (%llu nsec) to get my yielded cores back.\n",
			       udiff(begin, end), ndiff(begin, end));
			printf("[T]:010:%llu:%llu\n",
			       udiff(begin, end), ndiff(begin, end));
		}
	}
	printf("We're screwed!\n");
	exit(-1);
}
Example #6
// MCS dissemination barrier!
int mcs_barrier_init(mcs_barrier_t* b, size_t np)
{
	if(np > max_vcores())
		return -1;
	b->allnodes = (mcs_dissem_flags_t*)malloc(np*sizeof(mcs_dissem_flags_t));
	memset(b->allnodes,0,np*sizeof(mcs_dissem_flags_t));
	b->nprocs = np;

	b->logp = (np & (np-1)) != 0;
	while(np >>= 1)
		b->logp++;

	size_t i,k;
	for(i = 0; i < b->nprocs; i++)
	{
		b->allnodes[i].parity = 0;
		b->allnodes[i].sense = 1;

		for(k = 0; k < b->logp; k++)
		{
			size_t j = (i+(1<<k)) % b->nprocs;
			b->allnodes[i].partnerflags[0][k] = &b->allnodes[j].myflags[0][k];
			b->allnodes[i].partnerflags[1][k] = &b->allnodes[j].myflags[1][k];
		} 
	}

	return 0;
}
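/* A sketch of the matching dissemination-barrier wait, based on the fields
 * set up in mcs_barrier_init() above (myflags, partnerflags, parity, sense);
 * the real mcs_barrier_wait() may differ in details.  Each of the logp rounds
 * signals the partner 2^k away, then spins on our own flag; parity and sense
 * flip so the structure can be reused for the next barrier episode. */
void example_mcs_barrier_wait(mcs_barrier_t* b, size_t vcoreid)
{
	mcs_dissem_flags_t* localflags = &b->allnodes[vcoreid];
	size_t i;
	for(i = 0; i < b->logp; i++)
	{
		*localflags->partnerflags[localflags->parity][i] = localflags->sense;
		while(localflags->myflags[localflags->parity][i] != localflags->sense)
			cpu_relax();
	}
	if(localflags->parity)
		localflags->sense = 1 - localflags->sense;
	localflags->parity = 1 - localflags->parity;
}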
int main(int argc, char** argv)
{
	uint32_t vcoreid = vcore_id();
	int retval = 0;

	mcs_barrier_init(&b, max_vcores());

/* begin: stuff userspace needs to do before switching to multi-mode */
	vcore_lib_init();
	#if 0
	/* tell the kernel where and how we want to receive notifications */
	struct notif_method *nm;
	for (int i = 0; i < MAX_NR_NOTIF; i++) {
		nm = &__procdata.notif_methods[i];
		nm->flags |= NOTIF_WANTED | NOTIF_MSG | NOTIF_IPI;
		nm->vcoreid = i % 2; // vcore0 or 1, keepin' it fresh.
	}
	#endif
	/* Need to save this somewhere that you can find it again when restarting
	 * core0 */
	core0_tls = get_tls_desc(0);
	/* Need to save our floating point state somewhere (like in the
	 * user_thread_tcb so it can be restarted too */
/* end: stuff userspace needs to do before switching to multi-mode */

	/* get into multi mode */
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");

	printf("Proc %d requesting another vcore\n", getpid());
	begin = read_tsc();
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");
	while (!core1_up)
		cpu_relax();
	end = read_tsc();
	printf("Took %llu usec (%llu nsec) to receive 1 core (cold).\n",
	       udiff(begin, end), ndiff(begin, end));
	printf("[T]:002:%llu:%llu:1:C.\n",
	       udiff(begin, end), ndiff(begin, end));
	core1_up = FALSE;
	udelay(2000000);
	printf("Proc %d requesting the vcore again\n", getpid());
	begin = read_tsc();
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");
	while (!core1_up)
		cpu_relax();
	end = read_tsc();
	printf("Took %llu usec (%llu nsec) to receive 1 core (warm).\n",
	       udiff(begin, end), ndiff(begin, end));
	printf("[T]:002:%llu:%llu:1:W.\n",
	       udiff(begin, end), ndiff(begin, end));
	return 0;
}
Example #8
int main(int argc, char** argv)
{

	/* don't forget to enable notifs on vcore0.  if you don't, the kernel will
	 * restart your _S with notifs disabled, which is a path to confusion. */
	struct preempt_data *vcpd = &__procdata.vcore_preempt_data[0];
	vcpd->notif_enabled = TRUE;

	mcs_barrier_init(&b, max_vcores());

	vcore_request(max_vcores());
	printf("not enough vcores, going to try it manually\n");
	sys_resource_req(RES_CORES, max_vcores(), 1, REQ_SOFT);
	printf("We're screwed!\n");

	/* should never make it here */
	return -1;
}
Example #9
void __attribute__((constructor)) vcore_lib_init(void)
{
    uintptr_t mmap_block;

    /* Note this is racy, but okay.  The first time through, we are _S.
     * Also, this is the "lowest" level constructor for now, so we don't need
     * to call any other init functions after our run_once() call. This may
     * change in the future. */
    init_once_racy(return);

    /* Need to alloc vcore0's transition stuff here (technically, just the TLS)
     * so that schedulers can use vcore0's transition TLS before it comes up in
     * vcore_entry() */
    if (allocate_vcore_stack(0) || allocate_transition_tls(0))
        goto vcore_lib_init_fail;

    /* Initialize our VCPD event queues' ucqs, two pages per ucq, 4 per vcore */
    mmap_block = (uintptr_t)mmap(0, PGSIZE * 4 * max_vcores(),
                                 PROT_WRITE | PROT_READ,
                                 MAP_POPULATE | MAP_ANONYMOUS, -1, 0);
    /* Yeah, this doesn't fit in the error-handling scheme, but this whole
     * system doesn't really handle failure, and needs a rewrite involving less
     * mmaps/munmaps. */
    assert(mmap_block);
    /* Note we may end up doing vcore 0's elsewhere, for _Ss, or else have a
     * separate ev_q for that. */
    for (int i = 0; i < max_vcores(); i++) {
        /* four pages total for both ucqs from the big block (2 pages each) */
        ucq_init_raw(&vcpd_of(i)->ev_mbox_public.ev_msgs,
                     mmap_block + (4 * i    ) * PGSIZE,
                     mmap_block + (4 * i + 1) * PGSIZE);
        ucq_init_raw(&vcpd_of(i)->ev_mbox_private.ev_msgs,
                     mmap_block + (4 * i + 2) * PGSIZE,
                     mmap_block + (4 * i + 3) * PGSIZE);
        /* Set the lowest level entry point for each vcore. */
        vcpd_of(i)->vcore_entry = (uintptr_t)__kernel_vcore_entry;
    }
    atomic_init(&vc_req_being_handled, 0);
    assert(!in_vcore_context());
    vcore_libc_init();
    return;
vcore_lib_init_fail:
    assert(0);
}
Example #10
/* CAS-less unlock, not quite as efficient and will make sure every vcore runs
 * (since we don't have a convenient way to make sure our qnode->next runs
 * yet, other than making sure everyone runs).
 *
 * To use this without ensuring all vcores run, you'll need the unlock code to
 * save pred to a specific field in the qnode and check both its initial pred
 * as well as its run time pred (who could be an usurper).  It's all possible,
 * but a little more difficult to follow.  Also, I'm adjusting this comment
 * months after writing it originally, so it is probably not sufficient, but
 * necessary. */
void __mcs_pdro_unlock_no_cas(struct mcs_pdro_lock *lock,
                             struct mcs_pdro_qnode *qnode)
{
	struct mcs_pdro_qnode *old_tail, *usurper;
	/* Check if someone is already waiting on us to unlock */
	if (qnode->next == 0) {
		cmb();	/* no need for CPU mbs, since there's an atomic_swap() */
		/* Unlock it */
		old_tail = atomic_swap_ptr((void**)&lock->lock, 0);
		/* no one else was already waiting, so we successfully unlocked and can
		 * return */
		if (old_tail == qnode)
			return;
		/* someone else was already waiting on the lock (last one on the list),
		 * and we accidentally took them off.  Try and put it back. */
		usurper = atomic_swap_ptr((void*)&lock->lock, old_tail);
		/* since someone else was waiting, they should have made themselves our
		 * next.  spin (very briefly!) til it happens. */
		while (qnode->next == 0) {
			/* make sure old_tail isn't preempted.  best we can do for now is
			 * to make sure all vcores run, and thereby get our next. */
			for (int i = 0; i < max_vcores(); i++)
				ensure_vcore_runs(i);
			cpu_relax();
		}
		if (usurper) {
			/* an usurper is someone who snuck in before we could put the old
			 * tail back.  They now have the lock.  Let's put whoever is
			 * supposed to be next as their next one. 
			 *
			 * First, we need to change our next's pred.  There's a slight race
			 * here, so our next will need to make sure both us and pred are
			 * done */
			/* I was trying to do something so we didn't need to ensure all
			 * vcores run, using more space in the qnode to figure out who our
			 * pred was a lock time (guessing actually, since there's a race,
			 * etc). */
			//qnode->next->pred = usurper;
			//wmb();
			usurper->next = qnode->next;
			/* could imagine another wmb() and a flag so our next knows to no
			 * longer check us too. */
		} else {
			/* No usurper meant we put things back correctly, so we should just
			 * pass the lock / unlock whoever is next */
			qnode->next->locked = 0;
		}
	} else {
		/* mb()s necessary since we didn't call an atomic_swap() */
		wmb();	/* need to make sure any previous writes don't pass unlocking */
		rwmb();	/* need to make sure any reads happen before the unlocking */
		/* simply unlock whoever is next */
		qnode->next->locked = 0;
	}
}
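/* For contrast, a sketch of what a CAS-based unlock looks like, which avoids
 * the swap-and-repair dance above.  atomic_cas_ptr() (compare-and-swap,
 * returning true on success) is an assumed helper name/signature, and the
 * memory barriers used in the function above are elided for brevity. */
void example_mcs_pdro_unlock_cas(struct mcs_pdro_lock *lock,
                                 struct mcs_pdro_qnode *qnode)
{
	if (qnode->next == 0) {
		cmb();
		/* If we are still the tail, swing the lock back to 0 and we're done */
		if (atomic_cas_ptr((void**)&lock->lock, qnode, 0))
			return;
		/* Someone enqueued behind us before the CAS could finish; wait for
		 * them to link in as our next, then hand the lock off as usual. */
		while (qnode->next == 0)
			cpu_relax();
	}
	qnode->next->locked = 0;
}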
Example #11
void vcore_init(void)
{
	uintptr_t mmap_block;
	/* Note this is racy, but okay.  The first time through, we are _S */
	init_once_racy(return);

	/* Need to alloc vcore0's transition stuff here (technically, just the TLS)
	 * so that schedulers can use vcore0's transition TLS before it comes up in
	 * vcore_entry() */
	if(allocate_transition_stack(0) || allocate_transition_tls(0))
		goto vcore_init_fail;

	/* Initialize our VCPD event queues' ucqs, two pages per ucq, 4 per vcore */
	mmap_block = (uintptr_t)mmap(0, PGSIZE * 4 * max_vcores(),
	                             PROT_WRITE | PROT_READ,
	                             MAP_POPULATE | MAP_ANONYMOUS, -1, 0);
	/* Yeah, this doesn't fit in the error-handling scheme, but this whole
	 * system doesn't really handle failure, and needs a rewrite involving less
	 * mmaps/munmaps. */
	assert(mmap_block);
	/* Note we may end up doing vcore 0's elsewhere, for _Ss, or else have a
	 * separate ev_q for that. */
	for (int i = 0; i < max_vcores(); i++) {
		/* four pages total for both ucqs from the big block (2 pages each) */
		ucq_init_raw(&vcpd_of(i)->ev_mbox_public.ev_msgs,
		             mmap_block + (4 * i    ) * PGSIZE,
		             mmap_block + (4 * i + 1) * PGSIZE);
		ucq_init_raw(&vcpd_of(i)->ev_mbox_private.ev_msgs,
		             mmap_block + (4 * i + 2) * PGSIZE,
		             mmap_block + (4 * i + 3) * PGSIZE);
	}
	atomic_init(&vc_req_being_handled, 0);
	assert(!in_vcore_context());
	/* no longer need to enable notifs on vcore 0, it is set like that by
	 * default (so you drop into vcore context immediately on transitioning to
	 * _M) */
	vc_initialized = TRUE;
	return;
vcore_init_fail:
	assert(0);
}
Example #12
int main(int argc, char** argv)
{
	pthread_t *my_threads = malloc(sizeof(pthread_t) * max_vcores());

	/* set up to receive the PREEMPT_PENDING event.  EVENT_VCORE_APPRO tells the
	 * kernel to send the msg to whichever vcore is appropriate.  Pthread code
	 * will see the preemption and yield. */
	struct event_queue *ev_q = get_event_q();
	ev_q->ev_flags = EVENT_IPI | EVENT_NOMSG | EVENT_VCORE_APPRO;
	register_kevent_q(ev_q, EV_PREEMPT_PENDING);

	/* actually only need one less, since the _S will be pthread 0 */
	for (int i = 0; i < max_vcores() - 1; i++)
		pthread_create(&my_threads[i], NULL, &while_thread, NULL);

	assert(num_vcores() == max_vcores());
	while (1);

	/* should never make it here */
	return -1;
}
Example #13
/* Do whatever init you want.  Return a uthread representing thread0 (int
 * main()) */
struct uthread *pth_init(void)
{
	struct mcs_lock_qnode local_qn = {0};
	/* Tell the kernel where and how we want to receive events.  This is just an
	 * example of what to do to have a notification turned on.  We're turning on
	 * USER_IPIs, posting events to vcore 0's vcpd, and telling the kernel to
	 * send to vcore 0.  Note sys_self_notify will ignore the vcoreid pref.
	 * Also note that enable_kevent() is just an example, and you probably want
	 * to use parts of event.c to do what you want. */
	enable_kevent(EV_USER_IPI, 0, EVENT_IPI);

	/* Handle syscall events.  Using small ev_qs, with no internal ev_mbox. */
	ev_handlers[EV_SYSCALL] = pth_handle_syscall;
	/* Set up the per-vcore structs to track outstanding syscalls */
	sysc_mgmt = malloc(sizeof(struct sysc_mgmt) * max_vcores());
	assert(sysc_mgmt);
	for (int i = 0; i < max_vcores(); i++) {
		/* Set up each of the per-vcore syscall event queues so that they point
		 * to the VCPD/default vcore mailbox (for now)  Note you'll need the
		 * vcore to be online to get the events (for now). */
		sysc_mgmt[i].ev_q.ev_mbox =  &__procdata.vcore_preempt_data[i].ev_mbox;
		sysc_mgmt[i].ev_q.ev_flags = EVENT_IPI;		/* totally up to you */
		sysc_mgmt[i].ev_q.ev_vcore = i;
		/* Init the list and other data */
		TAILQ_INIT(&sysc_mgmt[i].pending_syscs);
		sysc_mgmt[i].handling_overflow = FALSE;
	}
	/* Create a pthread_tcb for the main thread */
	pthread_t t = (pthread_t)calloc(1, sizeof(struct pthread_tcb));
	assert(t);
	t->id = get_next_pid();
	assert(t->id == 0);
	/* Put the new pthread on the active queue */
	mcs_lock_notifsafe(&queue_lock, &local_qn);
	threads_active++;
	TAILQ_INSERT_TAIL(&active_queue, t, next);
	mcs_unlock_notifsafe(&queue_lock, &local_qn);
	return (struct uthread*)t;
}
Example #14
int bthread_cond_signal(bthread_cond_t *c)
{
  int i;
  for(i = 0; i < max_vcores(); i++)
  {
    if(c->waiters[i])
    {
      c->waiters[i] = 0;
      break;
    }
  }
  return 0;
}
Example #15
static libgomp_lithe_context_t *__ctx_alloc(size_t stacksize)
{
	libgomp_lithe_context_t *ctx = wfl_remove(&context_zombie_list);
	if (!ctx) {
		int offset = ROUNDUP(sizeof(libgomp_lithe_context_t), ARCH_CL_SIZE);
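		/* The next line adds a random whole number of cache lines
		 * (0 .. max_vcores()-1) to that offset, presumably to spread context
		 * headers across cache sets. */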
		offset += rand_r(&rseed(0)) % max_vcores() * ARCH_CL_SIZE;
		stacksize = ROUNDUP(stacksize + offset, PGSIZE);
		void *stackbot = mmap(
			0, stacksize, PROT_READ|PROT_WRITE|PROT_EXEC,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0
		);
		if (stackbot == MAP_FAILED)
			abort();
		ctx = stackbot + stacksize - offset;
		ctx->context.stack_offset = offset;
		ctx->context.context.stack.bottom = stackbot;
		ctx->context.context.stack.size = stacksize - offset;
	}
	return ctx;
}
Example #16
/* Used by both -p and -c modes (-c will use it after creating pid) */
static int prov_pid(pid_t pid, struct prog_args *pargs)
{
	unsigned int kernel_res_type;
	int retval;
	switch (pargs->res_type) {
		case ('c'):
			if (pargs->max) {
				/* TODO: don't guess the LL/CG layout and num pcores */
				#if 1
				for (int i = 1; i < max_vcores() + 1; i++) {
					if ((retval = sys_provision(pid, RES_CORES, i))) {
						perror("Failed max provisioning");
						return retval;
					}
				}
				#else
				/* To force a vcore shuffle / least optimal ordering, change
				 * the if 1 to 0.  Normally, we provision out in a predictable,
				 * VCn->PCn+1 ordering.  This splits the odd and even VCs
				 * across sockets on a 32 PC machine (c89).  This is only for
				 * perf debugging, when using the lockprov.sh script. */
				retval = 0;
				retval |= sys_provision(pid, RES_CORES,  1);
				retval |= sys_provision(pid, RES_CORES, 16);
				retval |= sys_provision(pid, RES_CORES,  2);
				retval |= sys_provision(pid, RES_CORES, 17);
				retval |= sys_provision(pid, RES_CORES,  3);
				retval |= sys_provision(pid, RES_CORES, 18);
				retval |= sys_provision(pid, RES_CORES,  4);
				retval |= sys_provision(pid, RES_CORES, 19);
				retval |= sys_provision(pid, RES_CORES,  5);
				retval |= sys_provision(pid, RES_CORES, 20);
				retval |= sys_provision(pid, RES_CORES,  6);
				retval |= sys_provision(pid, RES_CORES, 21);
				retval |= sys_provision(pid, RES_CORES,  7);
				retval |= sys_provision(pid, RES_CORES, 22);
				retval |= sys_provision(pid, RES_CORES,  8);
				retval |= sys_provision(pid, RES_CORES, 23);
				retval |= sys_provision(pid, RES_CORES,  9);
				retval |= sys_provision(pid, RES_CORES, 24);
				retval |= sys_provision(pid, RES_CORES, 10);
				retval |= sys_provision(pid, RES_CORES, 25);
				retval |= sys_provision(pid, RES_CORES, 11);
				retval |= sys_provision(pid, RES_CORES, 26);
				retval |= sys_provision(pid, RES_CORES, 12);
				retval |= sys_provision(pid, RES_CORES, 27);
				retval |= sys_provision(pid, RES_CORES, 13);
				retval |= sys_provision(pid, RES_CORES, 28);
				retval |= sys_provision(pid, RES_CORES, 14);
				retval |= sys_provision(pid, RES_CORES, 29);
				retval |= sys_provision(pid, RES_CORES, 15);
				retval |= sys_provision(pid, RES_CORES, 31);
				retval |= sys_provision(pid, RES_CORES, 30);
				return retval;
				#endif
			} else {
				if ((retval = sys_provision(pid, RES_CORES, pargs->res_val))) {
					perror("Failed single provision");
					return retval;
				}
			}
			kernel_res_type = RES_CORES;
			break;
		case ('m'):
			printf("Provisioning memory is not supported yet\n");
			return -1;
			break;
		default:
			if (!pargs->res_type)
				printf("No resource type selected.  Use -t\n");
			else
				printf("Unsupported resource type %c\n", pargs->res_type);
			return -1;
	}
	sys_poke_ksched(pid, kernel_res_type);
	return 0;
}
Example #17
int
__get_nprocs ()
{
  return max_vcores();
}
Example #18
/* to trick uthread_create() */
int main(int argc, char** argv)
{
	uint32_t vcoreid;
	int retval;

	/* Initialize our barrier. */
	mcs_barrier_init(&b, max_vcores());

	/* vcore_context test */
	assert(!in_vcore_context());
	
	/* prep indirect ev_q.  Note we grab a big one */
	indirect_q = get_eventq(EV_MBOX_UCQ);
	indirect_q->ev_flags = EVENT_IPI;
	indirect_q->ev_vcore = 1;			/* IPI core 1 */
	indirect_q->ev_handler = 0;
	printf("Registering %08p for event type %d\n", indirect_q,
	       EV_FREE_APPLE_PIE);
	register_kevent_q(indirect_q, EV_FREE_APPLE_PIE);

	/* handle events: just want to print out what we get.  This is just a
	 * quick set of handlers, not a registration for a kevent. */
	for (int i = 0; i < MAX_NR_EVENT; i++)
		register_ev_handler(i, handle_generic, 0);
	/* Want to use the default ev_ev (which we just overwrote) */
	register_ev_handler(EV_EVENT, handle_ev_ev, 0);
	/* vcore_lib_init() done in vcore_request() now. */
	/* Set up event reception.  For example, this will allow us to receive an
	 * event and IPI for USER_IPIs on vcore 0.  Check event.c for more stuff.
	 * Note you don't have to register for USER_IPIs to receive ones you send
	 * yourself with sys_self_notify(). */
	enable_kevent(EV_USER_IPI, 0, EVENT_IPI | EVENT_VCORE_PRIVATE);
	/* Receive pending preemption events.  (though there's no PP handler) */
	struct event_queue *ev_q = get_eventq_vcpd(0, EVENT_VCORE_PRIVATE);
	ev_q->ev_flags = EVENT_IPI | EVENT_VCORE_APPRO;
	register_kevent_q(ev_q, EV_PREEMPT_PENDING);
	/* We also receive preemption events, it is set up in uthread.c */

	/* Inits a thread for us, though we won't use it.  Just a hack to get into
	 * _M mode.  Note this requests one vcore for us */
	struct uthread dummy = {0};
	uthread_2ls_init(&dummy, &ghetto_sched_ops);
	uthread_mcp_init();
	/* Reset the blockon to be the spinner...  This is really shitty.  Any
	 * blocking calls after we become an MCP and before this will fail.  This is
	 * just mhello showing its warts due to trying to work outside uthread.c */
	ros_syscall_blockon = __ros_syscall_spinon;

	if ((vcoreid = vcore_id())) {
		printf("Should never see me! (from vcore %d)\n", vcoreid);
	} else { // core 0
		temp = 0xdeadbeef;
		printf("Hello from vcore %d with temp addr = %p and temp = %p\n",
		       vcoreid, &temp, temp);
		printf("Multi-Goodbye, world, from PID: %d!\n", sys_getpid());
		printf("Requesting %d vcores\n", max_vcores() - 1);
		retval = vcore_request(max_vcores() - 1); /* since we already have 1 */
		//retval = vcore_request(5);
		printf("This is vcore0, right after vcore_request, retval=%d\n", retval);
		/* vcore_context test */
		assert(!in_vcore_context());
	}

	//#if 0
	/* test notifying my vcore2 */
	udelay(5000000);
	printf("Vcore 0 self-notifying vcore 2 with notif 4!\n");
	struct event_msg msg;
	msg.ev_type = 4;
	sys_self_notify(2, 4, &msg, TRUE);
	udelay(5000000);
	printf("Vcore 0 notifying itself with notif 6!\n");
	msg.ev_type = 6;
	sys_notify(sys_getpid(), 6, &msg);
	udelay(1000000);
	//#endif

	/* test loop for restarting a uthread_ctx */
	if (vcoreid == 0) {
		int ctr = 0;
		while(1) {
			printf("Vcore %d Spinning (%d), temp = %08x!\n", vcoreid, ctr++, temp);
			udelay(5000000);
			//exit(0);
		}
	}

	printf("Vcore %d Done!\n", vcoreid);
	//mcs_barrier_wait(&b,vcore_id());

	printf("All Cores Done!\n", vcoreid);
	while(1); // manually kill from the monitor
	/* since everyone should cleanup their uthreads, even if they don't plan on
	 * calling their code or want uthreads in the first place. <3 */
	uthread_cleanup(&dummy);
	return 0;
}
Example #19
/* Helper, looks for any preempted vcores, making sure each of them runs at some
 * point.  This is pretty heavy-weight, and should be used to help get out of
 * weird deadlocks (spinning in vcore context, waiting on another vcore).  If
 * you might know which vcore you are waiting on, use ensure_vc_runs. */
static void __ensure_all_run(void)
{
    for (int i = 0; i < max_vcores(); i++)
        __ensure_vcore_runs(i);
}
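/* A usage sketch (not from the source above): when spinning in vcore context
 * on something another vcore must do, periodically fall back to the helper
 * above so a preempted vcore can't deadlock us.  The helper name and spin
 * threshold are illustrative only. */
static void example_spin_on_flag(volatile int *flag)
{
    int spins = 0;
    while (!*flag) {
        cpu_relax();
        if (++spins == 1000000) {
            __ensure_all_run();
            spins = 0;
        }
    }
}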
Example #20
int main(int argc, char** argv)
{
	uint32_t vcoreid;
	int retval;

	mcs_barrier_init(&b, max_vcores());

	/* vcore_context test */
	assert(!in_vcore_context());
	
	/* prep indirect ev_q.  Note we grab a big one */
	indirect_q = get_big_event_q();
	indirect_q->ev_flags = EVENT_IPI;
	indirect_q->ev_vcore = 1;			/* IPI core 1 */
	indirect_q->ev_handler = 0;
	printf("Registering %08p for event type %d\n", indirect_q,
	       EV_FREE_APPLE_PIE);
	register_kevent_q(indirect_q, EV_FREE_APPLE_PIE);

	/* handle events: just want to print out what we get.  This is just a
	 * quick set of handlers, not a registration for a kevent. */
	for (int i = 0; i < MAX_NR_EVENT; i++)
		ev_handlers[i] = handle_generic;
	/* Want to use the default ev_ev (which we just overwrote) */
	ev_handlers[EV_EVENT] = handle_ev_ev;
	/* vcore_init() done in vcore_request() now. */
	/* Set up event reception.  For example, this will allow us to receive an
	 * event and IPI for USER_IPIs on vcore 0.  Check event.c for more stuff.
	 * Note you don't have to register for USER_IPIs to receive ones you send
	 * yourself with sys_self_notify(). */
	enable_kevent(EV_USER_IPI, 0, EVENT_IPI);
	/* Receive pending preemption events.  Can also get a MSG if you want. */
	struct event_queue *ev_q = get_event_q();
	ev_q->ev_flags = EVENT_IPI | EVENT_NOMSG | EVENT_VCORE_APPRO;
	register_kevent_q(ev_q, EV_PREEMPT_PENDING);

	/* Makes a thread for us, though we won't use it.  Just a hack to get into
	 * _M mode.  Note this requests one vcore for us */
	uthread_create(dummy, 0);

	if ((vcoreid = vcore_id())) {
		printf("Should never see me! (from vcore %d)\n", vcoreid);
	} else { // core 0
		temp = 0xdeadbeef;
		printf("Hello from vcore %d with temp addr = %p and temp = %p\n",
		       vcoreid, &temp, temp);
		printf("Multi-Goodbye, world, from PID: %d!\n", sys_getpid());
		//retval = sys_resource_req(RES_CORES, 2, 0);
		printf("Requesting %d vcores\n", max_vcores() - 1);
		retval = vcore_request(max_vcores() - 1); /* since we already have 1 */
		//retval = vcore_request(5);
		printf("This is vcore0, right after vcore_request, retval=%d\n", retval);
		/* vcore_context test */
		assert(!in_vcore_context());
	}

	/* test notifying my vcore2 */
	udelay(5000000);
	printf("Vcore 0 self-notifying vcore 2 with notif 4!\n");
	struct event_msg msg;
	msg.ev_type = 4;
	sys_self_notify(2, 4, &msg);
	udelay(5000000);
	printf("Vcore 0 notifying itself with notif 3!\n");
	msg.ev_type = 3;
	sys_notify(sys_getpid(), 3, &msg);
	udelay(1000000);

	/* test loop for restarting a notif_tf */
	if (vcoreid == 0) {
		int ctr = 0;
		while(1) {
			printf("Vcore %d Spinning (%d), temp = %08x!\n", vcoreid, ctr++, temp);
			udelay(5000000);
			//exit(0);
		}
	}

	printf("Vcore %d Done!\n", vcoreid);
	//mcs_barrier_wait(&b,vcore_id());

	printf("All Cores Done!\n", vcoreid);
	while(1); // manually kill from the monitor
	return 0;
}
Example #21
/* Ghetto, sets its retval to max_vc to communicate without pipes */
int main(int argc, char** argv)
{
	return max_vcores();
}