Example #1
int lithe_mutex_lock(lithe_mutex_t *mutex)
{
  if(mutex == NULL)
    return EINVAL;

  mcs_lock_qnode_t qnode = {0};
  mcs_pdr_lock(&mutex->lock, &qnode);
  if(mutex->attr.type == LITHE_MUTEX_RECURSIVE &&
     mutex->owner == lithe_context_self()) {
    mutex->locked++;
  }
  else {
    while(mutex->locked) {
      /* Publish our qnode so the 'block' callback (run once this context
       * is suspended) can enqueue us and release the MCS lock. */
      mutex->qnode = &qnode;
      lithe_context_block(block, mutex);

      /* Woken up: the lock was dropped on our behalf, so reset the qnode
       * and reacquire it before rechecking mutex->locked. */
      memset(&qnode, 0, sizeof(mcs_lock_qnode_t));
      mcs_pdr_lock(&mutex->lock, &qnode);
    }
    mutex->owner = lithe_context_self();
    mutex->locked++;
  }
  mcs_pdr_unlock(&mutex->lock, &qnode);
  return 0;
}
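A quick usage sketch for the lock above, paired with lithe_mutex_unlock from Example #14 (mutex initialization is assumed and not shown in these examples):

/* Sketch: guarding a shared counter.  Assumes the mutex was initialized
 * elsewhere (init call not shown in these examples). */
lithe_mutex_t counter_mutex;
int counter = 0;

void counter_inc(void)
{
  lithe_mutex_lock(&counter_mutex);
  counter++;                        /* critical section */
  lithe_mutex_unlock(&counter_mutex);
}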
Example #2
static void pth_thread_refl_fault(struct uthread *uthread, unsigned int trap_nr,
                                  unsigned int err, unsigned long aux)
{
	struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
	pthread->state = PTH_BLK_SYSC;
	mcs_pdr_lock(&queue_lock);
	threads_active--;
	TAILQ_REMOVE(&active_queue, pthread, tq_next);
	mcs_pdr_unlock(&queue_lock);

	/* TODO: RISCV/x86 issue! (0 is divby0, 14 is PF, etc) */
#if defined(__i386__) || defined(__x86_64__) 
	switch(trap_nr) {
		case 0:
			handle_div_by_zero(uthread, err, aux);
			break;
		case 13:
			handle_gp_fault(uthread, err, aux);
			break;
		case 14:
			handle_page_fault(uthread, err, aux);
			break;
		default:
			printf("Pthread has unhandled fault: %d, err: %d, aux: %p\n",
			       trap_nr, err, aux);
			/* Note that uthread.c already copied out our ctx into the uth
			 * struct */
			print_user_context(&uthread->u_ctx);
			printf("Turn on printx to spew unhandled, malignant trap info\n");
			exit(-1);
	}
#else
	#error "Handling hardware faults is currently only supported on x86"
#endif
}
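For reference, the vector numbers matched by the switch above are the standard x86 exception vectors. A small enum naming them (the names are ours for illustration; the source uses the raw numbers):

/* Standard x86 exception vectors, named for readability. */
enum {
  X86_TRAP_DIVIDE = 0,   /* #DE: divide error */
  X86_TRAP_GPF    = 13,  /* #GP: general protection fault */
  X86_TRAP_PGFLT  = 14,  /* #PF: page fault */
};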
Example #3
/* GIANT WARNING: if you make any changes to this, also change the broadcast
 * wakeups (cond var, barrier, etc) */
static void pth_thread_runnable(struct uthread *uthread)
{
	struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
	/* At this point, the 2LS can see why the thread blocked and was woken up in
	 * the first place (coupling these things together).  On the yield path, the
	 * 2LS was involved and was able to set the state.  Now when we get the
	 * thread back, we can take a look. */
	printd("pthread %08p runnable, state was %d\n", pthread, pthread->state);
	switch (pthread->state) {
		case (PTH_CREATED):
		case (PTH_BLK_YIELDING):
		case (PTH_BLK_JOINING):
		case (PTH_BLK_SYSC):
		case (PTH_BLK_PAUSED):
		case (PTH_BLK_MUTEX):
			/* can do whatever for each of these cases */
			break;
		default:
			printf("Odd state %d for pthread %08p\n", pthread->state, pthread);
	}
	pthread->state = PTH_RUNNABLE;
	/* Insert the now-runnable thread (newly created or just woken) into the
	 * ready queue.  It will be removed from this queue later when
	 * vcore_entry() comes up */
	mcs_pdr_lock(&queue_lock);
	/* Again, GIANT WARNING: if you change this, change batch wakeup code */
	TAILQ_INSERT_TAIL(&ready_queue, pthread, tq_next);
	threads_ready++;
	mcs_pdr_unlock(&queue_lock);
	/* Smarter schedulers should look at the num_vcores() and how much work is
	 * going on to make a decision about how many vcores to request. */
	if (can_adjust_vcores)
		vcore_request(threads_ready);
}
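These pth_thread_* callbacks reach parlib through the scheduler ops table. A hedged sketch of that wiring, limited to the callbacks shown in these examples (field names follow Akaros's struct schedule_ops and may differ across versions; the real table has more entries):

/* Hedged sketch: how a 2LS hands its callbacks to parlib.  Field names
 * may differ across Akaros/parlib versions. */
struct schedule_ops pthread_sched_ops = {
	.sched_entry = pth_sched_entry,
	.thread_runnable = pth_thread_runnable,
	.thread_paused = pth_thread_paused,
	.thread_blockon_sysc = pth_thread_blockon_sysc,
	.thread_refl_fault = pth_thread_refl_fault,
};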
Example #4
static inline int futex_wake(int *uaddr, int count)
{
  struct futex_element *e,*n = NULL;
  struct futex_queue q = TAILQ_HEAD_INITIALIZER(q);

  // Atomically grab all relevant futex blockers
  // from the global futex queue
  mcs_pdr_lock(&__futex.lock);
  e = TAILQ_FIRST(&__futex.queue);
  while(e != NULL && count > 0) {
    n = TAILQ_NEXT(e, link);
    if(e->uaddr == uaddr) {
      TAILQ_REMOVE(&__futex.queue, e, link);
      TAILQ_INSERT_TAIL(&q, e, link);
      count--;
    }
    e = n;
  }
  mcs_pdr_unlock(&__futex.lock);

  // Unblock them outside the lock
  e = TAILQ_FIRST(&q);
  while(e != NULL) {
    n = TAILQ_NEXT(e, link);
    TAILQ_REMOVE(&q, e, link);
    while(e->pthread == NULL)
      cpu_relax();
    uthread_runnable((struct uthread*)e->pthread);
    e = n;
  }
  return 0;
}
Example #5
/* Called from vcore entry.  Options usually include restarting whoever was
 * running there before or running a new thread.  Events are handled out of
 * event.c (table of function pointers, stuff like that). */
static void __attribute__((noreturn)) pth_sched_entry(void)
{
	uint32_t vcoreid = vcore_id();
	if (current_uthread) {
		/* Prep the pthread to run any pending posix signal handlers registered
		 * via pthread_kill once it is restored. */
		__pthread_prep_for_pending_posix_signals((pthread_t)current_uthread);
		/* Run the thread itself */
		run_current_uthread();
		assert(0);
	}
	/* no one currently running, so let's get someone from the ready queue */
	struct pthread_tcb *new_thread = NULL;
	/* Try to get a thread.  If we get one, we'll break out and run it.  If not,
	 * we'll try to yield.  vcore_yield() might return, if we lost a race and
	 * had a new event come in, one that may make us able to get a new_thread */
	do {
		handle_events(vcoreid);
		__check_preempt_pending(vcoreid);
		mcs_pdr_lock(&queue_lock);
		new_thread = TAILQ_FIRST(&ready_queue);
		if (new_thread) {
			TAILQ_REMOVE(&ready_queue, new_thread, tq_next);
			TAILQ_INSERT_TAIL(&active_queue, new_thread, tq_next);
			threads_active++;
			threads_ready--;
			mcs_pdr_unlock(&queue_lock);
			/* If you see what looks like the same uthread running in multiple
			 * places, your list might be jacked up.  Turn this on. */
			printd("[P] got uthread %08p on vc %d state %08p flags %08p\n",
			       new_thread, vcoreid,
			       ((struct uthread*)new_thread)->state,
			       ((struct uthread*)new_thread)->flags);
			break;
		}
		mcs_pdr_unlock(&queue_lock);
		/* no new thread, try to yield */
		printd("[P] No threads, vcore %d is yielding\n", vcore_id());
		/* TODO: you can imagine having something smarter here, like spin for a
		 * bit before yielding (or not at all if you want to be greedy). */
		if (can_adjust_vcores)
			vcore_yield(FALSE);
		if (!parlib_wants_to_be_mcp)
			sys_yield(FALSE);
	} while (1);
	assert(new_thread->state == PTH_RUNNABLE);
	/* Prep the pthread to run any pending posix signal handlers registered
	 * via pthread_kill once it is restored. */
	__pthread_prep_for_pending_posix_signals(new_thread);
	/* Run the thread itself */
	run_uthread((struct uthread*)new_thread);
	assert(0);
}
Example #6
static void *timer_thread(void *arg)
{
  struct futex_element *e,*n = NULL;
  struct futex_queue q = TAILQ_HEAD_INITIALIZER(q);

  // Do this forever...
  for(;;) {
    // Block for 1 millisecond
    sys_block(1000);

    // Then atomically do the following...
    mcs_pdr_lock(&__futex.lock);
    // Up the time
    __futex.time++;

    // Find all futexes that have timed out on this iteration,
    // and count those still waiting
    int waiting = 0;
    e = TAILQ_FIRST(&__futex.queue);
    while(e != NULL) {
      n = TAILQ_NEXT(e, link);
      if(e->ms_timeout == __futex.time) {
        e->timedout = true;
        TAILQ_REMOVE(&__futex.queue, e, link);
        TAILQ_INSERT_TAIL(&q, e, link);
      }
      else if(e->ms_timeout != (uint64_t)-1)
        waiting++;
      e = n;
    }
    // If there are no more waiting, disable the timer
    if(waiting == 0) {
      __futex.time = 0;
      __futex.timer_enabled = false;
    }
    mcs_pdr_unlock(&__futex.lock);

    // Unblock any futexes that have timed out outside the lock
    e = TAILQ_FIRST(&q);
    while(e != NULL) {
      n = TAILQ_NEXT(e, link);
      TAILQ_REMOVE(&q, e, link);
      while(e->pthread == NULL)
        cpu_relax();
      uthread_runnable((struct uthread*)e->pthread);
      e = n;
    }

    // If we have disabled the timer, park this thread
    futex_wait(&__futex.timer_enabled, false, -1);
  }
}
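Nothing in these examples shows who starts timer_thread; presumably the runtime spawns it once while setting up the futex state. A purely hypothetical init sketch (futex_init, mcs_pdr_init, and the use of pthread_create here are our assumptions, not from the source):

/* Hypothetical one-time setup, not shown in the source examples:
 * initialize the global futex state and spawn the timeout driver. */
static void futex_init(void)
{
  mcs_pdr_init(&__futex.lock);   /* assumed parlib-style init call */
  TAILQ_INIT(&__futex.queue);
  __futex.time = 0;
  __futex.timer_enabled = false;
  pthread_t timer;
  pthread_create(&timer, NULL, timer_thread, NULL);
}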
Example #7
int upthread_cond_wait(upthread_cond_t *c, upthread_mutex_t *m)
{
	if(c == NULL)
		return EINVAL;
	if(m == NULL)
		return EINVAL;

	mcs_lock_qnode_t qnode = {0};
	mcs_pdr_lock(&c->lock, &qnode);
	/* Stash the mutex and our qnode so the 'block' callback (run after we
	 * yield) can enqueue us and release both m and c->lock. */
	c->waiting_mutex = m;
	c->waiting_qnode = &qnode;
	uthread_yield(true, block, c);
	/* Signaled: reacquire the mutex before returning, per condvar rules. */
	return upthread_mutex_lock(m);
}
Example #8
int upthread_cond_signal(upthread_cond_t *c)
{
	if(c == NULL)
		return EINVAL;

	mcs_lock_qnode_t qnode = {0};
	mcs_pdr_lock(&c->lock, &qnode);
	/* Pop at most one waiter while holding the cond lock; it is woken
	 * below, outside the lock. */
	upthread_t upthread = STAILQ_FIRST(&c->queue);
	if(upthread)
		STAILQ_REMOVE_HEAD(&c->queue, next);
	mcs_pdr_unlock(&c->lock, &qnode);

	if (upthread != NULL) {
		uthread_runnable((struct uthread*)upthread);
	}
	return 0;
}
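Typical use of this wait/signal pair follows the standard condition-variable pattern: re-check the predicate in a loop while holding the mutex. A sketch, assuming pthread-style upthread_mutex_lock/unlock calls exist alongside the cond API (initialization not shown):

/* Sketch: standard predicate + condvar pattern with these primitives. */
upthread_mutex_t m;
upthread_cond_t c;
int ready = 0;

void consumer(void)
{
	upthread_mutex_lock(&m);
	while (!ready)	/* loop: re-check the predicate after each wakeup */
		upthread_cond_wait(&c, &m);
	upthread_mutex_unlock(&m);
}

void producer(void)
{
	upthread_mutex_lock(&m);
	ready = 1;
	upthread_mutex_unlock(&m);
	upthread_cond_signal(&c);	/* wake one waiter, outside the mutex */
}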
Example #9
/* For some reason not under its control, the uthread stopped running (compared
 * to yield, which was caused by uthread/2LS code).
 *
 * The main case for this is if the vcore was preempted or if the vcore it was
 * running on needed to stop.  You are given a uthread that looks like it took a
 * notif, and had its context/silly state copied out to the uthread struct.
 * (copyout_uthread).  Note that this will be called in the context (TLS) of the
 * vcore that is losing the uthread.  If that vcore is running, it'll be in a
 * preempt-event handling loop (not in your 2LS code).  If this is a big
 * problem, I'll change it. */
static void pth_thread_paused(struct uthread *uthread)
{
	struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
	/* Remove from the active list.  Note that I don't particularly care about
	 * the active list.  We keep it around because it causes bugs and keeps us
	 * honest.  After all, some 2LS may want an active list */
	mcs_pdr_lock(&queue_lock);
	threads_active--;
	TAILQ_REMOVE(&active_queue, pthread, tq_next);
	mcs_pdr_unlock(&queue_lock);
	/* communicate to pth_thread_runnable */
	pthread->state = PTH_BLK_PAUSED;
	/* At this point, you could do something clever, like put it at the front of
	 * the runqueue, see if it was holding a lock, do some accounting, or
	 * whatever. */
	pth_thread_runnable(uthread);
}
Example #10
static inline int futex_wait(int *uaddr, int val, uint64_t ms_timeout)
{
  // Atomically do the following...
  mcs_pdr_lock(&__futex.lock);
  // If the value of *uaddr matches val
  if(*uaddr == val) {
    // Create a new futex element and initialize it.
    struct futex_element e;
    bool enable_timer = false;
    e.uaddr = uaddr;
    e.pthread = NULL;
    e.ms_timeout = ms_timeout;
    e.timedout = false;
    if(e.ms_timeout != (uint64_t)-1) {
      e.ms_timeout += __futex.time;
      // If we are setting the timeout, get ready to
      // enable the timer if it is currently disabled.
      if(__futex.timer_enabled == false) {
        __futex.timer_enabled = true;
        enable_timer = true;
      }
    }
    // Insert the futex element into the queue
    TAILQ_INSERT_TAIL(&__futex.queue, &e, link);
    mcs_pdr_unlock(&__futex.lock);

    // Enable the timer if we need to outside the lock
    if(enable_timer)
      futex_wake(&__futex.timer_enabled, 1);

    // Yield the current uthread
    uthread_yield(TRUE, __futex_block, &e);

    // After waking, if we timed out, set the error
    // code appropriately and return
    if(e.timedout) {
      errno = ETIMEDOUT;
      return -1;
    }
  }
  else {
    mcs_pdr_unlock(&__futex.lock);
  }
  return 0;
}
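Taken together with futex_wake from Example #4, the classic usage is a shared flag: the waiter sleeps only while the flag still holds the expected value, and the waker flips it and wakes. A sketch (the plain int accesses are illustrative; production code would use atomics):

/* Sketch: pairing futex_wait/futex_wake around a flag. */
int flag = 0;

void waiter(void)
{
  while (flag == 0) {
    /* Sleeps only if flag is still 0 when checked under the futex lock;
     * (uint64_t)-1 means no timeout. */
    futex_wait(&flag, 0, (uint64_t)-1);
  }
}

void waker(void)
{
  flag = 1;
  futex_wake(&flag, 1);   /* wake at most one waiter on &flag */
}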
Example #11
int upthread_cond_broadcast(upthread_cond_t *c)
{
	if(c == NULL)
		return EINVAL;

	mcs_lock_qnode_t qnode = {0};
	while(1) {
		mcs_pdr_lock(&c->lock, &qnode);
		upthread_t upthread = STAILQ_FIRST(&c->queue);
		if(upthread)
			STAILQ_REMOVE_HEAD(&c->queue, next);
		else break;	/* queue drained; we still hold the lock here */
		mcs_pdr_unlock(&c->lock, &qnode);
		/* Wake this waiter outside the lock, then reset the qnode so it
		 * can be reused on the next acquisition. */
		uthread_runnable((struct uthread*)upthread);
		memset(&qnode, 0, sizeof(mcs_lock_qnode_t));
	}
	mcs_pdr_unlock(&c->lock, &qnode);
	return 0;
}
Example #12
/* This will be called from vcore context, after the current thread has yielded
 * and is trying to block on sysc.  Need to put it somewhere we can wake it up
 * when the sysc is done.  For now, we'll have the kernel send us an event when
 * that happens. */
static void pth_thread_blockon_sysc(struct uthread *uthread, void *syscall)
{
	struct syscall *sysc = (struct syscall*)syscall;
	int old_flags;
	uint32_t vcoreid = vcore_id();
	/* rip from the active queue */
	struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
	pthread->state = PTH_BLK_SYSC;
	mcs_pdr_lock(&queue_lock);
	threads_active--;
	TAILQ_REMOVE(&active_queue, pthread, tq_next);
	mcs_pdr_unlock(&queue_lock);
	/* Set things up so we can wake this thread up later */
	sysc->u_data = uthread;
	/* Register our vcore's syscall ev_q to hear about this syscall. */
	if (!register_evq(sysc, sysc_mgmt[vcoreid].ev_q)) {
		/* Lost the race with the call being done.  The kernel won't send the
		 * event.  Just restart him. */
		restart_thread(sysc);
	}
	/* GIANT WARNING: do not touch the thread after this point. */
}
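The wake-up half of this path is the restart_thread() call above. A sketch of what it plausibly does, based on the invariants visible here (sysc->u_data holds the blocked uthread, whose state is PTH_BLK_SYSC); the real version may differ:

/* Sketch of the completion side: recover the blocked uthread from the
 * finished syscall and hand it back to the scheduler. */
static void restart_thread(struct syscall *sysc)
{
	struct uthread *ut_restartee = (struct uthread*)sysc->u_data;
	assert(ut_restartee);
	assert(((struct pthread_tcb*)ut_restartee)->state == PTH_BLK_SYSC);
	pth_thread_runnable(ut_restartee);
}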
Example #13
int lithe_mutex_trylock(lithe_mutex_t *mutex)
{
  if(mutex == NULL)
    return EINVAL;

  int retval = 0;
  mcs_lock_qnode_t qnode = {0};
  mcs_pdr_lock(&mutex->lock, &qnode);
  if(mutex->attr.type == LITHE_MUTEX_RECURSIVE &&
     mutex->owner == lithe_context_self()) {
    mutex->locked++;
  }
  else if(mutex->locked) {
    /* Held by another context (or non-recursive re-entry): don't block. */
    retval = EBUSY;
  }
  else {
    mutex->owner = lithe_context_self();
    mutex->locked++;
  }
  mcs_pdr_unlock(&mutex->lock, &qnode);
  return retval;
}
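Because trylock reports EBUSY instead of blocking, it suits opportunistic fast paths. A small usage sketch:

/* Sketch: take the lock only if it is free; otherwise do other work. */
void try_fast_path(lithe_mutex_t *mutex)
{
  if (lithe_mutex_trylock(mutex) == 0) {
    /* ... critical section ... */
    lithe_mutex_unlock(mutex);
  }
  else {
    /* EBUSY: someone else holds it; skip rather than block */
  }
}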
Example #14
int lithe_mutex_unlock(lithe_mutex_t *mutex)
{
  if(mutex == NULL)
    return EINVAL;

  mcs_lock_qnode_t qnode = {0};
  mcs_pdr_lock(&mutex->lock, &qnode);
  mutex->locked--;
  if(mutex->locked == 0) {
    /* Fully released: dequeue the first blocked context, if any. */
    lithe_context_t *context = TAILQ_FIRST(&mutex->queue);
    if(context)
      TAILQ_REMOVE(&mutex->queue, context, link);
    mutex->owner = NULL;
    mcs_pdr_unlock(&mutex->lock, &qnode);

    /* Unblock it outside the lock. */
    if(context != NULL) {
      lithe_context_unblock(context);
    }
  }
  else {
    mcs_pdr_unlock(&mutex->lock, &qnode);
  }
  return 0;
}
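With LITHE_MUTEX_RECURSIVE, lithe_mutex_lock counts repeat acquisitions by the owning context and lithe_mutex_unlock only hands off to waiters once the count reaches zero. A sketch of nested use (how the mutex is initialized with a recursive attr is not shown in these examples, so that step is only noted in a comment):

/* Sketch: nested acquisition with a recursive lithe mutex.  'rm' must be
 * initialized with attr.type == LITHE_MUTEX_RECURSIVE (init not shown). */
lithe_mutex_t rm;

void nested(void)
{
  lithe_mutex_lock(&rm);      /* locked == 1, owner == us */
  lithe_mutex_lock(&rm);      /* owner re-entry: locked == 2, no blocking */
  lithe_mutex_unlock(&rm);    /* locked == 1, still held */
  lithe_mutex_unlock(&rm);    /* locked == 0, first waiter (if any) runs */
}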