Example #1
/* callback, runs in vcore context.  this sets up our initial context.  once we
 * become runnable again, we'll run the first bits of the vm ctx.  after that,
 * our context will be stopped and started and will just run whatever the guest
 * VM wants.  we'll never come back to this code or to run_vm(). */
static void __build_vm_ctx_cb(struct uthread *uth, void *arg)
{
	struct pthread_tcb *pthread = (struct pthread_tcb*)uth;
	struct vmctl *vmctl = (struct vmctl*)arg;
	struct vm_trapframe *vm_tf;

	__pthread_generic_yield(pthread);
	pthread->state = PTH_BLK_YIELDING;

	memset(&uth->u_ctx, 0, sizeof(struct user_context));
	uth->u_ctx.type = ROS_VM_CTX;
	vm_tf = &uth->u_ctx.tf.vm_tf;

	vm_tf->tf_guest_pcoreid = 0;	/* assuming only 1 guest core */

	copy_vmctl_to_vmtf(vmctl, vm_tf);

	/* other HW/GP regs are 0, which should be fine.  the FP state is still
	 * whatever we were running before, though this is pretty much unnecessary.
	 * we mostly don't want crazy crap in the uth->as, and a non-current_uthread
	 * VM ctx is supposed to have something in its FP state (like HW ctxs). */
	save_fp_state(&uth->as);
	uth->flags |= UTHREAD_FPSAVED | UTHREAD_SAVED;

	uthread_runnable(uth);
}
Example #2
/* this will start the vm thread, and return when the thread has blocked,
 * with the right info in vmctl. */
static void run_vmthread(struct vmctl *vmctl)
{
	struct vm_trapframe *vm_tf;

	if (!vm_thread) {
		/* first time through, we make the vm thread.  the_ball was already
		 * grabbed right after it was alloc'd. */
		if (pthread_create(&vm_thread, NULL, run_vm, vmctl)) {
			perror("pth_create");
			exit(-1);
		}
		/* hack in our own handlers for some 2LS ops */
		old_thread_refl = sched_ops->thread_refl_fault;
		sched_ops->thread_refl_fault = vmm_thread_refl_fault;
	} else {
		copy_vmctl_to_vmtf(vmctl, &vm_thread->uthread.u_ctx.tf.vm_tf);
		uth_mutex_lock(the_ball);	/* grab it for the vm_thread */
		uthread_runnable((struct uthread*)vm_thread);
	}
	uth_mutex_lock(the_ball);
	/* We woke due to a vm exit.  Need to unlock for the next time we're run */
	uth_mutex_unlock(the_ball);
	/* the vm stopped.  we can do whatever we want before rerunning it.  since
	 * we're controlling the uth, we need to handle its vmexits.  we'll fill in
	 * the vmctl, since that's the current framework. */
	copy_vmtf_to_vmctl(&vm_thread->uthread.u_ctx.tf.vm_tf, vmctl);
}
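A typical caller of run_vmthread() just loops: each return means the guest exited with the details filled into vmctl, which must be handled before the guest is rerun. A minimal sketch of that driver loop follows; handle_vmexit() is hypothetical and stands in for whatever exit decoding the monitor actually does.

/* Hypothetical monitor loop around run_vmthread().  run_vmthread() returns on
 * each vm exit with vmctl filled in; handle_vmexit() is a stand-in for the
 * real exit handler and is not part of the code above. */
static void monitor_loop(struct vmctl *vmctl)
{
	for (;;) {
		run_vmthread(vmctl);		/* returns once the guest has exited */
		if (handle_vmexit(vmctl) < 0)	/* hypothetical exit handler */
			break;			/* unrecoverable exit: stop rerunning */
	}
}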
Example #3
static inline int futex_wake(int *uaddr, int count)
{
  struct futex_element *e,*n = NULL;
  struct futex_queue q = TAILQ_HEAD_INITIALIZER(q);

  // Atomically grab all relevant futex blockers
  // from the global futex queue
  mcs_pdr_lock(&__futex.lock);
  e = TAILQ_FIRST(&__futex.queue);
  while(e != NULL) {
    if(count > 0) {
      n = TAILQ_NEXT(e, link);
      if(e->uaddr == uaddr) {
        TAILQ_REMOVE(&__futex.queue, e, link);
        TAILQ_INSERT_TAIL(&q, e, link);
        count--;
      }
      e = n;
    }
    else break;
  }
  mcs_pdr_unlock(&__futex.lock);

  // Unblock them outside the lock
  e = TAILQ_FIRST(&q);
  while(e != NULL) {
    n = TAILQ_NEXT(e, link);
    TAILQ_REMOVE(&q, e, link);
    while(e->pthread == NULL)
      cpu_relax();
    uthread_runnable((struct uthread*)e->pthread);
    e = n;
  }
  return 0;
}
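futex_wake() pairs with futex_wait(), which, as called in Example #4, appears to take the futex address, the expected value, and a millisecond timeout (-1 for none). A minimal waiter/waker sketch under that assumption:

/* Sketch only: assumes futex_wait(uaddr, val, ms_timeout) blocks while *uaddr
 * still equals val, matching the call in Example #4. */
static int flag = 0;

static void waiter(void)
{
  while (flag == 0)
    futex_wait(&flag, 0, -1);	/* block until woken; -1 = no timeout */
}

static void waker(void)
{
  flag = 1;			/* publish the change before waking */
  futex_wake(&flag, 1);		/* wake at most one blocked waiter */
}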
Example #4
static void *timer_thread(void *arg)
{
  struct futex_element *e,*n = NULL;
  struct futex_queue q = TAILQ_HEAD_INITIALIZER(q);

  // Do this forever...
  for(;;) {
    // Block for 1 millisecond
    sys_block(1000);

    // Then atomically do the following...
    mcs_pdr_lock(&__futex.lock);
    // Up the time
    __futex.time++;

    // Find all futexes that have timed out on this iteration,
    // and count those still waiting
    int waiting = 0;
    e = TAILQ_FIRST(&__futex.queue);
    while(e != NULL) {
      n = TAILQ_NEXT(e, link);
      if(e->ms_timeout == __futex.time) {
        e->timedout = true;
        TAILQ_REMOVE(&__futex.queue, e, link);
        TAILQ_INSERT_TAIL(&q, e, link);
      }
      else if(e->ms_timeout != (uint64_t)-1)
        waiting++;
      e = n;
    }
    // If there are no more waiting, disable the timer
    if(waiting == 0) {
      __futex.time = 0;
      __futex.timer_enabled = false;
    }
    mcs_pdr_unlock(&__futex.lock);

    // Unblock any futexes that have timed out outside the lock
    e = TAILQ_FIRST(&q);
    while(e != NULL) {
      n = TAILQ_NEXT(e, link);
      TAILQ_REMOVE(&q, e, link);
      while(e->pthread == NULL)
        cpu_relax();
      uthread_runnable((struct uthread*)e->pthread);
      e = n;
    }

    // If we have disabled the timer, park this thread
    futex_wait(&__futex.timer_enabled, false, -1);
  }
}
Example #5
int pthread_create(pthread_t* thread, const pthread_attr_t* attr,
                   void *(*start_routine)(void *), void* arg)
{
	struct pthread_tcb *pthread =
	       (struct pthread_tcb*)uthread_create(__pthread_run, (void*)attr);
	if (!pthread)
		return -1;
	pthread->start_routine = start_routine;
	pthread->arg = arg;
	uthread_runnable((struct uthread*)pthread);
	*thread = pthread;
	return 0;
}
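Callers use this just like standard pthreads. A minimal usage sketch, assuming the library also provides the usual pthread_join():

#include <pthread.h>	/* or the library's own pthread header */
#include <stdio.h>

static void *worker(void *arg)
{
	printf("worker running, arg = %p\n", arg);
	return NULL;
}

int spawn_one(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, worker, NULL))
		return -1;
	pthread_join(t, NULL);	/* assumed available alongside pthread_create() */
	return 0;
}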
Example #6
int upthread_cond_signal(upthread_cond_t *c)
{
	if(c == NULL)
		return EINVAL;

	mcs_lock_qnode_t qnode = {0};
	mcs_pdr_lock(&c->lock, &qnode);
	upthread_t upthread = STAILQ_FIRST(&c->queue);
	if(upthread)
		STAILQ_REMOVE_HEAD(&c->queue, next);
	mcs_pdr_unlock(&c->lock, &qnode);

	if (upthread != NULL) {
		uthread_runnable((struct uthread*)upthread);
	}
	return 0;
}
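The signal side above only does anything useful with a waiter on the other end. A sketch of the usual pairing, assuming upthread_mutex_lock/unlock and upthread_cond_wait mirror their pthread counterparts and that m and c are initialized with the library's init routines:

/* Sketch only: the upthread lock/wait calls are assumed to mirror their
 * pthread counterparts; m and c are assumed to be initialized elsewhere. */
static upthread_mutex_t m;
static upthread_cond_t c;
static int ready = 0;

static void consumer(void)
{
	upthread_mutex_lock(&m);
	while (!ready)			/* recheck the condition: wakeups can be spurious */
		upthread_cond_wait(&c, &m);
	upthread_mutex_unlock(&m);
}

static void producer(void)
{
	upthread_mutex_lock(&m);
	ready = 1;
	upthread_mutex_unlock(&m);
	upthread_cond_signal(&c);	/* wake one waiter, as in Example #6 */
}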
Example #7
/* Restarts a uthread hanging off a syscall.  For the simple pthread case, we
 * just make it runnable and let the main scheduler code handle it.
 *
 * The pthread code relies on syscall handling being done per-vcore.  Don't try
 * and restart a thread on a different vcore, since you'll get screwed.  We have
 * a little test to catch that. */
static void restart_thread(struct syscall *sysc)
{
	uint32_t vcoreid = vcore_id();
	/* Using two vars to make the code simpler.  It's the same thread. */
	struct uthread *ut_restartee = (struct uthread*)sysc->u_data;
	struct pthread_tcb *pt_restartee = (struct pthread_tcb*)sysc->u_data;
	/* uthread stuff here: */
	assert(ut_restartee);
	assert(ut_restartee->state == UT_BLOCKED);
	assert(ut_restartee->sysc == sysc);
	ut_restartee->sysc = 0;	/* so we don't 'reblock' on this later */
	/* pthread stuff here: */
	/* Rip it from pending syscall list. */
	assert(pt_restartee->vcoreid == vcoreid);
	TAILQ_REMOVE(&sysc_mgmt[vcoreid].pending_syscs, pt_restartee, next);
	uthread_runnable(ut_restartee);
}
Example #8
int upthread_cond_broadcast(upthread_cond_t *c)
{
	if(c == NULL)
		return EINVAL;

	mcs_lock_qnode_t qnode = {0};
	while(1) {
		mcs_pdr_lock(&c->lock, &qnode);
		upthread_t upthread = STAILQ_FIRST(&c->queue);
		if(upthread)
			STAILQ_REMOVE_HEAD(&c->queue, next);
		else break;	/* queue drained: leave the loop with the lock held */
		mcs_pdr_unlock(&c->lock, &qnode);	/* drop the lock before waking */
		uthread_runnable((struct uthread*)upthread);
		memset(&qnode, 0, sizeof(mcs_lock_qnode_t));	/* fresh qnode for the next acquire */
	}
	/* the break above exits with the lock still held; release it here */
	mcs_pdr_unlock(&c->lock, &qnode);
	return 0;
}
Example #9
int upthread_mutex_unlock(upthread_mutex_t* mutex)
{
	if(mutex == NULL)
		return EINVAL;

	spin_pdr_lock(&mutex->lock);
	mutex->locked--;
	if(mutex->locked == 0) {
		upthread_t upthread = STAILQ_FIRST(&mutex->queue);
		if(upthread)
			STAILQ_REMOVE_HEAD(&mutex->queue, next);
		mutex->owner = NULL;
		spin_pdr_unlock(&mutex->lock);

		if(upthread != NULL) {
			uthread_runnable((struct uthread*)upthread);
		}
	}
	else {
		spin_pdr_unlock(&mutex->lock);
	}
	return 0;
}