Code example #1
0
File: vmrunkernel.c — Project: ihategit/akaros
/* this will start the vm thread, and return when the thread has blocked,
 * with the right info in vmctl. */
/* Starts (or resumes) the VM thread, and returns once that thread has blocked
 * again with the vmexit information filled into *vmctl.
 *
 * Synchronization: the 'the_ball' mutex hands control back and forth between
 * this controller and vm_thread.  The vm_thread's reflected-fault handler
 * (vmm_thread_refl_fault) unlocks the_ball on a vmexit, which is what wakes
 * us from the uth_mutex_lock() below.
 *
 * Fix: removed the unused local 'struct vm_trapframe *vm_tf' declaration. */
static void run_vmthread(struct vmctl *vmctl)
{
	if (!vm_thread) {
		/* First time through: create the vm thread.  the_ball was already
		 * grabbed right after it was alloc'd, so we block below until the
		 * new thread's first vmexit releases it. */
		if (pthread_create(&vm_thread, NULL, run_vm, vmctl)) {
			perror("pth_create");
			exit(-1);
		}
		/* Hack in our own handlers for some 2LS ops. */
		old_thread_refl = sched_ops->thread_refl_fault;
		sched_ops->thread_refl_fault = vmm_thread_refl_fault;
	} else {
		/* Resume: push the (possibly modified) vmctl back into the
		 * uthread's VM trapframe before letting it run again. */
		copy_vmctl_to_vmtf(vmctl, &vm_thread->uthread.u_ctx.tf.vm_tf);
		uth_mutex_lock(the_ball);	/* grab it for the vm_thread */
		uthread_runnable((struct uthread*)vm_thread);
	}
	uth_mutex_lock(the_ball);
	/* We woke due to a vm exit.  Need to unlock for the next time we're run */
	uth_mutex_unlock(the_ball);
	/* the vm stopped.  we can do whatever we want before rerunning it.  since
	 * we're controlling the uth, we need to handle its vmexits.  we'll fill in
	 * the vmctl, since that's the current framework. */
	copy_vmtf_to_vmctl(&vm_thread->uthread.u_ctx.tf.vm_tf, vmctl);
}
Code example #2
0
File: uth.c — Project: mballance/uex
/* Waits on condition variable 'c'.  The caller must hold mutex 't'; it is
 * released while we are blocked and re-acquired before returning. */
void uth_cond_wait(uth_cond_t *c, uth_mutex_t *t) {
	/* Lazily set up the main thread on first use of the uth API. */
	if (!prv_active_thread)
		uth_init_main_thread();

	/* Push the current thread onto the CV's waiter list (LIFO order). */
	prv_active_thread->next = c->waiters;
	c->waiters = prv_active_thread;

	/* Classic CV protocol: drop the mutex, sleep until signaled, then
	 * re-take the mutex before returning to the caller. */
	uth_mutex_unlock(t);
	uth_thread_block();
	uth_mutex_lock(t);
}
Code example #3
0
File: vmrunkernel.c — Project: ihategit/akaros
/* 2LS reflected-fault handler installed by run_vmthread.  For non-VM contexts
 * it defers to the original pthread 2LS op; for a VM context (a vmexit) it
 * parks this uthread and wakes the controller thread via 'the_ball'. */
static void vmm_thread_refl_fault(struct uthread *uth,
                                  struct user_context *ctx)
{
	struct pthread_tcb *pthread = (struct pthread_tcb*)uth;

	/* Hack to call the original pth 2LS op.
	 *
	 * Bugfix: this test was '!ctx->type == ROS_VM_CTX'.  Unary '!' binds
	 * tighter than '==', so that negated ctx->type to 0/1 and compared the
	 * result against the enum — not the intended "not a VM context". */
	if (ctx->type != ROS_VM_CTX) {
		old_thread_refl(uth, ctx);
		return;
	}
	__pthread_generic_yield(pthread);
	/* normally we'd handle the vmexit here.  to work within the existing
	 * framework, we just wake the controller thread.  It'll look at our ctx
	 * then make us runnable again */
	pthread->state = PTH_BLK_MUTEX;
	uth_mutex_unlock(the_ball);		/* wake the run_vmthread */
}
Code example #4
0
File: vmexit.c — Project: borisnorm/akaros
/* Blocks a guest pcore / thread until it has an IRQ pending.  Syncs with
 * vmm_interrupt_guest(). */
/* Blocks a guest pcore / thread until it has an IRQ pending.  Syncs with
 * vmm_interrupt_guest(). */
static void sleep_til_irq(struct guest_thread *gth)
{
	struct vmm_gpcore_init *gpci = gth_to_gpci(gth);

	/* Invariant: if an IRQ has been posted but not yet delivered, we must
	 * not sleep.  Any poster of an IRQ signals after setting it, which
	 * vmm_interrupt_guest() does; if we ever grow other IRQ-posting paths,
	 * this needs revisiting.
	 *
	 * vmm_interrupt_guest() only writes OUTSTANDING_NOTIF, but the hardware
	 * may have already attempted delivery.  In SDM terms, the processor can
	 * "recognize" a virtual IRQ without delivering it: e.g. the guest ran
	 * "sti" but not yet "hlt", so the IRQ was posted and recognized but held
	 * off ("sti blocking").  When the guest then executes "hlt" and vmexits,
	 * OUTSTANDING_NOTIF is clear, but RVI is still set (at least to the
	 * vector we sent, possibly higher if several were sent), since RVI is
	 * only cleared once virtual IRQs are actually delivered.  Checking both
	 * OUTSTANDING_NOTIF and RVI therefore suffices.
	 *
	 * Strictly, one would also consult GUEST_INTERRUPTIBILITY_INFO and the
	 * VPPR (priority register) before deciding an IRQ is deliverable.  But
	 * since we are emulating halt/mwait — something woken BY an IRQ — we can
	 * skip that and simply wake the thread: we never deliver the IRQ here,
	 * we just restart the guest and let hardware deliver the virtual IRQ at
	 * the right moment.  Worst case, the halt/mwait returns spuriously.
	 *
	 * The classic race is a halt racing a concurrent post; the mutex below
	 * orders the actual halt (this function) against the posting. */
	uth_mutex_lock(gth->halt_mtx);
	while (!pir_notif_is_set(gpci) && !rvi_is_set(gth))
		uth_cond_var_wait(gth->halt_cv, gth->halt_mtx);
	uth_mutex_unlock(gth->halt_mtx);
}
Code example #5
0
/* Malloc lock hook (newlib-style __malloc_lock/__malloc_unlock interface):
 * releases the process-wide malloc mutex.  The per-reent argument is unused —
 * presumably the counterpart __malloc_lock acquires the same global mutex. */
extern "C" void __malloc_unlock(struct _reent *reent)
{
	(void)reent;	/* global lock; per-reent state not needed */
	uth_mutex_unlock(&prv_malloc_mutex);
}