Example #1
0
/**
 * Leave the current uthread and switch into vcore context to run the
 * scheduler code.
 *
 * Order matters throughout this function: notifications are disabled first,
 * then the TLS is swapped, then the stack pointer — after which no local
 * variables may be touched.  Never returns (ends in vcore_entry()).
 **/
void switch_to_vcore() {

	uint32_t vcoreid = vcore_id();

	/* Disable notifications.  Once we do this, we might miss a notif_pending,
	 * so we need to enter vcore entry later.  Need to disable notifs so we
	 * don't get in weird loops */
	struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
	vcpd->notif_enabled = FALSE;

	/* Grab a reference to the currently running thread on this vcore */
	thread_t *t = current_thread; 

	/* Switch to the vcore's tls region */
	extern void** vcore_thread_control_blocks;
	set_tls_desc(vcore_thread_control_blocks[vcoreid], vcoreid);
	
	/* Verify that the thread the vcore thinks was running is the same as the thread
	 * that was actually running.  current_thread is TLS-resident, so this read
	 * now goes through the vcore's TLS, not the uthread's. */
	assert(current_thread == t);

	/* Set the stack pointer to the stack of the vcore. 
	 * We know this function is always inlined because of the attribute we set
	 * on it, so there will be no stack unwinding when this function "returns".
	 * After this call, make sure you don't use local variables. */
	set_stack_pointer((void*)vcpd->transition_stack);
	assert(in_vcore_context());

	/* Leave the current vcore completely: no locals, only globals/TLS from
	 * here on, since we are now on the transition stack. */
	current_thread = NULL; 
	
	/* Restart the vcore and run the scheduler code; vcore_entry() should
	 * never return, hence the assert(0) backstop. */
	vcore_entry();
	assert(0);
}
Example #2
0
void bthread_exit(void* ret)
{
  struct mcs_lock_qnode local_qn = {0};
  bthread_once(&init_once,&_bthread_init);

  bthread_t t = bthread_self();

  mcs_lock_lock(&work_queue_lock, &local_qn);
  threads_active--;
  if(threads_active == 0)
    exit(0);
  mcs_lock_unlock(&work_queue_lock, &local_qn);

  if(t)
  {
    t->arg = ret;
    t->finished = 1;
    if(t->detached)
      free(t);
  }

  vcore_entry();
}
Example #3
0
/* The lowest level function jumped to by the kernel on every vcore_entry.
 * Currently, this function is only necessary so we can set the tls_desc from
 * the vcpd for non x86_64 architectures. We should consider removing this and
 * making it mandatory to set the tls_desc in the kernel. We wouldn't even
 * need to pass the vcore id to user space at all if we did this.  It would
 * already be set in the preinstalled TLS as __vcore_id.
 *
 * Never returns: hands off to vcore_entry(), with abort() as a backstop. */
static void __attribute__((noreturn)) __kernel_vcore_entry(void)
{
    /* The kernel sets the TLS desc for us, based on whatever is in VCPD.
     *
     * x86 32-bit TLS is pretty jacked up, so the kernel doesn't set the TLS
     * desc for us.  it's a little more expensive to do it here, esp for
     * amd64.  Can remove this when/if we overhaul 32 bit TLS.
     *
     * AFAIK, riscv's TLS changes are really cheap, and they don't do it in
     * the kernel (yet/ever), so they can set their TLS here too. */
    int id = __vcore_id_on_entry;
#ifndef __x86_64__
    set_tls_desc(vcpd_of(id)->vcore_tls_desc);
#endif
    /* Every time the vcore comes up, it must set that it is in vcore context.
     * uthreads may share the same TLS as their vcore (when uthreads do not have
     * their own TLS), and if a uthread was preempted, __vcore_context == FALSE,
     * and that will continue to be true the next time the vcore pops up.
     * Note this write must happen AFTER the TLS is installed above, since
     * __vcore_context lives in TLS. */
    __vcore_context = TRUE;
    vcore_entry();
    /* vcore_entry() is expected never to return; if it does, die loudly.
     * abort() never returns, so __builtin_unreachable() is a defensive hint
     * to the compiler backing the noreturn attribute. */
    fprintf(stderr, "vcore_entry() should never return!\n");
    abort();
    __builtin_unreachable();
}