Code example #1 (score: 0)
File: thread0_sched.c — Project: windyuuy/akaros
/* 2LS-less blockon callback for thread0 (the lone uthread of an SCP):
 * registers an event queue on the blocked syscall and yields to the kernel
 * until it completes.  Invoked from vcore context via uthread_yield.
 *
 * @uthread: the yielding uthread (thread0); not used directly here.
 * @arg: the struct syscall * that the uthread blocked on. */
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
	struct syscall *sysc = (struct syscall*)arg;
	/* We're in vcore context.  Regardless of what we do here, we'll pop back in
	 * to vcore entry, just like with any uthread_yield.  We don't have a 2LS,
	 * but we always have one uthread: the SCP's thread0.  Note that at this
	 * point, current_uthread is still set, but will be cleared as soon as the
	 * callback returns (and before we start over in vcore_entry).
	 *
	 * If notif_pending is already set (due to a concurrent signal), we'll fail
	 * to yield.  Once in VC ctx, we'll handle any other signals/events that
	 * arrived, then restart the uthread that issued the syscall, which if the
	 * syscall isn't done yet, will just blockon again.
	 *
	 * The one trick is that we don't want to register the evq twice.  The way
	 * register_evq currently works, if a SC completed (SC_DONE) while we were
	 * registering, we could end up clearing sysc->ev_q before the kernel sees
	 * it.  We'll use u_data to track whether we registered or not. */
	/* Arbitrary sentinel value stamped into sysc->u_data to mark "already
	 * registered"; only compared for identity, never dereferenced. */
	#define U_DATA_BLOB ((void*)0x55555555)
	/* Short-circuit: if a previous pass through this callback already
	 * registered (u_data stamped), skip register_evq and just yield again.
	 * Otherwise register; register_evq returning nonzero means the syscall
	 * is still in flight, so it's safe to stamp and yield. */
	if ((sysc->u_data == U_DATA_BLOB)
	    || register_evq(sysc, &__ros_scp_simple_evq)) {
		sysc->u_data = U_DATA_BLOB;
		/* Sending false for now - we want to signal proc code that we want to
		 * wait (piggybacking on the MCP meaning of this variable).  If
		 * notif_pending is set, the kernel will immediately return us. */
		__ros_syscall_noerrno(SYS_yield, FALSE, 0, 0, 0, 0, 0);
	}
}
Code example #2 (score: 0)
File: sbrk.c — Project: 7perl/akaros
static int
__internal_setbrk (uintptr_t addr)
{
  uintptr_t real_new_brk = (addr + PGSIZE - 1)/PGSIZE*PGSIZE;
  uintptr_t real_brk = (__internal_getbrk() + PGSIZE - 1)/PGSIZE*PGSIZE;

  if(real_new_brk > real_brk)
  {
    if(real_new_brk > BRK_END)
      return -1;
	// calling mmap directly to avoid referencing errno before it is initialized.
    if ((void*)__ros_syscall_noerrno(SYS_mmap, (void*)real_brk,
	                                 real_new_brk-real_brk,
	                                 PROT_READ | PROT_WRITE | PROT_EXEC,
	                                 MAP_FIXED | MAP_ANONYMOUS,
	                                 -1, 0) != (void*)real_brk)
      return -1;
  }
  else if(real_new_brk < real_brk)
  {
    if(real_new_brk < (uintptr_t)__procinfo.heap_bottom)
      return -1;

    if (munmap((void*)real_new_brk, real_brk - real_new_brk))
      return -1;
  }

  curbrk = addr;
  return 0;
}
Code example #3 (score: 0)
File: syscall.c — Project: 7perl/akaros
/* Glibc initial blockon, usable before parlib code can init things (or if it
 * never can, like for RTLD).  MCPs will need the 'uthread-aware' blockon.
 *
 * @sysc: the in-flight syscall to block on until the kernel marks it done.
 *
 * Sequencing is load-bearing here: notifs are disabled for vcore 0 before
 * registering the event queue and re-enabled only after the yield returns,
 * with an explicit self-notify if one arrived in the window. */
void __ros_scp_syscall_blockon(struct syscall *sysc)
{
	/* Need to disable notifs before registering, so we don't take an __notify
	 * that drops us into VC ctx and forces us to eat the notif_pending that was
	 * meant to prevent us from yielding if the syscall completed early. */
	__procdata.vcore_preempt_data[0].notif_disabled = TRUE;
	/* Ask for a SYSCALL event when the sysc is done.  We don't need a handler,
	 * we just need the kernel to restart us from proc_yield.  If register
	 * fails, we're already done. */
	if (register_evq(sysc, &__ros_scp_simple_evq)) {
		/* Sending false for now - we want to signal proc code that we want to
		 * wait (piggybacking on the MCP meaning of this variable) */
		__ros_syscall_noerrno(SYS_yield, FALSE, 0, 0, 0, 0, 0);
	}
	/* Manually doing an enable_notifs for VC 0 */
	__procdata.vcore_preempt_data[0].notif_disabled = FALSE;
	wrmb();	/* need to read after the write that enabled notifs */
	/* A notif may have arrived while notifs were disabled; poke ourselves so
	 * the kernel delivers it now rather than leaving it pending. */
	if (__procdata.vcore_preempt_data[0].notif_pending)
		__ros_syscall_noerrno(SYS_self_notify, 0, EV_NONE, 0, TRUE, 0, 0);
}
Code example #4 (score: 0)
File: syscall.c — Project: windyuuy/akaros
/* Glibc initial blockon, usable before parlib code can init things (or if it
 * never can, like for RTLD).  As processes initialize further, they will use
 * different functions.
 *
 * In essence, we're in vcore context already.  For one, this function could be
 * called from a full SCP in vcore context.  For early processes, we are not
 * vcctx_ready.  Either way, we don't need to worry about the kernel forcing us
 * into vcore context and otherwise clearing notif_pending.  For those curious,
 * the old race was that the kernel sets notif pending after we register, then
 * we drop into VC ctx, clear notif pending, and yield.
 *
 * @sysc: the in-flight syscall to block on until the kernel marks it done.
 *
 * The clear of notif_pending MUST precede register_evq (ordering is handled
 * inside register_evq per the comment below) — do not reorder. */
void __ros_early_syscall_blockon(struct syscall *sysc)
{
	/* For early SCPs, notif_pending will probably be false anyways.  For SCPs
	 * in VC ctx, it might be set.  Regardless, when we pop back up,
	 * notif_pending will be set (for a full SCP in VC ctx). */
	__procdata.vcore_preempt_data[0].notif_pending = FALSE;
	/* order register after clearing notif_pending, handled by register_evq */
	/* Ask for a SYSCALL event when the sysc is done.  We don't need a handler,
	 * we just need the kernel to restart us from proc_yield.  If register
	 * fails, we're already done. */
	if (register_evq(sysc, &__ros_scp_simple_evq)) {
		/* Sending false for now - we want to signal proc code that we want to
		 * wait (piggybacking on the MCP meaning of this variable).  If
		 * notif_pending is set, the kernel will immediately return us. */
		__ros_syscall_noerrno(SYS_yield, FALSE, 0, 0, 0, 0, 0);
	}
	/* For early SCPs, the kernel turns off notif_pending for us.  For SCPs in
	 * vcore context that blocked (should be rare!), it'll still be set.  Other
	 * VC ctx code must handle it later. (could have coalesced notifs) */
}