/* enable_my_notif tells the kernel whether or not it is okay to turn on notifs
 * when our calling vcore 'yields'.  This controls whether or not the vcore
 * will get started from vcore_entry() or not, and whether or not remote cores
 * need to sys_change_vcore to preempt-recover the calling vcore.  Only set
 * this to FALSE if you are unable to handle starting fresh at vcore_entry().
 * One example of this is in mcs_pdr_locks.
 *
 * Will return:
 *	0 if we successfully changed to the target vcore.
 *	-EBUSY if the target vcore is already mapped (a good kind of failure)
 *	-EAGAIN if we failed for some other reason and need to try again.  For
 *	example, the caller could be preempted, and we never even attempted to
 *	change.
 *	-EINVAL some userspace bug */
int sys_change_vcore(uint32_t vcoreid, bool enable_my_notif)
{
	/* Since we might be asking to start up on a fresh stack (if
	 * enable_my_notif), we need to use some non-stack memory for the struct
	 * sysc.  Our vcore could get restarted before the syscall finishes (after
	 * unlocking the proc, before finish_sysc()), and the act of finishing
	 * would write onto our stack.  Thus we use the per-vcore struct. */
	int flags;

	/* Need to wait while a previous syscall is not done or locked.  Since
	 * this should only be called from VC ctx, we'll just spin.  Should be
	 * extremely rare.  Note flags is initialized to SC_DONE, so the common
	 * case exits the loop after one read.  We must see both SC_DONE set and
	 * SC_K_LOCK clear before the struct is safe to reuse. */
	do {
		cpu_relax();
		flags = atomic_read(&__vcore_one_sysc.flags);
	} while (!(flags & SC_DONE) || flags & SC_K_LOCK);
	/* Fill in the per-vcore syscall struct only after the spin above proved
	 * the kernel is done with any previous use of it. */
	__vcore_one_sysc.num = SYS_change_vcore;
	__vcore_one_sysc.arg0 = vcoreid;
	__vcore_one_sysc.arg1 = enable_my_notif;
	/* keep in sync with glibc sysdeps/ros/syscall.c */
	__ros_arch_syscall((long)&__vcore_one_sysc, 1);
	/* If we returned, either we wanted to (!enable_my_notif) or we failed.
	 * Need to wait til the sysc is finished to find out why.  Again, its
	 * okay to just spin. */
	do {
		cpu_relax();
		flags = atomic_read(&__vcore_one_sysc.flags);
	} while (!(flags & SC_DONE) || flags & SC_K_LOCK);
	return __vcore_one_sysc.retval;
}
/* Issue a single syscall and block into the 2LS until it completes. */
static inline void __ros_syscall_sync(struct syscall *sysc)
{
	int cur_flags;

	/* Hand exactly one syscall to the kernel (the "array" has one entry). */
	__ros_arch_syscall((long)sysc, 1);
	/* Block via the 2LS each time we observe the syscall is still pending. */
	for (cur_flags = atomic_read(&sysc->flags);
	     !(cur_flags & SC_DONE);
	     cur_flags = atomic_read(&sysc->flags))
		ros_syscall_blockon(sysc);
	/* SC_DONE alone isn't enough: the kernel may still hold its lock on the
	 * struct.  Spin until SC_K_LOCK drops, at which point it's truly done. */
	while (atomic_read(&sysc->flags) & SC_K_LOCK)
		cpu_relax();
}
/* Fill in *sysc with num and up to six long args, then fire it at the kernel
 * asynchronously.  The caller polls/blocks on sysc->flags for completion.
 * FIX: the flags field is accessed atomically everywhere else in this file
 * (atomic_read in the waiters, atomic_set in syscall_async_evq), so clear it
 * with atomic_set rather than a plain, non-atomic store. */
void syscall_async(struct syscall *sysc, unsigned long num, ...)
{
	va_list args;

	sysc->num = num;
	atomic_set(&sysc->flags, 0);
	sysc->ev_q = 0;		/* not necessary, but good for debugging */
	/* This is a little dangerous, since we'll usually pull more args than
	 * were passed in, ultimately reading gibberish off the stack. */
	va_start(args, num);
	sysc->arg0 = va_arg(args, long);
	sysc->arg1 = va_arg(args, long);
	sysc->arg2 = va_arg(args, long);
	sysc->arg3 = va_arg(args, long);
	sysc->arg4 = va_arg(args, long);
	sysc->arg5 = va_arg(args, long);
	va_end(args);
	__ros_arch_syscall((long)sysc, 1);
}
/* Like syscall_async, but arms the syscall to fire an event on evq when it
 * completes: SC_UEVENT is set in flags and ev_q points at the caller's queue. */
void syscall_async_evq(struct syscall *sysc, struct event_queue *evq,
                       unsigned long num, ...)
{
	va_list ap;

	sysc->num = num;
	sysc->ev_q = evq;
	atomic_set(&sysc->flags, SC_UEVENT);
	/* This is a little dangerous, since we'll usually pull more args than
	 * were passed in, ultimately reading gibberish off the stack. */
	va_start(ap, num);
	sysc->arg0 = va_arg(ap, long);
	sysc->arg1 = va_arg(ap, long);
	sysc->arg2 = va_arg(ap, long);
	sysc->arg3 = va_arg(ap, long);
	sysc->arg4 = va_arg(ap, long);
	sysc->arg5 = va_arg(ap, long);
	va_end(ap);
	__ros_arch_syscall((long)sysc, 1);
}
int main(int argc, char** argv) { int num_started, retval; unsigned int ev_type; /* register our syscall handler (2LS does this) */ register_ev_handler(EV_SYSCALL, handle_syscall, 0); printf("Trying to block\n"); /* Not doing anything else to it: no EVENT_IPI yet, etc. */ ev_q = get_eventq(); /* issue the diagnostic block syscall */ sysc.num = SYS_block; sysc.arg0 = 5000; /* 5ms */ sysc.ev_q = ev_q; /* Trap */ num_started = __ros_arch_syscall((long)&sysc, 1); if (!(atomic_read(&sysc.flags) & SC_DONE)) printf("Not done, looping!\n"); /* You could poll on this. This is really ghetto, but i got rid of * event_activity, whose sole purpose was to encourage spinning. */ while (!(atomic_read(&sysc.flags) & SC_DONE)) cpu_relax(); handle_event_q(ev_q); /* by now, we should have run our handler */ /********************************************************/ /* Start MCP / IPI test */ printf("Switching to _M mode and testing an IPI-d ev_q\n"); printf("Our indirect ev_q is %08p\n", ev_q); /* begin: stuff userspace needs to do before switching to multi-mode */ /* Note we don't need to set up event reception for any particular kevent. * The ev_q in the syscall said to send an IPI to vcore 0 which means an * EV_EVENT will be sent straight to vcore0. */ /* Inits a thread for us, though we won't use it. Just a hack to get into * _M mode. 
Note this requests one vcore for us */ struct uthread dummy = {0}; uthread_2ls_init(&dummy, &ghetto_sched_ops); uthread_mcs_init(); /* Need to save our floating point state somewhere (like in the * user_thread_tcb so it can be restarted too */ enable_notifs(0); /* end: stuff userspace needs to do before switching to multi-mode */ retval = vcore_request(1); if (retval < 0) printf("No cores granted, Rut Ro Raggy!\n"); /* now we're back in thread 0 on vcore 0 */ ev_q->ev_flags = EVENT_IPI; ev_q->ev_vcore = 0; sysc.u_data = (void*)1; /* using this to loop on */ /* issue the diagnostic blocking syscall */ sysc.num = SYS_block; sysc.arg0 = 5000; /* 5ms */ sysc.ev_q = ev_q; num_started = __ros_arch_syscall((long)&sysc, 1); /* have this thread "wait" */ if (!(atomic_read(&sysc.flags) & SC_DONE)) printf("Not done, looping on a local variable!\n"); while (sysc.u_data) cpu_relax(); assert(atomic_read(&sysc.flags) & SC_DONE); printf("Syscall unblocked, IPI broke me out of the loop.\n"); /* done */ put_eventq(ev_q); printf("Syscall test exiting\n"); return 0; }