/**
   Wait for an event on a wait queue, coordinating with a mutex
   (condition-variable style: the caller enters holding @a mtx, sleeps with
   it released, and returns holding it again).
   @param[in] wq  wait queue to sleep on
   @param[in] mtx mutex protecting the condition associated with @a wq
   @return the wakeup reason recorded in the wait queue by the waker
   @note Runs with interrupts disabled around all queue manipulation;
         only the sleep inside sched_schedule() lets other threads run.
 */
wq_reason wque_wait_on_event_with_mutex(wait_queue *wq, struct _mutex *mtx) {
	psw_t psw;
	/* The wait-queue entry lives on this thread's stack; it is unlinked
	   again before this function returns, so that is safe. */
	wait_queue_entry ent, *ep = &ent;
	wq_reason reason;

	init_list_node(&ep->link);
	psw_disable_and_save_interrupt(&psw);
	rdq_remove_thread(current);           /* leave the ready queue          */
	wque_add_thread(wq, ep, current);     /* presumably sets ep->thr = current — confirm in wque_add_thread */
	ep->thr->status = THR_TSTATE_WAIT;
	mutex_release(mtx);                   /* drop the mutex before sleeping so the
	                                         waker can acquire it and signal the event */
	sched_schedule();                     /* Reschedule because current task is slept. */
	mutex_hold(mtx);                      /* re-acquire before touching the queue again */
	wque_remove_entry(wq, ep);            /* entry may already be unlinked by the waker;
	                                         assumed idempotent — TODO confirm */
	reason = wq->reason;
	psw_restore_interrupt(&psw);
	return reason;
}
/*
 * Release one task blocked on the semaphore, if any.
 *
 * The semaphore's spinlock is held for the whole operation, including the
 * hand-off of the woken task to the scheduler.
 */
void semaphore_signal(semaphore_t* semaphore) {
	spinlock_lock(&semaphore->lock);

	if (!semaphore->waiting_tasks) {
		/* nobody is blocked on this semaphore */
		spinlock_unlock(&semaphore->lock);
		return;
	}

	/* dequeue the first waiter and hand it to the scheduler */
	task_t *woken;
	task_list_pop(&semaphore->waiting_tasks, &woken);
	sched_schedule(woken);

	spinlock_unlock(&semaphore->lock);
}
// check if the given signal is not to ignore, and if not, prepare process // to handle it and remove it from the pending signals static void try_deliver_one(struct process *proc, int sig) { int sigindex; struct sigaction *action; sigdelset(& proc->sig_pending, sig); sigindex = _trans_number2index[sig]; if(sigindex == _SIGUNDEF) { printk(LOG_WARNING, "signal: unimplemented SIG %d ignored\n", sig); } else { action = &(proc->sig_array[sigindex]); if(action->sa_handler == SIG_IGN || (action->sa_handler == SIG_DFL && _trans_index2default[sigindex] == SIGDFL_IGN) ) { // ignored signal } else { if(action->sa_handler == SIG_DFL) { switch(_trans_index2default[sigindex]) { case SIGDFL_TERM: process_terminate(proc, _WSTATUS_TERMSIG(sig)); sched_schedule(); // TODO be sure the process is *not* executed after this function ret break; case SIGDFL_STOP: sched_stop_proc(proc, sig); break; case SIGDFL_CONT: // TODO : maybe this specific SIGCONT signal is better handled // in the signal_raise() function, so it's working even if // process is not in a runnable state? break; } } else { // this is a user-defined handler arch_signal_prepare_sigcontext(proc, action, sig); proc->sig_blocked |= (action->sa_mask & ~(SIGKILL | SIGSTOP)); arch_kernel_contextjmp(proc->acnt, & proc->acnt); //process_contextjmp(proc); } } } }
/*
 * Boot-time activation setup: register the already-running kernel context as
 * activation 0, retroactively create the init activation from a subset of the
 * kernel's own default capability / PCC, schedule init, and return its context.
 *
 * own_context : the kernel's pre-existing context (recorded, not created here)
 * info        : config block handed to init in $c3 (the conventional register)
 * init_base   : offset of the init image inside the current default capability
 *               (init is loaded directly after the kernel)
 * init_entry  : init's virtual entry point, relative to init_base
 *
 * Returns the context of the init activation.
 */
context_t act_init(context_t own_context, init_info_t* info, size_t init_base, size_t init_entry) {
	KERNEL_TRACE("init", "activation init");

	internel_if.message_send = kernel_seal(act_send_message_get_trampoline(), act_ref_type);
	internel_if.message_reply = kernel_seal(act_send_return_get_trampoline(), act_sync_ref_type);
	setup_syscall_interface(&internel_if);

	kernel_next_act = 0;

	// This is a dummy. Our first context has already been created
	reg_frame_t frame;
	bzero(&frame, sizeof(struct reg_frame));

	// Register the kernel (exception) activation
	act_t * kernel_act = &kernel_acts[0];
	act_register(&frame, &kernel_queue.queue, "kernel", status_terminated, NULL, cheri_getbase(cheri_getpcc()));
	/* The kernel context already exists and we set it here */
	kernel_act->context = own_context;

	// Create and register the init activation
	KERNEL_TRACE("act", "Retroactively creating init activation");

	/* Not a dummy here. We will subset our own c0/pcc for init. init is
	 * loaded directly after the kernel */
	bzero(&frame, sizeof(struct reg_frame));

	// Derive init's data capability (c0): the tail of our default capability
	// starting at init_base, bounded to the remaining length.
	size_t length = cheri_getlen(cheri_getdefault()) - init_base;
	frame.cf_c0 = cheri_setbounds(cheri_setoffset(cheri_getdefault(), init_base), length);
	// Derive init's code capability the same way from our PCC.
	capability pcc = cheri_setbounds(cheri_setoffset(cheri_getpcc(), init_base), length);

	KERNEL_TRACE("act", "assuming init has virtual entry point %lx", init_entry);
	frame.cf_c12 = frame.cf_pcc = cheri_setoffset(pcc, init_entry);

	/* provide config info to init. c3 is the conventional register */
	frame.cf_c3 = info;

	act_t * init_act = &kernel_acts[namespace_num_boot];
	act_register_create(&frame, &init_queue.queue, "init", status_alive, NULL);

	/* The boot activation should be the current activation */
	sched_schedule(init_act);

	return init_act->context;
}
/**
   Block the current thread on a wait queue until it is woken.
   @param[in] wq wait queue to sleep on
   @return the wakeup reason recorded in @a wq by the waker
   @note Interrupts are disabled across all queue manipulation; the thread
         only yields the CPU inside sched_schedule().
 */
wq_reason wque_wait_on_queue(wait_queue *wq) {
	psw_t saved_psw;
	/* stack-allocated entry: it is unlinked before this function returns */
	wait_queue_entry entry;
	wait_queue_entry *entp = &entry;
	wq_reason wakeup_reason;

	init_list_node(&entp->link);
	psw_disable_and_save_interrupt(&saved_psw);

	/* take ourselves off the ready queue and park on the wait queue */
	rdq_remove_thread(current);
	wque_add_thread(wq, entp, current);
	entp->thr->status = THR_TSTATE_WAIT;

	sched_schedule();  /* Reschedule because current task is slept. */

	/* woken up: detach our entry and report why we were woken */
	wque_remove_entry(wq, entp);
	wakeup_reason = wq->reason;

	psw_restore_interrupt(&saved_psw);
	return wakeup_reason;
}
/**
   Wake every thread sleeping on a wait queue.
   @param[in] wq     wait queue to drain
   @param[in] reason wakeup reason stored into @a wq for the woken threads
   @note If the queue is already empty, neither wq->reason nor the
         scheduler is touched.
 */
void wque_wakeup(wait_queue *wq, wq_reason reason) {
	psw_t saved_psw;

	psw_disable_and_save_interrupt(&saved_psw);
	if ( !list_is_empty(&wq->head) ) {
		/* non-empty: drain every entry, marking each thread runnable */
		do {
			wait_queue_entry *waiter =
			    CONTAINER_OF(list_get_top(&wq->head), wait_queue_entry, link);
			list_del(&waiter->link);
			init_list_node(&waiter->link);
			waiter->thr->status = THR_TSTATE_RUN;
			sched_set_ready(waiter->thr);
			waiter->thr = NULL;
		} while ( !list_is_empty(&wq->head) );

		wq->reason = reason;
		sched_schedule();  /* Reschedule because change the top priority task */
	}
	psw_restore_interrupt(&saved_psw);
}
/*
 * Voluntarily give up the CPU: mark the current thread AVAILABLE, put it
 * back into the scheduler's queue, and switch to the next runnable thread.
 *
 * Fix: declare the parameter list as (void) — an empty () in C is an
 * obsolescent unprototyped declaration (no argument checking), not
 * "takes no arguments".
 */
void sched_relinquish(void) {
	current_thread->state = AVAILABLE;
	sched_schedule(current_thread);
	sched_switch();
}
/*
 * Yield the processor: mark the current thread Yielded and ask the
 * scheduler to pick the next thread to run.
 *
 * Fix: declare the parameter list as (void) — an empty () in C is an
 * obsolescent unprototyped declaration (no argument checking), not
 * "takes no arguments".
 * NOTE(review): the name sched_yield collides with the POSIX function of
 * the same name; fine in a freestanding kernel, verify no hosted builds.
 */
void sched_yield(void) {
	thread_t * thr = thr_current();
	thr->state = Yielded;
	sched_schedule();
}