/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
        /* Mask interrupts before touching the run queue. */
        uint8_t prev_ipl = intr_getipl();
        intr_setipl(IPL_HIGH);

        /* Re-enqueue the outgoing thread only if it is still runnable;
         * sleeping and exited threads must not go back on the run queue. */
        kthread_t *prevthr = curthr;
        if (prevthr->kt_state == KT_RUN)
                ktqueue_enqueue(&kt_runq, prevthr);

        /* If nothing is runnable, drop the IPL and wait for an interrupt
         * handler to make a thread runnable, then re-mask and check again. */
        while (sched_queue_empty(&kt_runq)) {
                intr_setipl(IPL_LOW);
                intr_wait();
                intr_setipl(IPL_HIGH);
        }

        /* Pick the next thread and switch into its context. */
        kthread_t *t = ktqueue_dequeue(&kt_runq);
        curproc = t->kt_proc;
        curthr = t;

        context_switch(&prevthr->kt_ctx, &curthr->kt_ctx);
        intr_setipl(prev_ipl);
}
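/*
 * The IPL discipline above only works if every other path that touches the run
 * queue raises the IPL the same way. As a point of comparison, here is a minimal
 * sketch of what a companion sched_make_runnable could look like under that
 * convention; the body below is an assumption for illustration, not the
 * implementation in this codebase.
 */
void
sched_make_runnable(kthread_t *thr)
{
        /* Hypothetical sketch: enqueue a thread on the run queue under IPL_HIGH.
         * Field and helper names (kt_state, kt_runq, ktqueue_enqueue) follow the
         * conventions used above. */
        uint8_t oldipl = intr_getipl();
        intr_setipl(IPL_HIGH);          /* protect kt_runq from interrupt handlers */

        thr->kt_state = KT_RUN;
        ktqueue_enqueue(&kt_runq, thr);

        intr_setipl(oldipl);            /* restore the caller's IPL */
}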
/*
 * Should be called from the init proc.
 */
static void
test_proc_create(void)
{
        dbg(DBG_TEST, "testing proc_create\n");

        proc_t *myproc = proc_create("myproc");

        KASSERT(list_empty(&myproc->p_threads));
        KASSERT(list_empty(&myproc->p_children));
        KASSERT(sched_queue_empty(&myproc->p_wait));
        KASSERT(myproc->p_pproc->p_pid == 1 && "created proc's parent isn't the init proc\n");
        KASSERT(myproc->p_state == PROC_RUNNING);

        /* make sure it's in the proc list */
        KASSERT(proc_lookup(myproc->p_pid) == myproc && "created proc not in proc list\n");
        /* make sure it's in its parent's child list */
        KASSERT(in_child_list(myproc));

        /* clean everything up */
        kthread_t *mythread = kthread_create(myproc, simple_function, 0, NULL);
        sched_make_runnable(mythread);

        int status;
        do_waitpid(myproc->p_pid, 0, &status);

        dbg(DBG_TESTPASS, "all proc_create tests passed!\n");
}
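/*
 * The tests in this section hand simple_function to kthread_create but never
 * show it. A plausible sketch is below; the exact body, and in particular the
 * use of do_exit(0), is an assumption about this test suite, chosen so that the
 * do_waitpid calls above can reap the child with status 0.
 */
static void *
simple_function(int arg1, void *arg2)
{
        /* Hypothetical test thread body (assumed, not shown in this codebase):
         * do no work and terminate the enclosing process with status 0. */
        dbg(DBG_TEST, "child proc %d running\n", curproc->p_pid);
        do_exit(0);
        return NULL;    /* not reached */
}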
void
sched_broadcast_on(ktqueue_t *q)
{
        /* Wake every thread waiting on q by moving each to the run queue. */
        while (!sched_queue_empty(q)) {
                kthread_t *thr = ktqueue_dequeue(q);
                sched_make_runnable(thr);
        }
}
void
sched_broadcast_on(ktqueue_t *q)
{
        kthread_t *t;

        while (!sched_queue_empty(q)) {
                t = ktqueue_dequeue(q);
                sched_make_runnable(t);
        }
}
static void *
watch_dog(int arg1, void *arg2)
{
        /* Repeatedly wake every sleeper on mynode.my_queue, then sleep on the
         * same queue until threads are waiting on it again. */
        while (!sched_queue_empty(&mynode.my_queue)) {
                dbg(DBG_TEST, "watch_dog waking up all sleeping threads\n");
                sched_broadcast_on(&mynode.my_queue);
                sched_sleep_on(&mynode.my_queue);
        }
        return NULL;
}
kthread_t *
sched_wakeup_on(ktqueue_t *q)
{
        if (sched_queue_empty(q)) {
                return NULL;
        }

        kthread_t *thr = ktqueue_dequeue(q);
        sched_make_runnable(thr);
        return thr;
}
/* The thread executing this MUST be cancelled before it successfully
 * obtains the mutex. Otherwise, bad things will happen. */
static void *
cancellable_lock_kmutex(int arg1, void *arg2)
{
        kmutex_t *m = (kmutex_t *) arg2;

        int lock_result = kmutex_lock_cancellable(m);

        /* The cancellation should have interrupted the lock attempt and left
         * the mutex unowned with an empty wait queue. */
        KASSERT(lock_result == -EINTR);
        KASSERT(m->km_holder == NULL);
        KASSERT(sched_queue_empty(&m->km_waitq));

        return NULL;
}
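/*
 * For the precondition in that comment to hold, the driver that spawns this
 * thread has to hold the mutex itself and cancel the child while the child is
 * blocked inside kmutex_lock_cancellable. A minimal sketch of such a driver
 * follows; the helper name test_cancellable_lock, the yield via
 * sched_make_runnable(curthr) + sched_switch(), and the overall ordering are
 * assumptions about this test suite, not code taken from it.
 */
static void
test_cancellable_lock(void)
{
        kmutex_t m;
        kmutex_init(&m);
        kmutex_lock(&m);                        /* child will block on this */

        proc_t *p = proc_create("cancel_test");
        kthread_t *t = kthread_create(p, cancellable_lock_kmutex, 0, (void *) &m);
        sched_make_runnable(t);

        /* Yield so the child runs and blocks on m's wait queue. This assumes a
         * sched_switch that does not re-enqueue the current thread; with a
         * variant that does (as in some implementations above), a bare
         * sched_switch() is the yield instead. */
        sched_make_runnable(curthr);
        sched_switch();

        kthread_cancel(t, NULL);                /* interrupt the cancellable sleep */
        kmutex_unlock(&m);                      /* wait queue is now empty */

        int status;
        do_waitpid(p->p_pid, 0, &status);
}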
void
sched_broadcast_on(ktqueue_t *q)
{
        /* Dequeue all from wait queue and make runnable */
        while (!sched_queue_empty(q)) {
                kthread_t *wake_thr = ktqueue_dequeue(q);
                wake_thr->kt_wchan = NULL;
                sched_make_runnable(wake_thr);
        }
}
static void
test_do_waitpid(waitpid_type_t type)
{
        proc_t *test_procs[NUM_PROCS];
        kthread_t *test_threads[NUM_PROCS];

        int i;
        for (i = 0; i < NUM_PROCS; i++) {
                test_procs[i] = proc_create("test proc");
                test_threads[i] = kthread_create(test_procs[i], simple_function, i, NULL);
                sched_make_runnable(test_threads[i]);
        }

        int j;
        for (j = 0; j < NUM_PROCS; j++) {
                if (type == ANY) {
                        int status;
                        do_waitpid(-1, 0, &status);
                } else {
                        int status;
                        pid_t proc_pid = test_procs[j]->p_pid;
                        pid_t waitpid_pid = do_waitpid(proc_pid, 0, &status);
                        KASSERT(waitpid_pid == proc_pid);
                }
        }

        int k;
        for (k = 0; k < NUM_PROCS; k++) {
                proc_t *p = test_procs[k];

                KASSERT(proc_lookup(p->p_pid) == NULL);
                /* make sure all children have been reparented */
                KASSERT(list_empty(&p->p_children));
                /* make sure that it is no longer in its parent's child list */
                KASSERT(!in_child_list(p));
                /* make sure it exited with the correct status */
                KASSERT(p->p_status == 0);
                KASSERT(p->p_state == PROC_DEAD);
                KASSERT(sched_queue_empty(&p->p_wait));
        }
}
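/*
 * Both tests above also rely on an in_child_list helper that is not shown. A
 * plausible sketch, assuming Weenix's list_iterate_begin/list_iterate_end
 * macros and the p_pproc, p_children, and p_child_link fields of proc_t, is:
 */
static int
in_child_list(proc_t *p)
{
        /* Hypothetical helper (assumed, not shown in this codebase): returns
         * nonzero iff p appears on its parent's p_children list. */
        proc_t *child;
        list_iterate_begin(&p->p_pproc->p_children, child, proc_t, p_child_link) {
                if (child == p) {
                        return 1;
                }
        } list_iterate_end();
        return 0;
}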
/*
 * If there are any threads waiting to take a lock on the mutex, one
 * should be woken up and given the lock.
 *
 * Note: This should _NOT_ be a blocking operation!
 *
 * Note: Don't forget to add the new owner of the mutex back to the
 * run queue.
 *
 * Note: Make sure that the thread on the head of the mutex's wait
 * queue becomes the new owner of the mutex.
 *
 * @param mtx the mutex to unlock
 */
void
kmutex_unlock(kmutex_t *mtx)
{
        if (sched_queue_empty(&mtx->km_waitq)) {
                /* No one is waiting; the mutex simply becomes free. */
                mtx->km_holder = NULL;
        } else {
                /* Hand ownership directly to the head of the wait queue;
                 * sched_wakeup_on also puts that thread on the run queue. */
                mtx->km_holder = sched_wakeup_on(&mtx->km_waitq);
        }
}
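/*
 * The unlock path above only makes sense together with a lock path in which a
 * blocked thread does not retake the mutex itself: the unlocker names it as
 * the holder before it runs again. A minimal sketch of that lock side, using
 * the same field names, might look like the following; it illustrates the
 * convention and is not this codebase's kmutex_lock.
 */
void
kmutex_lock(kmutex_t *mtx)
{
        KASSERT(curthr != mtx->km_holder && "already holding this mutex");

        if (NULL == mtx->km_holder) {
                mtx->km_holder = curthr;        /* uncontended: take it directly */
        } else {
                sched_sleep_on(&mtx->km_waitq); /* blocked; woken as the new owner */
                KASSERT(curthr == mtx->km_holder);
        }
}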
kthread_t *
sched_wakeup_on(ktqueue_t *q)
{
        if (!sched_queue_empty(q)) {
                kthread_t *thr = ktqueue_dequeue(q);
                KASSERT((thr->kt_state == KT_SLEEP) || (thr->kt_state == KT_SLEEP_CANCELLABLE));
                dbg(DBG_PRINT, "(GRADING1A 4.a)\n");
                sched_make_runnable(thr);
                return thr;
        } else {
                dbg(DBG_PRINT, "(GRADING1C 1)\n");
                return NULL;
        }
}
kthread_t *
sched_wakeup_on(ktqueue_t *q)
{
        if (sched_queue_empty(q))
                return NULL;

        /* q was not empty, so there is someone to remove. */
        kthread_t *wake_thr = ktqueue_dequeue(q);

        /* Reset the pointer to the queue wake_thr is waiting on. */
        wake_thr->kt_wchan = NULL;

        /* Make it runnable. */
        sched_make_runnable(wake_thr);

        return wake_thr;
}
/* (Same sched_switch specification as above.) */
void
sched_switch(void)
{
        uint8_t orig_ipl = intr_getipl();
        intr_setipl(IPL_HIGH);

        /* Nothing runnable: unmask interrupts and wait for an interrupt handler
         * to put a thread on the run queue, then re-mask and check again. */
        while (sched_queue_empty(&kt_runq)) {
                intr_setipl(IPL_LOW);
                intr_wait();
                intr_setipl(IPL_HIGH);
        }

        /* This variant expects the caller to have already queued or slept the
         * outgoing thread before calling sched_switch. */
        kthread_t *thr = ktqueue_dequeue(&kt_runq);

        context_t *old_context = &(curthr->kt_ctx);
        context_t *new_context = &(thr->kt_ctx);

        curthr = thr;
        curproc = thr->kt_proc;

        context_switch(old_context, new_context);
        intr_setipl(orig_ipl);
}
/* (Same sched_switch specification as above.) */
void
sched_switch(void)
{
        kthread_t *oldthr;
        int oldIPL;

        oldIPL = intr_getipl();
        intr_setipl(IPL_HIGH);

        /* Idle loop: disable interrupts and drop the IPL before waiting for an
         * interrupt to make a thread runnable, then re-mask and check again. */
        while (sched_queue_empty(&kt_runq)) {
                intr_disable();
                intr_setipl(IPL_LOW);
                intr_wait();
                intr_setipl(IPL_HIGH);
        }

        oldthr = curthr;
        curthr = ktqueue_dequeue(&kt_runq);
        curproc = curthr->kt_proc;

        context_switch(&(oldthr->kt_ctx), &(curthr->kt_ctx));
        intr_setipl(oldIPL);

        dbg(DBG_PRINT, "(GRADING1C 1)\n");
}
/* (Same sched_switch specification as above.) */
void
sched_switch(void)
{
        kthread_t *next_thr;

        uint8_t oldIPL = intr_getipl();   /* remember the caller's IPL */
        intr_setipl(IPL_HIGH);            /* mask interrupts while touching kt_runq */

        /* Enqueue the requesting thread on the run queue only if it is still
         * runnable (dead and sleeping threads must not be rescheduled). */
        if (curthr->kt_state == KT_RUN)
                ktqueue_enqueue(&kt_runq, curthr);

        /* If no threads are on the run queue, unmask interrupts and wait for
         * one to make a thread runnable; re-mask before checking again. */
        while (sched_queue_empty(&kt_runq)) {
                intr_setipl(IPL_LOW);
                intr_wait();
                intr_setipl(IPL_HIGH);
        }

        /* Remove a thread from the run queue. */
        next_thr = ktqueue_dequeue(&kt_runq);

        /* Manage curproc and curthr. */
        kthread_t *old_thr = curthr;
        curthr = next_thr;
        curproc = next_thr->kt_proc;

        /* Switch from the old context to the new one, then restore the IPL. */
        context_switch(&old_thr->kt_ctx, &curthr->kt_ctx);
        intr_setipl(oldIPL);
}
/*
 * The tty subsystem calls this when the tty driver has received a
 * character. Now, the line discipline needs to store it in its read
 * buffer and move the read tail forward.
 *
 * Special cases to watch out for: backspaces (both ASCII characters
 * 0x08 and 0x7F should be treated as backspaces), newlines ('\r' or
 * '\n'), and full buffers.
 *
 * Return a null terminated string containing the characters which
 * need to be echoed to the screen. For a normal, printable character,
 * just the character to be echoed.
 */
const char *
n_tty_receive_char(tty_ldisc_t *ldisc, char c)
{
        /* DRIVERS{{{ */
        static const char echo_newline[] = { CR, LF, '\0' };
        static const char echo_bs[]      = { BS, SPACE, BS, '\0' };
        static const char echo_esc[]     = { '^', '\0' };
        static const char echo_null[]    = { '\0' };
        static char       echo_char[]    = { ' ', '\0' };

        n_tty_t *ntty;

        KASSERT(NULL != ldisc);

        ntty = ldisc_to_ntty(ldisc);
        N_TTY_ASSERT_VALID(ntty);

        switch (c) {
                case BS:
                case DEL:
                        dprintf("Received backspace\n");
                        if (!n_tty_rawbuf_empty(ntty)) {
                                n_tty_rawbuf_remove_last(ntty);
                                return echo_bs;
                        }
                        return echo_null;

                case ESC:
                        dprintf("Received escape\n");
                        if (n_tty_rawbuf_almost_full(ntty))
                                return echo_null;
                        n_tty_rawbuf_enqueue(ntty, c);
                        return echo_esc;

                case CR:
                        c = LF;
                        /* FALLTHROUGH: treat '\r' exactly like '\n' */
                case LF:
                        dprintf("Received newline\n");
                        if (n_tty_rawbuf_full(ntty))
                                return echo_null;
                        n_tty_rawbuf_enqueue(ntty, c);
                        n_tty_rawbuf_cook(ntty);
                        /* A complete line is now available; wake one reader. */
                        if (!sched_queue_empty(&ntty->ntty_rwaitq)) {
                                kthread_t *thr = sched_wakeup_on(&ntty->ntty_rwaitq);
                                KASSERT(NULL != thr);
                        }
                        return echo_newline;

                case EOT:
                        dprintf("Received EOT\n");
                        n_tty_rawbuf_cook(ntty);
                        if (!sched_queue_empty(&ntty->ntty_rwaitq)) {
                                kthread_t *thr = sched_wakeup_on(&ntty->ntty_rwaitq);
                                KASSERT(NULL != thr);
                        }
                        return echo_null;

                default:
                        dprintf("Receiving printable character\n");
                        if (n_tty_rawbuf_almost_full(ntty))
                                return echo_null;
                        n_tty_rawbuf_enqueue(ntty, c);
                        echo_char[0] = c;
                        return echo_char;
        }

        panic("Should never get here\n");
        /* DRIVERS }}} */
        return NULL;
}