void cv_signal(struct cv *cv, struct lock *lock) {
    // On a signal, the next thread waiting in the queue may proceed.
    int spl;

    assert(cv != NULL);
    assert(lock != NULL);
    assert(lock_do_i_hold(lock));

    spl = splhigh();    // Disable all interrupts

    // Only wake someone if a thread is actually waiting.
    if (!q_empty(cv->thread_queue)) {
        cv->count--;    // Decrement count since the next thread can go.

        // We never know which thread is next, so grab the head of the queue
        // into a temporary pointer and wake that thread.
        struct thread *next_thread = q_remhead(cv->thread_queue);
        thread_wakeup(next_thread);
    }

    splx(spl);          // Re-enable all interrupts
}
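/*
 * For reference, a minimal sketch of the cv_wait() this cv_signal() appears
 * to pair with -- assuming the same cv->count / cv->thread_queue fields and
 * the usual OS/161 thread_sleep()/thread_wakeup() discipline, with each
 * thread sleeping on its own thread pointer as the sleep address. This is an
 * illustrative reconstruction, not the author's actual implementation.
 */
void cv_wait(struct cv *cv, struct lock *lock) {
    int spl;

    assert(cv != NULL);
    assert(lock != NULL);
    assert(lock_do_i_hold(lock));

    spl = splhigh();                         // Atomic with respect to cv_signal
    cv->count++;                             // One more waiter
    q_addtail(cv->thread_queue, curthread);  // Join the wait queue
    lock_release(lock);                      // Give up the lock while asleep
    thread_sleep(curthread);                 // Woken by thread_wakeup(curthread)
    splx(spl);
    lock_acquire(lock);                      // Reacquire the lock before returning
}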
int kframe_alloc(int *frame, int frames_wanted) {
    int rv, id;
    static int id_cur = 0;

    // Only acquire the lock and use the id free-list AFTER booting --
    // kmalloc (and hence the lock and queue) is not available earlier.
    if (booting) {
        id = id_cur++;
    } else {
        lock_acquire(coremap_lock);
        if (!q_empty(id_not_used)) {
            // Reuse a previously released id if one is available.
            id = (int) q_remhead(id_not_used);
        } else {
            id = id_cur++;
        }
    }

    rv = frame_alloc_continuous(frame, KERNEL, 0, id, frames_wanted);

    // On failure, return the id to the free-list so it can be reused.
    if (rv && id_not_used != NULL) {
        q_addtail(id_not_used, (void *)id);
    }

    if (!booting) {
        lock_release(coremap_lock);
    }
    return rv;
}
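/*
 * A minimal sketch (assumed, not from the original source) of how the
 * coremap_lock, the id_not_used free-list, and the `booting` flag used above
 * might be set up once kmalloc is available. The function name
 * kframe_alloc_bootstrap_done() and the initial queue size are hypothetical.
 */
static int booting = 1;              // Cleared once the allocator is fully up
static struct lock *coremap_lock;    // Guards id allocation after boot
static struct queue *id_not_used;    // Free-list of recycled frame ids

void kframe_alloc_bootstrap_done(void) {
    coremap_lock = lock_create("coremap_lock");
    assert(coremap_lock != NULL);

    id_not_used = q_create(32);      // Initial capacity; the queue grows as needed
    assert(id_not_used != NULL);

    booting = 0;                     // From now on, take the lock and reuse ids
}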
void kitchen_destroy(struct kitchen *k) {
    int i;

    // Destroy any group structs still in the queue
    while (!q_empty(k->group_list)) {
        kfree(q_remhead(k->group_list));
    }

    // Destroy the queue
    q_destroy(k->group_list);

    // Destroy the cv
    cv_destroy(k->kitchen_cv);

    // Destroy the entrance lock
    lock_destroy(k->kitchen_lock);

    // Destroy the bowl locks
    for (i = 0; i < NumBowls; i++) {
        lock_destroy(k->bowl_locks[i]);
    }

    // Destroy the bowl lock array
    kfree(k->bowl_locks);

    // Destroy the kitchen
    kfree(k);

    // Note: this only clears the local copy of the pointer;
    // the caller's pointer is unaffected.
    k = NULL;
}
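/*
 * For orientation, a sketch of the kitchen/group structures implied by the
 * kitchen code in this section (kitchen_destroy() above, exit_kitchen()
 * further below). The field names match the ones used in that code; the
 * exact types and any additional fields are assumptions, not the original
 * definitions.
 */
struct kgroup {
    int amount;                  // Number of creatures in this waiting group
};

struct kitchen {
    struct lock *kitchen_lock;   // Entrance lock protecting the fields below
    struct cv *kitchen_cv;       // Waiting groups sleep on this cv
    struct queue *group_list;    // FIFO of struct kgroup * waiting to enter
    struct lock **bowl_locks;    // One lock per bowl, NumBowls entries
    int creature_count;          // Creatures currently inside the kitchen
    int current_creature;        // Creature type inside now; 2 means "unset"
};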
static void testq(struct queue *q, int n) {
    int i, result, *x, *r;

    x = kmalloc(n * sizeof(int));
    for (i = 0; i < n; i++) {
        x[i] = i;
    }

    assert(q_empty(q));

    for (i = 0; i < n; i++) {
        kprintf("queue: adding %d\n", i);
        result = q_addtail(q, &x[i]);
        assert(result == 0);
    }

    for (i = 0; i < n; i++) {
        r = q_remhead(q);
        assert(r != NULL);
        kprintf("queue: got %d, should be %d\n", *r, i);
        assert(*r == i);
    }

    assert(q_empty(q));
    kfree(x);
}
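/*
 * A small usage sketch for testq(), assuming the standard OS/161 queue API
 * (q_create/q_destroy) and the usual menu-command signature. The wrapper
 * name and the sizes chosen here are illustrative, not part of the original
 * test code.
 */
int queuetest(int nargs, char **args) {
    struct queue *q;

    (void)nargs;
    (void)args;

    q = q_create(4);          // Small initial size; the queue grows on demand
    assert(q != NULL);

    testq(q, 8);              // Exercise growth past the initial size
    testq(q, 100);            // And a larger run for good measure

    q_destroy(q);
    return 0;
}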
void exit_kitchen(void) {
    // Reacquire entrance lock
    lock_acquire(k->kitchen_lock);

    // Decrement count
    k->creature_count--;

    /*
     * If there are no creatures left, let in the next group waiting.
     * If there is no other group waiting, reset the switch that
     * indicates which creature type currently owns the kitchen.
     */
    if (!q_empty(k->group_list) && k->creature_count == 0) {
        // Dequeue first group in line
        struct kgroup *g = q_remhead(k->group_list);
        int i;

        // Signal every member of that group
        for (i = 0; i < g->amount; i++) {
            cv_signal(k->kitchen_cv, k->kitchen_lock);
        }

        // Destroy the group struct
        kfree(g);
    } else if (q_empty(k->group_list) && k->creature_count == 0) {
        // 2 is the "unset" value for k->current_creature
        k->current_creature = 2;
    }

    // Release entrance lock and exit
    lock_release(k->kitchen_lock);
}
void cv_signal(struct cv *cv, struct lock *lock) {
#if OPT_A1
    // validate parameters
    assert(cv != NULL);
    assert(lock != NULL);

    // the caller must hold the lock
    assert(lock_do_i_hold(lock) == 1);

    // disable interrupts
    int spl = splhigh();

    // signal must be called after wait; nothing to do if no one is waiting
    if (q_empty(cv->sleeping_list))
        goto done;

    // pick one sleeping thread and wake it up
    thread_wakeup((struct thread *) q_remhead(cv->sleeping_list));

done:
    // enable interrupts
    splx(spl);
#else
    (void) cv;
    (void) lock;
#endif
}
/*
 * This is called during panic shutdown to dispose of threads other
 * than the one invoking panic. We drop them on the floor instead of
 * cleaning them up properly; since we're about to go down it doesn't
 * really matter, and freeing everything might cause further panics.
 */
void scheduler_killall(void) {
    assert(curspl > 0);
    while (!q_empty(runqueue)) {
        struct thread *t = q_remhead(runqueue);
        kprintf("scheduler: Dropping thread %s.\n", t->t_name);
    }
}
/*
 * This is called during panic shutdown to dispose of threads other
 * than the one invoking panic. We drop them on the floor instead of
 * cleaning them up properly; since we're about to go down it doesn't
 * really matter, and freeing everything might cause further panics.
 */
void scheduler_killall(void) {
    // panic("not used.");
    assert(curspl > 0);
    while (!q_empty(runqueue)) {
        struct thread *t = q_remhead(runqueue);
        (void)t;  // thread is simply dropped; suppress unused-variable warning
        kprintf("scheduler: Dropping thread.\n");
    }
}
void scheduler_killall(void) {
    int i;
    assert(curspl > 0);

    // Drain the run queue of every priority level.
    for (i = 0; i < NUM_PRIORITIES; i++) {
        while (!q_empty(runqueue[i])) {
            struct thread *t = q_remhead(runqueue[i]);
            kprintf("scheduler: Dropping thread %s.\n", t->t_name);
        }
    }
}
/*
 * Actual scheduler. Returns the next thread to run. Calls cpu_idle()
 * if there's nothing ready. (Note: cpu_idle must be called in a loop
 * until something's ready - it doesn't know whether the things that
 * wake it up are going to make a thread runnable or not.)
 */
struct thread *scheduler(void) {
    // meant to be called with interrupts off
    assert(curspl > 0);

    while (q_empty(runqueue)) {
        cpu_idle();
    }

    // You can actually uncomment this to see what the scheduler's
    // doing - even this deep inside thread code, the console
    // still works. However, the amount of text printed is
    // prohibitive.
    //
    //print_run_queue();

    return q_remhead(runqueue);
}
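/*
 * For context: in stock OS/161 the other half of this FIFO policy is
 * make_runnable(), which simply appends a thread to the tail of the same
 * runqueue. Roughly as follows (reconstructed for illustration, not copied
 * from this code base):
 */
int make_runnable(struct thread *t) {
    // meant to be called with interrupts off
    assert(curspl > 0);

    // FIFO policy: newly runnable threads go to the back of the line.
    return q_addtail(runqueue, t);
}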
void cv_broadcast(struct cv *cv, struct lock *lock) {
#if OPT_A1
    // validate parameters
    assert(cv != NULL);
    assert(lock != NULL);

    // the caller must hold the lock
    assert(lock_do_i_hold(lock) == 1);

    // disable interrupts
    int spl = splhigh();

    // nothing to do if no one is waiting
    if (q_empty(cv->sleeping_list))
        goto done;

    // wake up all sleeping threads
    while (!q_empty(cv->sleeping_list)) {
        thread_wakeup((struct thread *) q_remhead(cv->sleeping_list));
    }

done:
    // enable interrupts
    splx(spl);
#else
    (void) cv;
    (void) lock;
#endif
}
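/*
 * A minimal sketch of the cv_create() that the OPT_A1 cv_signal()/
 * cv_broadcast() above assume -- a cv wrapping a name plus the sleeping_list
 * queue of waiting threads. The field layout and initial queue size are
 * assumptions; the original implementation may differ.
 */
struct cv *cv_create(const char *name) {
    struct cv *cv;

    cv = kmalloc(sizeof(struct cv));
    if (cv == NULL)
        return NULL;

    cv->name = kstrdup(name);
    if (cv->name == NULL) {
        kfree(cv);
        return NULL;
    }

    cv->sleeping_list = q_create(32);   // initial size; grows as needed
    if (cv->sleeping_list == NULL) {
        kfree(cv->name);
        kfree(cv);
        return NULL;
    }

    return cv;
}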
/*
 * Actual scheduler. Returns the next thread to run. Calls cpu_idle()
 * if there's nothing ready. (Note: cpu_idle must be called in a loop
 * until something's ready - it doesn't know whether the things that
 * wake it up are going to make a thread runnable or not.)
 */
struct thread *scheduler(void) {
    // meant to be called with interrupts off
    assert(curspl > 0);

    while (q_empty(runqueue)) {
        cpu_idle();
    }

    // You can actually uncomment this to see what the scheduler's
    // doing - even this deep inside thread code, the console
    // still works. However, the amount of text printed is
    // prohibitive.
    //
    //print_run_queue();

    /*
     * A variable defined in scheduler.h (scheduler_type) controls which
     * scheduling policy is used.
     */
    if (scheduler_type == SCHEDULER_RANDOM) {
        /*
         * Random-queue method.
         *
         * We could manipulate q->next_read, the integer that indexes the
         * next element in line: pick an index based on the size of the
         * queue (q->size) and change runqueue->next_read to that index.
         * Instead, we just pick a random index from within the active
         * region of the queue and rotate it to the front.
         */
        int queue_size = q_getsize(runqueue);  // queue size is 32 by default
        int random_index;
        struct thread *temp_thread;

        // The chosen thread must come from the active part of the queue.
        int start = q_getstart(runqueue);
        int end = q_getend(runqueue);
        int random_range = end - start;

        // We have a problem if the start and end are the same.
        assert(random_range != 0);

        // The startup code seems to have issues if you pick randomly
        // right off the bat; the range also wraps if end < start.
        if (random_range < 0)
            random_range = random_range + queue_size;

        // No need to pick a random thread if there is only 1 in the queue.
        if (random_range == 1)
            return q_remhead(runqueue);

        DEBUG(DB_THREADS, "Number of active threads: %u.\n", random_range);
        DEBUG(DB_THREADS, "Start: %u.\n", start);
        DEBUG(DB_THREADS, "End: %u.\n", end);

        random_index = (random() % random_range + start) % queue_size;
        DEBUG(DB_THREADS, "%u index chosen.\n", random_index);

        /*
         * Now move the chosen thread to the front of the line. There is
         * probably a more efficient way to do this, but I had no success
         * with q_getguy(): start with the next thread in the queue and
         * rotate until the chosen index reaches the head.
         */
        while (q_getstart(runqueue) != random_index) {
            temp_thread = q_remhead(runqueue);
            q_addtail(runqueue, temp_thread);
        }

        DEBUG(DB_THREADS, "New start: %u.\n", q_getstart(runqueue));
        DEBUG(DB_THREADS, "New end: %u.\n", q_getend(runqueue));

        return q_remhead(runqueue);
    }
    else if (scheduler_type == SCHEDULER_MLFQ) {
        /*
         * MLFQ method.
         *
         * Walk the whole active region of the queue looking for the
         * highest-priority thread, starting at the next read position and
         * working up; ties are broken randomly below.
         */
        int queue_size = q_getsize(runqueue);  // queue size is 32 by default
        int i;
        int chosen_index;
        int priority;
        int random_choice;
        struct thread *temp_thread;

        // The chosen thread must come from the active part of the queue.
        int start = q_getstart(runqueue);
        int end = q_getend(runqueue);

        cycles++;
        if (cycles > 2000) {
            // Periodically reset priorities to prevent starvation.
            //kprintf("Resetting priorities");
            i = start;
            while (i != end) {
                temp_thread = q_getguy(runqueue, i);
                DEBUG(DB_THREADS, "Setting priority\n");
                thread_set_priority(temp_thread, 50);
                i = (i + 1) % queue_size;
            }
            cycles = 0;
            // A bit of randomness to prevent immediate restarving.
            return q_remhead(runqueue);
        }

        int highest_priority = -1;  // 100 is maximum priority
        chosen_index = start;       // Fallback; overwritten on the first pass

        i = start;
        while (i != end) {
            temp_thread = q_getguy(runqueue, i);
            DEBUG(DB_THREADS, "Getting priority\n");
            priority = thread_get_priority(temp_thread);
            DEBUG(DB_THREADS, "Priority: %u.\n", priority);
            if (priority > highest_priority) {
                chosen_index = i;
                highest_priority = priority;
            }
            // In the event of a tie, pick randomly.
            else if (priority == highest_priority) {
                random_choice = random() % 3;
                if (random_choice == 0)
                    chosen_index = i;
            }
            i = (i + 1) % queue_size;
        }

        DEBUG(DB_THREADS, "Start: %u.\n", start);
        DEBUG(DB_THREADS, "End: %u.\n", end);
        DEBUG(DB_THREADS, "%u index chosen with priority %u.\n", chosen_index, highest_priority);
        //kprintf("%u index chosen with priority %u.\n", chosen_index, highest_priority);

        /*
         * Now move the chosen thread to the front of the line. There is
         * probably a more efficient way to do this, but I had no success
         * with q_getguy(): rotate the queue until the chosen index reaches
         * the head.
         */
        while (q_getstart(runqueue) != chosen_index) {
            temp_thread = q_remhead(runqueue);
            q_addtail(runqueue, temp_thread);
        }

        DEBUG(DB_THREADS, "New start: %u.\n", q_getstart(runqueue));
        DEBUG(DB_THREADS, "New end: %u.\n", q_getend(runqueue));

        return q_remhead(runqueue);
    }

    // Fall through to the default FIFO scheduler.
    return q_remhead(runqueue);
}
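/*
 * The MLFQ code above relies on thread_get_priority()/thread_set_priority(),
 * which are not part of stock OS/161. A plausible minimal form, assuming a
 * `priority` field was added to struct thread (as the priority-scheduler
 * variant further below also assumes):
 */
int thread_get_priority(struct thread *t) {
    assert(t != NULL);
    return t->priority;
}

void thread_set_priority(struct thread *t, int priority) {
    assert(t != NULL);
    assert(priority >= 0 && priority <= 100);  // 100 is maximum priority
    t->priority = priority;
}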
/*
 * Schedules the threads according to the highest-priority thread.
 */
struct thread *scheduler(void) {
    int do_idle;
    int i;
    //int k;
    struct thread *t;

    // meant to be called with interrupts off
    assert(curspl > 0);

    // Check if all queues are empty.
    do {
        do_idle = 1;
        for (i = 0; i < NUM_PRIORITIES; i++) {
            do_idle &= q_empty(runqueue[i]);
        }
        // Idle if there are no threads to schedule.
        if (do_idle)
            cpu_idle();
    } while (do_idle);

    // Uncomment to print queue contents.
    //print_run_queue();

    //// Select the queue to get the thread from.
    //k = 1 << NUM_PRIORITIES;
    //i = NUM_PRIORITIES;
    //// Avoid all empty queues.
    //while(q_empty(runqueue[i - 1]))
    //{
    //    k = k >> 1;
    //    i--;
    //}
    //
    //// Select the queue indicated by queuenum.
    //// NOTE: There is an important difference between
    //// calculating k this way and using PRIORITY_START at initialization!
    //while(k - 1 >= queuenum)
    //{
    //    k = k >> 1;
    //    i--;
    //}
    //// Reset queuenum if needed.
    //queuenum--;
    //if(queuenum < 1)
    //    queuenum = PRIORITY_START;

    // Use a multilevel-feedback queue approach to get the first non-empty queue,
    // searching from the highest-priority level down.
    for (i = NUM_PRIORITIES - 1; i > 0; i--) {
        if (!q_empty(runqueue[i]))
            break;
    }

    // Return the first thread from the appropriate queue.
    t = q_remhead(runqueue[i]);

    // Lower the thread's priority so it lands in the next-lower queue
    // on the next pass. The lowest queue acts as round robin.
    if (t->priority > 0)
        t->priority--;

    return (t);
}
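/*
 * For completeness, a sketch (assumed, not from the original source) of how
 * make_runnable() would feed this priority scheduler: each thread is
 * appended to the queue matching its current priority level.
 */
int make_runnable(struct thread *t) {
    // meant to be called with interrupts off
    assert(curspl > 0);
    assert(t != NULL);
    assert(t->priority >= 0 && t->priority < NUM_PRIORITIES);

    // Higher-indexed queues are searched first by scheduler(), so a higher
    // priority value means the thread is picked sooner.
    return q_addtail(runqueue[t->priority], t);
}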
void intersection_after_exit(Direction origin, Direction destination) {
    KASSERT(intersection_lock != NULL);

    (void)origin;       /* unused: the per-origin bookkeeping below is commented out */
    (void)destination;  /* avoid compiler complaint about unused parameter */

    // Earlier semaphore-based version:
    // KASSERT(intersectionSem != NULL);
    // V(intersectionSem);
    // lock_release(intersection_lock);

    lock_acquire(intersection_lock);

    if (!q_empty(next_lights)) {
        // Abandoned per-origin waiting-count bookkeeping, kept for reference:
        // if (num_vehicles_waiting_b > 0){
        //     num_vehicles_waiting_b--;
        //     if (origin == south){
        //         num_vehicles_waiting_south--;
        //     }
        //     else if (origin == north){
        //         num_vehicles_waiting_north--;
        //     }
        //     else if(origin == east){
        //         num_vehicles_waiting_east--;
        //     }
        //     else if(origin == west){
        //         num_vehicles_waiting_west--;
        //     }
        // }
        // if (origin == south){
        //     num_vehicles_waiting_south--;
        //     if(num_vehicles_waiting_south == 0){
        //         q_remhead(next_lights);
        //     }
        // }
        // else if (origin == north){
        //     num_vehicles_waiting_north--;
        //     if(num_vehicles_waiting_north == 0){
        //         q_remhead(next_lights);
        //     }
        // }
        // else if(origin == east){
        //     num_vehicles_waiting_east--;
        //     if(num_vehicles_waiting_east == 0){
        //         q_remhead(next_lights);
        //     }
        // }
        // else if(origin == west){
        //     num_vehicles_waiting_west--;
        //     if(num_vehicles_waiting_west == 0){
        //         q_remhead(next_lights);
        //     }
        // }

        // Retire the light for the direction that just finished.
        q_remhead(next_lights);
        // if(num_vehicles_waiting_b == 0){
        //     q_remhead(next_lights);
        // }

        if (!q_empty(next_lights)) {
            // Make the next queued direction the current light and wake its vehicles.
            current_light = q_peek(next_lights);

            if (*current_light == north) {
                //*current_light = east;
                //num_vehicles_waiting -= cv_north_go->cv_wchan->wc_threads.tl_count;
                //struct threadlist l = (struct threadlist) cv_north_go->cv_wchan->wc_threads;
                // num_vehicles_waiting = 0;
                // is_waiting = true;
                //lock_release(intersection_lock);
                // num_vehicles_waiting_b = num_vehicles_waiting_north;
                cv_broadcast(cv_north_go, intersection_lock);
            }
            else if (*current_light == south) {
                //num_vehicles_waiting -= cv_south_go->cv_wchan->wc_threads.tl_count;
                //*current_light = west;
                // num_vehicles_waiting = 0;
                //lock_release(intersection_lock);
                // num_vehicles_waiting_b = num_vehicles_waiting_south;
                cv_broadcast(cv_south_go, intersection_lock);
            }
            else if (*current_light == east) {
                // num_vehicles_waiting -= cv_east_go->cv_wchan->wc_threads.tl_count;
                // num_vehicles_waiting = 0;
                //*current_light = south;
                //lock_release(intersection_lock);
                // num_vehicles_waiting_b = num_vehicles_waiting_east;
                cv_broadcast(cv_east_go, intersection_lock);
            }
            else if (*current_light == west) {
                //*current_light = north;
                // num_vehicles_waiting -= cv_west_go->cv_wchan->wc_threads.tl_count;
                // num_vehicles_waiting = 0;
                //lock_release(intersection_lock);
                // num_vehicles_waiting_b = num_vehicles_waiting_west;
                cv_broadcast(cv_west_go, intersection_lock);
            }
        }
    }

    lock_release(intersection_lock);
}
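/*
 * The traffic-light code above assumes shared state roughly like the
 * following (names taken from the code; the types are a best guess, since
 * the original declarations are not shown here). current_light points at the
 * Direction stored at the head of next_lights, and each direction's vehicles
 * wait on their own condition variable until that direction gets the light.
 */
static struct lock *intersection_lock;   // protects all the state below
static struct queue *next_lights;        // FIFO of Direction * waiting for a green
static Direction *current_light;         // direction currently allowed through
static struct cv *cv_north_go;           // vehicles arriving from the north wait here
static struct cv *cv_south_go;
static struct cv *cv_east_go;
static struct cv *cv_west_go;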