/*
 * Park the calling process on the given wait queue and hand the CPU to
 * the next runnable process.  Interrupts must already be disabled
 * (checked by the ASSERT); execution resumes here once unblocked.
 */
void block(node_t * wait_queue)
{
    ASSERT(disable_count);

    current_running->status = BLOCKED;
    queue_put(wait_queue, (node_t *) current_running);

    /* Let the scheduler dispatch another process. */
    scheduler_entry();
}
/*
 * Voluntarily give up the CPU: record the yield and ask the scheduler
 * to pick the next process.  Runs inside a critical section.
 */
void yield(void)
{
    enter_critical();

    current_running->yield_count++;
    scheduler_entry();

    leave_critical();
}
/*
 * Terminate the calling process: mark it EXITED and dispatch another
 * process.  scheduler_entry() never returns for an exited process, so
 * there is deliberately no matching leave_critical().
 *
 * Fix: declare the parameter list as (void) -- the empty () form is an
 * obsolescent unprototyped declarator in C.
 */
void do_exit(void)
{
    enter_critical();
    current_running->status = EXITED;
    /* No need for leave_critical() since scheduler_entry() never returns */
    scheduler_entry();
}
/* This function is the entry point for the kernel
 * It must be the first function in the file */
void _start(void)
{
    /* Set up the single entry-point for system calls */
    *ENTRY_POINT = &kernel_entry;

    /* NOTE(review): ready_q and blocked_q are automatic (stack)
     * variables of _start whose addresses are stored in the global
     * queue pointers.  This is only safe because _start never returns
     * (see the ASSERT(0) at the bottom) -- confirm nothing ever reuses
     * this stack region. */
    struct queue ready_q;
    ready_queue = &ready_q;
    ready_queue->pcbs = ready_arr;
    ready_queue->capacity = NUM_TASKS;
    queue_init(ready_queue);

    struct queue blocked_q;
    blocked_queue = &blocked_q;
    blocked_queue->pcbs = blocked_arr;
    blocked_queue->capacity = NUM_TASKS;
    queue_init(blocked_queue);

    clear_screen(0, 0, 80, 25);

    /* Initialize the pcbs and the ready queue */
    int iProcessIndex;
    int iStackTop = STACK_MIN;
    /* static: the PCB array must outlive this (never-returning) call */
    static pcb_t pcbs[NUM_TASKS];
    pcb_t *process = &pcbs[0];

    /* check that we won't exceed STACK_MAX */
    ASSERT(STACK_MIN + NUM_TASKS * STACK_SIZE <= STACK_MAX);

    for (iProcessIndex = 0; iProcessIndex < NUM_TASKS; iProcessIndex++) {
        struct task_info *thisTask = task[iProcessIndex];

        // populate instance variables
        /* The stack top is bumped BEFORE first use, so task i occupies
         * (STACK_MIN + i*STACK_SIZE, STACK_MIN + (i+1)*STACK_SIZE],
         * which is exactly what the ASSERT above bounds. */
        iStackTop += STACK_SIZE;
        process->esp = iStackTop;
        process->ebp = iStackTop;
        process->state = PROCESS_READY;
        process->eip = thisTask->entry_point;
        /* Fresh tasks start with zeroed general registers and flags. */
        process->eax = 0;
        process->ebx = 0;
        process->ecx = 0;
        process->edx = 0;
        process->edi = 0;
        process->esi = 0;
        process->eflags = 0;
        process->isKernel = (thisTask->task_type == KERNEL_THREAD) ? TRUE : FALSE;

        // add to queue
        queue_push(ready_queue, process);
        process++;
    }

    /* Schedule the first task */
    scheduler_count = 0;
    scheduler_entry();

    /* We shouldn't ever get here */
    ASSERT(0);
}
/*
 * Terminate the calling process.  Once marked EXITED it is never put
 * back on the ready queue, so control does not return here -- which is
 * why there is no leave_critical() on the way out.
 */
void exit(void)
{
    enter_critical();

    current_running->status = EXITED;

    /* Drop this job from the ready queue and dispatch the next one. */
    scheduler_entry();
}
/*
 * Put the calling process to sleep for at least the given number of
 * milliseconds: park it on the sleep queue, ordered by wake-up time,
 * and let the scheduler run something else until the deadline passes.
 */
void do_sleep(int milliseconds)
{
    enter_critical();

    current_running->sleep_until = do_gettimeofday() + milliseconds;
    current_running->status = BLOCKED;
    queue_put_sort(&sleep_queue, (node_t *) current_running,
                   &order_by_wake_up);

    scheduler_entry();
    leave_critical();
}
/*
 * TODO: Blocking sleep.  Caution: this function currently cannot be
 * pre-empted!  The ASSERT(!disable_count) means it must not be called
 * with interrupts already disabled (e.g. from an interrupt handler).
 *
 * BUGFIX: the comparator argument had been garbled to "<e_deadline"
 * (an HTML-unescape of the "&lt" prefix inside "&lte_deadline");
 * restore the intended address-of expression so the call compiles.
 */
void do_sleep(int milliseconds)
{
    ASSERT(!disable_count);
    enter_critical();

    /* Absolute wake-up time, in the same units as time_elapsed. */
    uint64_t deadline = time_elapsed + milliseconds;
    current_running->deadline = deadline;
    current_running->status = SLEEPING;

    /* Keep the sleep queue sorted by ascending deadline. */
    enqueue_sort(&sleep_wait_queue, (node_t *) current_running,
                 (node_lte) &lte_deadline);

    scheduler_entry();
    leave_critical();
}
/*
 * Block the calling process on the waiting list *q.  If a spinlock is
 * supplied, it is released only after interrupts have been disabled so
 * that no wake-up can slip in between.  Returns once the process has
 * been unblocked and rescheduled.
 */
void block(pcb_t ** q, int *spinlock)
{
    enter_critical();

    if (spinlock)
        spinlock_release(spinlock);

    /* Mark the job blocked and push it onto the front of the list. */
    current_running->status = BLOCKED;
    current_running->next_blocked = *q;
    *q = current_running;

    /* Remove job from ready queue, pick next job to run and dispatch it. */
    scheduler_entry();

    leave_critical();
}
/*
 * Voluntarily relinquish the CPU: requeue the current process and let
 * the scheduler pick the next one.  Runs inside a critical section.
 *
 * Fix: declare the parameter list as (void) -- the empty () form is an
 * obsolescent unprototyped declarator in C.
 */
void do_yield(void)
{
    enter_critical();
    put_current_running();
    scheduler_entry();
    leave_critical();
}
/* This is just like do_yield(),
 * but does not call enter_/leave_critical first.
 * It is called by the timer interrupt in entry.S, which already runs
 * with interrupts disabled (enforced by the ASSERT below).
 *
 * Fix: declare the parameter list as (void) -- the empty () form is an
 * obsolescent unprototyped declarator in C.
 */
void do_yield_naked(void)
{
    ASSERT(disable_count);
    put_current_running();
    scheduler_entry();
}