示例#1
0
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
	/*----Kernel1:PROCS:sched_switch:Begins---*/
	/*
	 * Mask interrupts before touching the run queue: it is shared with
	 * interrupt context, so IPL_HIGH here plays the role of a lock.
	 */
	uint8_t old_ipl = apic_getipl();
	apic_setipl(IPL_HIGH);
	kthread_t *nextthr = ktqueue_dequeue(&kt_runq);
	kthread_t *oldthr = NULL;

	/*
	 * Empty run queue: every thread is blocked, so drop the IPL and wait
	 * for an interrupt to make something runnable, then re-mask and retry.
	 */
	while (nextthr == NULL)
	{
		apic_setipl(IPL_LOW);
		intr_wait();
		apic_setipl(IPL_HIGH);
		nextthr = ktqueue_dequeue(&kt_runq);
	}

	/*
	 * Switch to the next thread. Keep interrupts masked while updating
	 * curthr/curproc and performing the context switch; an interrupt
	 * arriving between these updates would observe inconsistent scheduler
	 * state. The IPL is per-thread, so we restore this thread's saved IPL
	 * only after context_switch returns (i.e. when we run again).
	 */
	oldthr = curthr;
	curthr = nextthr;
	curproc = nextthr->kt_proc;
	KASSERT(nextthr->kt_state == KT_RUN);

	context_switch(&oldthr->kt_ctx,&curthr->kt_ctx);
	apic_setipl(old_ipl);
	/*----Ends---*/
}
示例#2
0
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
       /*MASKING THE INTERRUPT LEVELS*/
        uint8_t curr_intr_level = apic_getipl();
        apic_setipl(IPL_HIGH);

        if(list_empty(&(kt_runq.tq_list)))
        {
                apic_setipl(IPL_LOW);
                intr_wait();
                apic_setipl(curr_intr_level);
                sched_switch();          
        }
        else
        {
                kthread_t *old_thr = curthr;

                dbg(DBG_THR,"PROCESS FORMERLY EXECUTING: %s\n", curthr->kt_proc->p_comm);

                if(kt_runq.tq_size > 0)
                    {
                            while(1)
                            {
                                curthr = ktqueue_dequeue(&kt_runq);
                                curproc = curthr->kt_proc;
                                if(curthr->kt_state == KT_EXITED)
                                    continue;
                                else
                                    break;
                                
                                if(kt_runq.tq_size == 0)
                                    sched_switch();
                            }

                    }
                else
                    sched_switch();

                     if(curthr->kt_cancelled == 1)
                    {
                        dbg(DBG_THR,"%s was cancelled\n", curproc->p_comm);
                        do_exit(0); 
                    }
                 
                    
                    apic_setipl(curr_intr_level);
                 dbg(DBG_THR,"PROCESS CURRENTLY EXECUTING: %s\n", curthr->kt_proc->p_comm);

                context_switch(&(old_thr->kt_ctx), &(curthr->kt_ctx));

        }
}
示例#3
0
/*
 * Since we are modifying the run queue, we _MUST_ set the IPL to high
 * so that no interrupts happen at an inopportune moment.
 *
 * Remember to restore the original IPL before you return from this
 * function. Otherwise, we will not get any interrupts after returning
 * from this function.
 *
 * Using intr_disable/intr_enable would be equally as effective as
 * modifying the IPL in this case. However, in some cases, we may want
 * more fine grained control, making modifying the IPL more
 * suitable. We modify the IPL here for consistency.
 */
void
sched_make_runnable(kthread_t *thr)
{
	/*----Kernel1:PROCS:sched_make_runnable:Begins---*/
	/*
	 * Enqueueing on the run queue races with interrupt handlers, so the
	 * queue (and the wchan check) is only touched under IPL_HIGH; the
	 * caller's IPL is restored on the way out.
	 */
	KASSERT(thr != NULL);

	uint8_t saved_ipl = apic_getipl();
	apic_setipl(IPL_HIGH);

	KASSERT(&kt_runq != thr->kt_wchan && "thread already on the runq");

	/* Mark runnable and hand the thread to the scheduler. */
	thr->kt_state = KT_RUN;
	ktqueue_enqueue(&kt_runq, thr);

	apic_setipl(saved_ipl);
	/*----Ends----*/
}
示例#4
0
void
sched_make_runnable(kthread_t *thr)
{
    KASSERT(thr != NULL);

    /*
     * Mask interrupts FIRST: the run queue (including thr->kt_wchan and the
     * lazy initialization below) is shared with interrupt context, so no
     * queue state may be read or written before the IPL is raised.
     */
    uint8_t curr_intr_level = apic_getipl();
    apic_setipl(IPL_HIGH);

    /*
     * Lazily initialize the run queue the first time the idle process
     * (pid 0) is made runnable.
     * NOTE(review): prefer doing this once in scheduler init code rather
     * than via a hidden static flag here — confirm no earlier init exists.
     */
    static int runq_initialized = 0;
    if (thr->kt_proc->p_pid == 0 && !runq_initialized)
    {
        sched_queue_init(&kt_runq);
        runq_initialized = 1;
    }

    /* A thread already waiting on the run queue must not be re-enqueued. */
    KASSERT(&kt_runq != thr->kt_wchan);

    dbg(DBG_THR,"ADDING PROCESS: %s, ON RUN_QUEUE\n", thr->kt_proc->p_comm);

    thr->kt_state = KT_RUN;
    ktqueue_enqueue(&kt_runq,thr);

    apic_setipl(curr_intr_level);
}