Example no. 1
/*
 * schedule() is your CPU scheduler. It currently implements basic FIFO
 * scheduling:
 * 1. calls getReadyProcess() to select and remove a runnable process from
 *    your ready queue
 * 2. updates the current array to show this process (or NULL if there was
 *    none) as running on the given cpu
 * 3. sets this process's state to running (unless it is the NULL process)
 * 4. calls context_switch() to actually start the chosen process on the
 *    given cpu
 *    - note if proc==NULL the idle process will be run
 *    - note the final arg of -1 means there is no clock interrupt
 * context_switch() is prototyped in os-sim.h; look there for more
 * information. A basic getReadyProcess() is implemented below; see its
 * comments for details.
 *
 * TO-DO: handle scheduling with a time-slice when necessary
 *
 * THIS FUNCTION IS PARTIALLY COMPLETED - REQUIRES MODIFICATION
 */
static void schedule(unsigned int cpu_id) {
  pcb_t* proc;

  if (alg == StaticPriority) {  // SP: pop the highest-priority process
    proc = pop_high_priority();
  } else {  // otherwise keep the normal FIFO ordering
    proc = getReadyProcess();
  }

  pthread_mutex_lock(&current_mutex);
  current[cpu_id] = proc;  // record this process as running on the given cpu
  pthread_mutex_unlock(&current_mutex);

  if (proc != NULL) {
    proc->state = PROCESS_RUNNING;
  }

  if (alg == FIFO || alg == StaticPriority) {  // FIFO and SP run with no clock interrupt
    context_switch(cpu_id, proc, -1);
  }

  if (alg == RoundRobin) {  // RR: preempt after one time slice
    context_switch(cpu_id, proc, time_slice);
  }
}
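For reference, a minimal sketch of the two selection helpers called above. Their real bodies are not shown in this excerpt, so the list and field names used here (ready_head, ready_mutex, static_priority) are assumptions:

static pcb_t* getReadyProcess(void) {
    pthread_mutex_lock(&ready_mutex);
    pcb_t* proc = ready_head;             /* FIFO: take the oldest entry */
    if (proc != NULL)
        ready_head = proc->next;
    pthread_mutex_unlock(&ready_mutex);
    return proc;
}

static pcb_t* pop_high_priority(void) {
    pthread_mutex_lock(&ready_mutex);
    pcb_t *best = NULL, *prev_best = NULL, *prev = NULL;
    for (pcb_t *p = ready_head; p != NULL; prev = p, p = p->next) {
        if (best == NULL || p->static_priority > best->static_priority) {
            best = p;
            prev_best = prev;
        }
    }
    if (best != NULL) {                   /* unlink the chosen process */
        if (prev_best == NULL)
            ready_head = best->next;
        else
            prev_best->next = best->next;
    }
    pthread_mutex_unlock(&ready_mutex);
    return best;
}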
Example no. 2
void schedule()
{
	static struct task_struct *nxt = NULL;
	struct task_struct *curr;
	
//	printf("In schedule\n");
//	print_rq();
	
	current->need_reschedule = 0; /* Always make sure to reset that, in case *
								   * we entered the scheduler because current*
								   * had requested so by setting this flag   */
	current->last = sched_clock();
	if (current != nxt) {
		current->burst = current->last - current->used_the_processor;
		/* exponential average of the burst: weight the new burst against the old estimate */
		current->exp_burst = (current->burst + (0.5 * current->exp_burst)) / (1 + 0.5);
	}
	else {
		/* for the consecutive times it keeps the processor */
		current->burst += current->last - current->used_the_processor;
	}

	if (rq->nr_running == 1) {
		context_switch(rq->head);
		nxt = rq->head->next;
	}
	else {	
		
		if (nxt == rq->head) {   /* Do this to always skip init at the head */
			nxt = nxt->next;	/* of the queue, whenever there are other  */
		}					/* processes available					   */
		curr = best_burst();
		context_switch(curr);
	  	curr->used_the_processor = sched_clock();
	}
}
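A note on the estimate fixed above: it is the standard exponential average with weight 0.5, i.e. exp_burst_new = (burst + 0.5 * exp_burst_old) / (1 + 0.5). A minimal sketch of the recurrence as a standalone helper (hypothetical; not part of the original source):

static double update_exp_burst(double burst, double prev_exp_burst)
{
	/* the newest burst counts twice as heavily as the running estimate */
	return (burst + 0.5 * prev_exp_burst) / (1.0 + 0.5);
}

Example no. 3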
static void infinite(void) {
	while (mask) {
		test_assert(0 == (mask & 1));
		mask >>= 1;

		context_switch(&infinite_context, &entry_context);
	}
	context_switch(&infinite_context, &redundant_context);
}
Example no. 4
//Schedules the current batch of processes
void process_schedule()
{
    process_t* start;
    if (curr_running_proc != NULL )
    {
        start = curr_running_proc;
        //In round-robin the aim is to find the next runnable process and make it the running one
        //First scan the list from the currently running process to the end of the allocated processes
        while(start != NULL)
        {
            if(start->state==PROCESS_RUNNABLE)
            {
                // sanity check: a runnable process should never have pid -1
                if(start->pid==-1)
                {
                    #ifdef LOG
                        printf("Run for your lives. Process is runnable with -1 process id \n");
                    #endif
                    return;
                }
                context_switch(start);
                return;
            }
            start = start->next;
        }
    }
   
     //Now scan the list from the start of the allocated processes up to the currently running process
     start = process_runnable_queue;
     while(start && start != curr_running_proc)
     {
        if (start->state==PROCESS_RUNNABLE)
        {
            // sanity check: a runnable process should never have pid -1
            if(start->pid==-1)
            {
                #ifdef LOG
                    printf("Run for your lives. Process is runnable with -1 process id \n");
                #endif
                return;            
            }
            context_switch(start);
            return;
        }
        start = start->next;
     }
     //If we have reached this point then no other process needs scheduling. Hence return.
     return;
}
Example no. 5
asmlinkage void schedule(void)
{
	//...
	array = rq->active;
	if (unlikely(!array->nr_active)) {
		/*
		 * Switch the active and expired arrays.
		 */
		rq->active = rq->expired;
		rq->expired = array;
		array = rq->active;
		rq->expired_timestamp = 0;
	}

	idx = sched_find_first_bit(array->bitmap);
	queue = array->queue + idx;
	next = list_entry(queue->next, task_t, run_list);
	//...
	if (likely(prev != next)) {
		next->timestamp = now;
		rq->nr_switches++;
		rq->curr = next;

		prepare_arch_switch(rq, next);
		prev = context_switch(rq, prev, next);
		barrier();

		finish_task_switch(prev);
	}
	//...
}
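For context, sched_find_first_bit() above returns the index of the lowest set bit in the priority bitmap, i.e. the highest-priority (numerically smallest) level whose queue is non-empty. A naive equivalent, ignoring the word-at-a-time optimizations of the real implementation:

static int find_first_bit_naive(const unsigned long *bitmap, int nbits)
{
	int i;
	for (i = 0; i < nbits; i++) {
		int word = i / (8 * sizeof(unsigned long));
		int bit  = i % (8 * sizeof(unsigned long));
		if (bitmap[word] & (1UL << bit))
			return i;	/* first non-empty priority level */
	}
	return nbits;	/* no task is runnable */
}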
Example no. 6
/**
 * Starts the operating system by starting the system timer, creating the main thread,
 * and performing the very first invocation of context_switch().
 */
void os_start(void) {
   createMainThread();

   start_system_timer();
   context_switch((uint16_t *) (&system->threads[0].stackPointer), 
    (uint16_t *) (&system->threads[MAX_NUMBER_OF_THREADS].stackPointer));
}
Example no. 7
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
	/*----Kernel1:PROCS:sched_switch:Begins---*/
	uint8_t old_ipl = apic_getipl();
	apic_setipl(IPL_HIGH);
	kthread_t *nextthr = ktqueue_dequeue(&kt_runq);
	kthread_t *oldthr = NULL;

	/* loop: wait on interrupts until a runnable thread appears */
	while (nextthr == NULL)
	{
		apic_setipl(IPL_LOW);
		intr_wait();
		apic_setipl(IPL_HIGH);
		nextthr = ktqueue_dequeue(&kt_runq);
	}

	/*Switch to next thr*/
	apic_setipl(old_ipl);
	oldthr = curthr;
	curthr = nextthr;
	curproc = nextthr->kt_proc;
	KASSERT(nextthr->kt_state == KT_RUN);
	
	context_switch(&oldthr->kt_ctx,&curthr->kt_ctx);
	/*----Ends---*/
}
Example no. 8
/**
 * Switch out the current task for the next task to run.
 *
 * TODO: schedule MUST always be called with interrupts disabled
 */
void schedule(void) {
    struct task_struct *prev, *next;

    /* Assuming atomicity */
    prev = curr_task;
    next = rr_pick_next_task();

    if(prev != next) {
        run_queue.num_switches++;
        curr_task = next;
        last_task = prev; /* the last_task to run is the "prev" */

        context_switch(prev, next);
        /* WE CAN NOT REFER TO LOCALS AFTER the context_switch */

        post_context_switch();

        /* If this was the first switch then the current stack has no frame for
         * schedule(), on retq we will return to the task's start function for
         * the first time.
         */
        if(curr_task->first_switch) {
            curr_task->first_switch = 0;
            curr_task->kernel_rsp = ALIGN_UP(curr_task->kernel_rsp, PAGE_SIZE) - 16;
            __asm__ __volatile__ ("retq;");
        }
    }

}
Example no. 9
/*
 * fiber procedure
 */
void WINAPI fp (void *  p) {
  sc_context *  pSelf = (sc_context *)p;
  *(pSelf->pParameter) = (pSelf->pRoutine)(*(pSelf->pParameter));
  pSelf->state = FS_COMPLETED;
  if (pSelf->pParent != NULL) context_switch(pSelf->pParent);
  return;
}
Example no. 10
void os_tick()
{
    if (started && ++current_timeslice == MAX_TIMESLICE) {
        current_timeslice = 0;
	
        __disable_irq();

        int next_idx = get_next_ready_task_index();

        if (next_idx != current_task) {
            // context switch
            uint32_t *dest = task_list[current_task]->registers;
            uint32_t *source = task_list[next_idx]->registers;
            uint32_t newpc = task_list[current_task]->pc;

            current_task = next_idx;

            context_switch(dest, source, newpc);
/*
            asm volatile(
                    "mov  R0, %[dest]"      "\n\t"
                    "stm  R0!, {R1-R7}"     "\n\t"
                    "mov  R0, %[source]"    "\n\t"
                    "ldm  R0!, {R1-R7}" :: [dest] "r" (dest), [source] "r" (source)
                    );
  */
        }
        __enable_irq();
    }
}
Example no. 11
int user_mutex_trylock(user_mutex_t *x) {
#ifdef WITH_REALTIME_THREADS
  if (handlerMask&javax_realtime_Scheduler_HANDLE_MUTEX_TRYLOCK) {
    JNIEnv* env = FNI_GetJNIEnv();
    (*env)->CallStaticVoidMethod(env, SchedulerClaz, Scheduler_handle_mutex_trylock);
#ifdef WITH_REALTIME_THREADS_DEBUG
    fflush(0);
    assert(!((*env)->ExceptionOccurred(env)));
#endif
  }
#endif
  if ((SEMAPHORE_TEST_AND_SET(&(x->mutex)))==SEMAPHORE_CLEAR) {
    return 0;
  } else {
#ifndef WITH_REALTIME_THREADS
    // This is a tricky one -
    // Without this, bdemsky's user threads can livelock in the
    // following code: 
    // while (try_lock) do stuff;
    context_switch();
#else
    // But with RTJ, we need to guarantee a thread's quanta as much as
    // possible.  Therefore, RTJ may need to switch when the quanta is up or 
    // when the thread is blocked - but at NO OTHER TIME.
    //
    // Perhaps the scheduler should be informed of this to make
    // wise scheduling policy decisions - but I'm gonna wait until 
    // I put the scheduler lock informing code in place...

#endif
    return EBUSY;
  }
}
Example no. 12
/*
 * (Same assignment comment as in Example no. 7 above.)
 */
void
sched_switch(void)
{
	/* Do I need to enqueue prevthr?
	 * Do I need to keep running prevthr if it is still runnable? */
	/* for bugs, check the Run Queue Access slide */

	uint8_t prev_ipl = intr_getipl();
	intr_setipl(IPL_HIGH);
	
	kthread_t *prevthr = curthr;
	
	if (prevthr->kt_state == KT_RUN)
		ktqueue_enqueue(&kt_runq, prevthr);

	while (sched_queue_empty(&kt_runq)) { 
		panic("I never actually wait on things here");
		intr_setipl(IPL_LOW);
		intr_wait();
		intr_setipl(IPL_HIGH);
	}
	/*If there is a thread*/
	kthread_t *t = ktqueue_dequeue(&kt_runq);
	
	curproc = t->kt_proc;
	curthr = t; 
	/* NOT_YET_IMPLEMENTED("PROCS: sched_switch"); */
	context_switch(&prevthr->kt_ctx, &curthr->kt_ctx);
	intr_setipl(prev_ipl);

}
Example no. 13
/*
 * implementation for context_switchback
 */
void context_switchback() {
  if (top_context != NULL) {
    if (current_context != top_context && delayed(top_context)) {
      context_switch(top_context);
    }
  }
  return;
}
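Example no. 14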
void thread_pause (int delay)
{
  context_disable ();
  if (current_process) {
    thread_insert_timer (current_process, time_add (delay,
						    current_process->time));
  }
  context_switch (context_select ());
}
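Example no. 15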
static void entry(void) {
	TRACE("entry begin\n");
	while (mask) {
		test_assert(mask & 1);
		mask >>= 1;

		context_switch(&entry_context, &infinite_context);
	}
	TRACE("entry end (should not be reached)\n");
}
Example no. 16
void
arch_sched(Thread *prev)
{
    if (prev) {
        context_switch(&prev->arch_data.context, sched_context);
    } else {
        context_restore(sched_context);
        panic("schould not return here");
    }
}
Example no. 17
void
arch_run(Thread *t)
{
    load_tss(t->kstack + PAGE_SIZE);
    lcr3(kva2pa(t->process->vm->arch_data));
    kercall_stack = t->kstack + PAGE_SIZE;
    wrmsr(MSR_IA32_FS_BASE, t->tls_ptr);

    context_switch(&sched_context, t->arch_data.context);
}
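Example no. 18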
/// Copy local buff to shared ring_buff
void put_reply(void)
{
    while( put_ring_buff( w_buff, buff, wp ) != 0 )
    {
        if( context_switch(100) != 0 )
          {
            printf("rttot @put_reply\n");
          }
    }
}
Example no. 19
void Fiber::switchTo()
{	
	Fiber *fromFiber = current();
	if (fromFiber == this)
		return;

	Core::current()->fCurrentFiber[__builtin_vp_get_current_strand() % 
		kHardwareThreadsPerCore] = this;
	context_switch(&fromFiber->fStackPointer, fStackPointer);
}
Example no. 20
VOID process_switch(VOID)
{
  //dump_queue();
  if (currentRunningProcess != NULL)
  {
    append_process(currentRunningProcess);
  }
  pcb *nextRunning = next_ready_process();
  context_switch(nextRunning);

}
Example no. 21
/*
 * This is the wrapper for the task that executes redundantly on both cores.
 * There is one VERY important thing to note. When the critical task begins executing
 * the value of the stack pointer MUST be the same on both cores. This means that
 * the wrapper must have the same number of variables declared within its scope (i.e.
 * onto its stack) before calling the critical task (pt() in this example)
 */
void preemption_task(void* pdata){
	int done = 0;
	int first = 0;
	int t_os;

	CriticalFunctionPointers* cp =
				(CriticalFunctionPointers*) SHARED_MEMORY_BASE;
	pt = cp->task[1];

	while(1){
		// Get initial time, then wait for 2 ticks
		t_os = OSTimeGet();
		OSTimeDly(2 - t_os);

		//This is a crude way of synchronizing the beginning of the task
		//on both cores
		while (done == 0) {
			altera_avalon_mutex_lock(mutex, 1); //Acquire the hardware mutex
			{
				if(first == 0){
					cp->checkout[1] = 1;
					first = 1;
				}
				if( cp->checkout[0] == 1){
					cp->checkout[0] = 0;
					done = 1;
				}

			}
			altera_avalon_mutex_unlock(mutex);
		}

		// Set default block size for fingerprinting
		fprint_set_block_size(cp->blocksize[1]);

		//Context switch is necessary to clear the callee saved registers
		long registers[8];
		context_switch(registers);

		//Set the global pointer in case of compilation issues related
		//to global variables
		set_gp();
		//call the critical task
		pt(cp->args[1]);
		//restore the original global pointer
		restore_gp();
		//Restore the callee saved registers
		context_restore(registers);
		//Get the end time
		alt_u64 t = alt_timestamp();
		//store the end time
		cp->core_time[1] = t;
	}
}
Example no. 22
void irq_handler(irq_type type) {

	assert(pCurrentProcessPCB->currentState != NEW,"A new process has been interrupted.");

	pCurrentProcessPCB->currentState = INTERRUPTED;

	switch (type) {
		case TIMER_IRQ:
			context_switch(pCurrentProcessPCB, get_timer_pcb());
			break;
		case UART0_IRQ:
			context_switch(pCurrentProcessPCB, get_uart_pcb()); 
			break;
		default:
			break;
		
	}

	k_release_processor();
}
Example no. 23
/*
 * (Same assignment comment as in Example no. 7 above.)
 */
void
sched_switch(void)
{
        /* mask interrupts while touching the run queue */
        uint8_t curr_intr_level = apic_getipl();
        apic_setipl(IPL_HIGH);

        if (list_empty(&(kt_runq.tq_list)))
        {
                apic_setipl(IPL_LOW);
                intr_wait();
                apic_setipl(curr_intr_level);
                sched_switch();
        }
        else
        {
                kthread_t *old_thr = curthr;

                dbg(DBG_THR, "PROCESS FORMERLY EXECUTING: %s\n", curthr->kt_proc->p_comm);

                if (kt_runq.tq_size > 0)
                {
                        /* skip over threads that have already exited */
                        do {
                                curthr = ktqueue_dequeue(&kt_runq);
                                curproc = curthr->kt_proc;
                        } while (curthr->kt_state == KT_EXITED);
                }
                else
                        sched_switch();

                if (curthr->kt_cancelled == 1)
                {
                        dbg(DBG_THR, "%s was cancelled\n", curproc->p_comm);
                        do_exit(0);
                }

                apic_setipl(curr_intr_level);
                dbg(DBG_THR, "PROCESS CURRENTLY EXECUTING: %s\n", curthr->kt_proc->p_comm);

                context_switch(&(old_thr->kt_ctx), &(curthr->kt_ctx));
        }
}
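Example no. 24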
VOID c_serial_handler(VOID)
{
  //atomic_on();
 //rtx_dbug_outs((CHAR *)"IN SERIAL current running process: ");
  //rtx_dbug_out_number(get_running_process()->pid);
  //rtx_dbug_outs((CHAR *)"current caller: ");
  //rtx_dbug_out_number(get_running_process()->pid);
  context_switch(get_uart_pcb());
  process_switch_if_there_is_a_higher_priority_process(); 

  //atomic_off();
}
Example no. 25
void schedule() {
  struct task_struct *next_task = NULL;
  struct task_struct *prev_task = current_task;

  register unsigned long sp asm ("sp");
  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, sp = %x\n", __func__, sp);

  need_reschedule = false;

  if (NULL == current_task) while(1);

  if (PROCESS_STATE_DEAD == current_task->sched_en.state) {
	int waiting_pid = current_task->sched_en.blocked_pid;
	if (-1 != waiting_pid) {
	  struct task_struct *waiting_task = find_task_by_pid(waiting_pid);
	  if (waiting_task) {
		dequeue_task(waiting_task);
		waiting_task->sched_en.state = PROCESS_STATE_READY;
		waiting_task->sched_en.blocking_pid = -1;
		enqueue_task(waiting_task, sched_enqueue_flag_timeout);
	  }
	}
	destroy_user_thread(current_task);
	current_task = NULL;
  } else
	scheduler->enqueue_task(current_task, sched_enqueue_flag_timeout);

  next_task = scheduler->pick_next_task();

  if (current_task)
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task->pid = %d\n", __func__, current_task->pid);
  if (next_task)
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, next_task->pid = %d\n", __func__, next_task->pid);

  if (current_task == next_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task == next_task\n", __func__);
	return;
  } else if (NULL == next_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, NULL == next_task\n", __func__);
	return;
  } else if (NULL == current_task) {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, NULL == current_task\n", __func__);
	current_task = next_task;
  } else {
	printk(PR_SS_PROC, PR_LVL_DBG5, "%s, current_task != next_task\n", __func__);
	current_task = next_task;
  }

  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, context_switch %d <--> %d start\n", __func__, prev_task->pid, next_task->pid);  
  context_switch(prev_task, next_task);
  printk(PR_SS_PROC, PR_LVL_DBG5, "%s, context_switch %d <--> %d finish\n", __func__, prev_task->pid, next_task->pid);
}
Example no. 26
/*
 * schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
{
        struct task_struct *prev, *next;
        unsigned long *switch_count;
        struct rq *rq;
        int cpu;

need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_sched_qs(cpu);
        prev = rq->curr;
        switch_count = &prev->nivcsw;

        release_kernel_lock(prev);
need_resched_nonpreemptible:

        schedule_debug(prev);

        if (sched_feat(HRTICK))
                hrtick_clear(rq);

        raw_spin_lock_irq(&rq->lock);
        update_rq_clock(rq);
        clear_tsk_need_resched(prev);

        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev)))
                        prev->state = TASK_RUNNING;
                else
                        deactivate_task(rq, prev, 1);
                switch_count = &prev->nvcsw;
        }

        pre_schedule(rq, prev);

        if (unlikely(!rq->nr_running))
                idle_balance(cpu, rq);

        put_prev_task(rq, prev);
        next = pick_next_task(rq);

        if (likely(prev != next)) {
                sched_info_switch(prev, next);
                perf_event_task_sched_out(prev, next);

                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;

                context_switch(rq, prev, next); /* unlocks the rq */
        }
        //...
}
Example no. 27
/*
 * implementation for context_recall
 */
void *  context_recall(sc_context *  pContext) {
  void *  r;

  assert(pContext != NULL);

  if (top_context == current_context) /*especially for PM realization*/
    context_switch(pContext);
  r = (pContext->pFiber != NULL)? 
    completed(pContext)? *(pContext->pParameter): NULL:
    NULL;
  if (completed(pContext)) context_complete(pContext);
  return r;
}
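Example no. 28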
/* pre: interrupts disabled */
void context_timeout (void)
{
  process_t *p;

  if (current_process != NULL) {
    current_process->in_readyq = 1;
    q_ins (readyQh, readyQt, current_process);
    p = context_select ();
    context_switch (p);
  }
  else
    context_enable();
}
Example no. 29
/* fig_begin task_switch_soft_kernel */ 
void task_switch(int task_id_old, int task_id_new)
{
    /* a pointer to the old stack pointer */ 
    mem_address *old_stack_pointer; 
    /* the new stack pointer */ 
    mem_address new_stack_pointer; 

    /* pointers to TCB for the two tasks */ 
    task_control_block *old_tcb_ref; 
    task_control_block *new_tcb_ref;

    /* get references to the TCBs */ 
    old_tcb_ref = tcb_storage_get_tcb_ref(task_id_old); 
    new_tcb_ref = tcb_storage_get_tcb_ref(task_id_new);  
   
    /* set pointer to old stack pointer */ 
    old_stack_pointer = &old_tcb_ref->stack_pointer; 
    /* set new stack pointer */ 
    new_stack_pointer = new_tcb_ref->stack_pointer; 

    /* set Task_Id_Running to task id of new task */ 
    Task_Id_Running = task_id_new; 
/* fig_end task_switch_soft_kernel */ 

#ifdef BUILD_ARM_BB
    if (int_status_is_interrupt_active())
    {
        /* task switch from interrupt */ 
        // console_put_string("TS-INT old-new "); 
        // console_put_hex(task_id_old); 
        // console_put_hex(task_id_new); 
        context_switch_int(old_stack_pointer, new_stack_pointer);
    }
    else
    {
        /* task switch using software interrupt */ 
        // console_put_string("TS-SWI old-new "); 
        // console_put_hex(task_id_old); 
        // console_put_hex(task_id_new); 
        /* fig_begin context_switch_swi */ 
        context_switch_swi(old_stack_pointer, new_stack_pointer);
        /* fig_end context_switch_swi */ 
    }
#else
/* fig_begin task_switch_soft_kernel */ 

    /* do the task switch on the host */ 
    context_switch(old_stack_pointer, new_stack_pointer);
/* fig_end task_switch_soft_kernel */ 
#endif
}
Example no. 30
static void schedule(unsigned int cpu_id)
{
    pthread_mutex_lock(&readylist_mutex);

    pcb_t* process = NULL;
    if (readylist_item_count == 0) {
        // No ready process: call context_switch with a NULL PCB to select
        // the idle process; -1 means no clock interrupt.
        context_switch(cpu_id, NULL, -1);
    }
    else { // there is a ready process to run
        // Extract the first item from the ready list, then call
        // context_switch to start it on this cpu.
        process = head;            // head of the ready list
        head = head->next;         // remove the process from the list
        readylist_item_count--;    // shrink the list count

        process->state = PROCESS_RUNNING;
        context_switch(cpu_id, process, -1); // -1: infinite time slice (FIFO and priority scheduling)
	
        pthread_mutex_lock(&current_mutex);

        //update currentarr
        currentarr[cpu_id] = process; //put process into current array
        currentq_item_count++; // update size

        pthread_mutex_unlock(&current_mutex); //unlock

    }

    pthread_mutex_unlock(&readylist_mutex);
 
    // Any ready processes left are waiting for a cpu to execute them.
    // When adding to the ready queue, signal the condition idle cpus wait
    // on (see the sketch below).
}
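A minimal sketch of that enqueue-side wakeup, assuming a condition variable readylist_cv (hypothetical; the excerpt above does not show it) that idle cpus block on:

static pthread_cond_t readylist_cv = PTHREAD_COND_INITIALIZER; /* assumed */

static void readylist_append(pcb_t* proc) /* hypothetical helper */
{
    pthread_mutex_lock(&readylist_mutex);
    proc->next = NULL;
    if (head == NULL) {
        head = proc;
    } else {
        pcb_t* it = head;
        while (it->next != NULL)
            it = it->next;
        it->next = proc;
    }
    readylist_item_count++;
    pthread_cond_signal(&readylist_cv); /* wake an idle cpu waiting for work */
    pthread_mutex_unlock(&readylist_mutex);
}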