Example no. 1
int exec_thread( unsigned int cpu_id, struct thread *t, 
				 unsigned int milliseconds, int idle )	// idle: non-zero when the idle thread is run
{
	assert( t != NULL );
	
	set_fpu_trap();
	
	set_map( t->process->map );

	cpu[ cpu_id ].sched.running = 1;			// Mark this CPU as running and
	cpu[ cpu_id ].sched.current_thread = t;		// record its current thread before unlocking.
	
	release_spinlock( &(cpu[ cpu_id ].sched.lock_scheduler) ); 
			// Other CPUs can now register their need to mess with this CPU's tables.
	
	do
	{
		sysenter_set_esp( t->stack_kernel );	// Kernel stack used on SYSENTER.

		cpu[ cpu_id ].system_tss->esp0 = t->stack_kernel;
		cpu[ cpu_id ].system_tss->esp  = t->stack;
		cpu[ cpu_id ].system_tss->cr3  = (uint32_t)t->process->map;

		set_apic_distance( cpu_id, milliseconds );	// Arm the APIC timer for this timeslice.

		stats_time( cpu_id, &(cpu[ cpu_id ].st_schedulerTime) );	// Account the elapsed scheduler time.

		t->stack = __switch_thread( t->stack );		// Switch to the thread; returns here when control comes back.

		stats_time_start( &(cpu[ cpu_id ].st_schedulerTime) );		// Resume timing the scheduler.

	} while ( cpu[ cpu_id ].sched.locked == 1 );	// Keep running the thread while scheduling is locked on this CPU.

	// If the FPU was used, save its state.
	if ( t->math_state > 0 ) save_fpu( t );
	
	cpu[ cpu_id ].sched.current_thread = NULL;
	cpu[ cpu_id ].sched.running = 0;
	// WARNING: Don't use the *t pointer after this point.

	// Synchronization point: take the lock back so other CPUs can no longer
	// modify this CPU's scheduler tables.
	acquire_spinlock( &(cpu[ cpu_id ].sched.lock_scheduler) );

	
	return 0;
}
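Neither listing defines the per-CPU and per-thread structures it relies on. The declarations below are only a rough sketch, reconstructed from the fields exec_thread() touches; every type name here (cpu_entry, time_stats, tss_t, spinlock_t) and every field width is an assumption, not the kernel's actual definition.

#include <stdint.h>

typedef volatile uint32_t spinlock_t;                /* placeholder lock type          */
typedef struct { uint64_t usage; } time_stats;       /* .usage is read by scheduler()  */
typedef struct { uint32_t esp0, esp, cr3; } tss_t;   /* only the fields touched above  */

struct process {
	void *map;                     /* page directory, installed with set_map() / cr3 */
};

struct thread {
	uint32_t        stack;         /* saved stack pointer, updated by __switch_thread() */
	uint32_t        stack_kernel;  /* kernel stack for SYSENTER and interrupts          */
	int             math_state;    /* > 0 once the FPU has been used                    */
	struct process *process;
};

struct scheduler_info {
	spinlock_t      lock_scheduler;   /* held while this CPU's tables are touched   */
	volatile int    locked;           /* non-zero: keep the current thread running  */
	int             running;
	struct thread  *current_thread;
	/* run-queue fields are sketched after Example no. 2 */
};

struct cpu_entry {
	struct scheduler_info sched;
	tss_t                *system_tss;
	time_stats            st_schedulerTime;
	time_stats            st_systemTime;
};

extern struct cpu_entry cpu[];        /* one entry per CPU, indexed by cpu_id */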
Example no. 2
void scheduler()
{
	unsigned int cpu_id = CPUID; 
	struct scheduler_info *si = &(cpu[ cpu_id ].sched);
	struct thread *tr;
	int idle;
	int flip = 0;		// Alternate between the fast queue and the normal queue.
	uint64_t requested_time = TIMESLICE;

	// Clear the NT flag: we will never return via a nested hardware task switch.
	set_cpu_flags( cpu_flags() & ~EFLAG_NESTED_TASK );

	// Now we're working.
	acquire_spinlock( &(si->lock_scheduler) );

	// Start timing the scheduler
	stats_time_start( &(cpu[ cpu_id ].st_schedulerTime) );

	assert( (cpu_flags() & EFLAG_INTERRUPT) == 0 );

	ack_apic();

	while ( 1 )
	{
		// Just show the world that we're still alive.
		((char*)0xB8000)[158 - cpu_id * 2] ++ ;		/// \todo remove one day


		// If the garbage collector has work to do ( gc_has_work() returns 0 ),
		// let its thread run first.
		if ( gc_has_work( cpu_id ) == 0 )
		{
			tr = smk_gc[ cpu_id ];
			exec_thread( cpu_id, tr, TIMESLICE, 0 );
			stats_time( cpu_id, &(cpu[ cpu_id ].st_systemTime) ); 
		}

		
		// Remove any expired timers and find out when the next timed event is due.
		requested_time = remove_timers( si, cpu[ cpu_id ].st_systemTime.usage );
		idle = 0;
			
		// Fast queue support
		if ( flip == 0 )
		{
			tr = get_fast( si );
			if ( tr != NULL )
			{
				exec_thread( cpu_id, tr, TIMESLICE, 0 );
				stats_time( cpu_id, &(cpu[ cpu_id ].st_systemTime) ); 
														// Maintain CPU time.

				if ( si->sched_count != 0 ) flip = 1;	// Ensure others run
				continue;
			}
		}


		flip = 0;		// Check the fast queue again on the next pass.
		
		// If there's nothing scheduled, run the idle thread.
		if ( si->sched_count == 0 ) 
		{
			if ( gc_has_work( cpu_id ) == 0 ) continue;	// GC has work: go back and let it run.
				
			tr = smk_idle[ cpu_id ];
			idle = 1;		// Safe to wake up when required.
		}
		else
		{
			tr = si->sched_queue[ si->position ].tr;	// Round-robin: take the next queued thread.
			si->position = (si->position + 1) % si->sched_count;

			requested_time = TIMESLICE;
			idle = 0;		// Don't interrupt until timeslice is over
		}


		// And run the selected thread.
		exec_thread( cpu_id, tr, requested_time, idle );
		stats_time( cpu_id, &(cpu[ cpu_id ].st_systemTime) );	// Maintain CPU time.
		// If the previous thread requested a state change, honour it.
		remove_last( si );

	}

}
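For completeness, here is an equally speculative sketch of the run-queue side that scheduler() relies on. The queue capacity, the TIMESLICE value, and the structure names below are illustrative guesses inferred from the fields the code reads, not the kernel's real definitions; they would extend the scheduler_info sketch shown after Example no. 1.

#define TIMESLICE   10                     /* assumed default timeslice, in milliseconds */
#define MAX_SCHED   64                     /* assumed run-queue capacity                 */

struct sched_entry {
	struct thread *tr;                     /* a runnable thread */
};

struct scheduler_run_queue {
	int                position;           /* round-robin cursor                     */
	int                sched_count;        /* number of runnable threads             */
	struct sched_entry sched_queue[ MAX_SCHED ];
	/* get_fast() presumably pops from a separate short "fast" queue (not shown) */
};

extern struct thread *smk_gc[];            /* per-CPU garbage-collector threads */
extern struct thread *smk_idle[];          /* per-CPU idle threads              */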