/*
 * Public scheduler-specific Change Priority operation of the
 * priority-affinity SMP scheduler.  Re-inserts the thread via the
 * affinity-aware enqueue operations, then performs any thread
 * migrations that the change made necessary.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_Context *context;
  Thread_Control    *displaced_thread;

  context = _Scheduler_Get_context( scheduler );

  /* Use the affinity-aware enqueue variants for the re-insertion. */
  displaced_thread = _Scheduler_SMP_Update_priority(
    context,
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_lifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
  );

  /*
   * Honour processor affinities which the priority update may have
   * violated.
   */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return displaced_thread;
}
/*
 * Updates the priority value recorded in the thread's scheduler node
 * of the priority SMP scheduler.  The getters are side-effect free, so
 * they are invoked directly in the argument list.
 */
void _Scheduler_priority_SMP_Update_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Priority_Control         new_priority
)
{
  _Scheduler_priority_SMP_Do_update(
    _Scheduler_Get_context( scheduler ),
    _Scheduler_Thread_get_node( thread ),
    new_priority
  );
}
/*
 * Unblock operation of the simple SMP scheduler.  Delegates to the
 * generic SMP unblock with the simple scheduler's FIFO enqueue.
 */
void _Scheduler_simple_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  _Scheduler_SMP_Unblock(
    _Scheduler_Get_context( scheduler ),
    thread,
    _Scheduler_simple_SMP_Enqueue_fifo
  );
}
/*
 * Unblock operation of the priority SMP scheduler.  Delegates to the
 * generic SMP unblock with the priority scheduler's FIFO enqueue and
 * forwards its return value (a thread needing help, per the generic
 * SMP contract).
 */
Thread_Control *_Scheduler_priority_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  return _Scheduler_SMP_Unblock(
    _Scheduler_Get_context( scheduler ),
    thread,
    _Scheduler_priority_SMP_Enqueue_fifo
  );
}
/*
 * Starts all processors up to cpu_count and records, per processor,
 * whether it is online and which scheduler instance owns it.
 *
 * For every processor other than the one executing this code, the
 * processor is started via _CPU_SMP_Start_processor() only when its
 * initial scheduler assignment says it should run; a failed start of a
 * mandatory processor is fatal.  The executing (boot) processor is
 * always considered started, and it is fatal for it to lack a
 * scheduler assignment.
 */
static void _SMP_Start_processors( uint32_t cpu_count )
{
  uint32_t cpu_index_self;
  uint32_t cpu_index;

  cpu_index_self = _SMP_Get_current_processor();

  for ( cpu_index = 0 ; cpu_index < cpu_count; ++cpu_index ) {
    const Scheduler_Assignment *assignment;
    Per_CPU_Control *cpu;
    bool started;

    assignment = _Scheduler_Get_initial_assignment( cpu_index );
    cpu = _Per_CPU_Get_by_index( cpu_index );

    if ( cpu_index != cpu_index_self ) {
      /* Secondary processor: start it only if a scheduler wants it. */
      if ( _Scheduler_Should_start_processor( assignment ) ) {
        started = _CPU_SMP_Start_processor( cpu_index );

        /* A mandatory processor that fails to start is unrecoverable. */
        if ( !started && _Scheduler_Is_mandatory_processor( assignment ) ) {
          _SMP_Fatal( SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED );
        }
      } else {
        started = false;
      }
    } else {
      /* The boot processor is already running by definition. */
      started = true;
      cpu->boot = true;

      if ( !_Scheduler_Should_start_processor( assignment ) ) {
        _SMP_Fatal( SMP_FATAL_BOOT_PROCESSOR_NOT_ASSIGNED_TO_SCHEDULER );
      }
    }

    cpu->online = started;

    if ( started ) {
      const Scheduler_Control *scheduler;
      Scheduler_Context *context;

      scheduler = assignment->scheduler;
      context = _Scheduler_Get_context( scheduler );

      /*
       * Publish the processor in the global online mask and in the
       * owning scheduler's processor set, then bind the per-CPU
       * control to that scheduler instance.
       */
      _Processor_mask_Set( &_SMP_Online_processors, cpu_index );
      _Processor_mask_Set( &context->Processors, cpu_index );
      cpu->Scheduler.control = scheduler;
      cpu->Scheduler.context = context;
    }
  }
}
/*
 * Starts an idle thread on the given processor by delegating to the
 * generic start-idle helper with the no-op idle registration hook.
 *
 * NOTE(review): another definition of _Scheduler_SMP_Start_idle with a
 * different body appears later in this SOURCE — presumably the two stem
 * from different file versions; confirm only one is compiled.
 */
void _Scheduler_SMP_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *idle,
  Per_CPU_Control         *cpu
)
{
  _Scheduler_SMP_Do_start_idle(
    _Scheduler_Get_context( scheduler ),
    idle,
    cpu,
    _Scheduler_SMP_Do_nothing_register_idle
  );
}
/*
 * Block operation of the simple SMP scheduler.  Delegates to the
 * generic SMP block with this scheduler's extract, highest-ready and
 * ready-to-scheduled move operations.
 */
void _Scheduler_simple_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  _Scheduler_SMP_Block(
    _Scheduler_Get_context( scheduler ),
    thread,
    _Scheduler_simple_SMP_Extract_from_ready,
    _Scheduler_simple_SMP_Get_highest_ready,
    _Scheduler_simple_SMP_Move_from_ready_to_scheduled
  );
}
/*
 * Yield operation of the simple SMP scheduler.  Under ISR protection,
 * the thread is removed from the scheduled chain and re-enqueued FIFO
 * among the scheduled threads, so equal-priority peers get a turn.
 */
void _Scheduler_simple_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_Context *context;
  ISR_Level          isr_level;

  context = _Scheduler_Get_context( scheduler );

  _ISR_Disable( isr_level );

  _Scheduler_SMP_Extract_from_scheduled( thread );
  _Scheduler_simple_SMP_Enqueue_scheduled_fifo( context, thread );

  _ISR_Enable( isr_level );
}
/*
 * Yield operation of the priority SMP scheduler.  Delegates to the
 * generic SMP yield with this scheduler's extract and FIFO enqueue
 * operations and forwards the resulting thread needing help.
 */
Thread_Control *_Scheduler_priority_SMP_Yield(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  return _Scheduler_SMP_Yield(
    _Scheduler_Get_context( scheduler ),
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Enqueue_fifo,
    _Scheduler_priority_SMP_Enqueue_scheduled_fifo
  );
}
/*
 * Starts an idle thread on the given processor: the thread's SMP node
 * is marked scheduled, the thread is pinned to the processor, and it
 * is appended to the scheduler context's chain of scheduled threads.
 *
 * NOTE(review): another definition of _Scheduler_SMP_Start_idle with a
 * different signature/body appears earlier in this SOURCE — presumably
 * from a different file version; confirm only one is compiled.
 */
void _Scheduler_SMP_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Per_CPU_Control         *cpu
)
{
  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );

  /* Idle threads start directly in the scheduled state. */
  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( thread, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &thread->Object.Node );
}
/*
 * Ask-for-help operation of the priority SMP scheduler.  Delegates to
 * the generic SMP helper with this scheduler's FIFO enqueue and
 * forwards the thread that still needs help (if any).
 */
Thread_Control *_Scheduler_priority_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *offers_help,
  Thread_Control          *needs_help
)
{
  return _Scheduler_SMP_Ask_for_help(
    _Scheduler_Get_context( scheduler ),
    offers_help,
    needs_help,
    _Scheduler_priority_SMP_Enqueue_fifo
  );
}
/*
 * Block operation of the priority SMP scheduler.  Delegates to the
 * generic SMP block with this scheduler's extract, highest-ready and
 * move operations, using lazy processor allocation.
 */
void _Scheduler_priority_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  _Scheduler_SMP_Block(
    _Scheduler_Get_context( scheduler ),
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_lazy
  );
}
/*
 * Ask-for-help operation of the priority-affinity SMP scheduler.
 * After the generic helper runs with the affinity-aware FIFO enqueue,
 * any migrations implied by affinity constraints are performed.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help(
  const Scheduler_Control *scheduler,
  Thread_Control          *offers_help,
  Thread_Control          *needs_help
)
{
  Scheduler_Context *context;
  Thread_Control    *still_needs_help;

  context = _Scheduler_Get_context( scheduler );

  still_needs_help = _Scheduler_SMP_Ask_for_help(
    context,
    offers_help,
    needs_help,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /* Enforce processor affinities after the help transaction. */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return still_needs_help;
}
/*
 * Public scheduler-specific Unblock operation of the priority-affinity
 * SMP scheduler.  Unblocks the thread via the generic SMP operation
 * with the affinity-aware FIFO enqueue, then performs any thread
 * migrations the unblock made necessary.
 */
Thread_Control *_Scheduler_priority_affinity_SMP_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  Scheduler_Context *context;
  Thread_Control    *thread_needing_help;

  context = _Scheduler_Get_context( scheduler );

  thread_needing_help = _Scheduler_SMP_Unblock(
    context,
    thread,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_affinity_SMP_Enqueue_fifo
  );

  /* Enforce processor affinities after the unblock. */
  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );

  return thread_needing_help;
}
/*
 * Change Priority operation of the priority SMP scheduler.  Delegates
 * to the generic SMP implementation, supplying this scheduler's
 * extract, update, and the four FIFO/LIFO enqueue variants for both
 * the ready and scheduled sets.
 */
Thread_Control *_Scheduler_priority_SMP_Change_priority(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  Priority_Control         new_priority,
  bool                     prepend_it
)
{
  return _Scheduler_SMP_Change_priority(
    _Scheduler_Get_context( scheduler ),
    thread,
    new_priority,
    prepend_it,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_SMP_Do_update,
    _Scheduler_priority_SMP_Enqueue_fifo,
    _Scheduler_priority_SMP_Enqueue_lifo,
    _Scheduler_priority_SMP_Enqueue_scheduled_fifo,
    _Scheduler_priority_SMP_Enqueue_scheduled_lifo
  );
}
/*
 * Block operation of the priority-affinity SMP scheduler.  Identical
 * in structure to the plain priority SMP block, except that it uses
 * this scheduler's affinity-aware get_highest_ready() and exact
 * processor allocation.
 *
 * No migration check follows: exactly one thread left the scheduled
 * set and the most appropriate ready thread replaced it, so no
 * further thread migrations should be required.
 */
void _Scheduler_priority_affinity_SMP_Block(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread
)
{
  _Scheduler_SMP_Block(
    _Scheduler_Get_context( scheduler ),
    thread,
    _Scheduler_priority_SMP_Extract_from_ready,
    _Scheduler_priority_affinity_SMP_Get_highest_ready,
    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
    _Scheduler_SMP_Allocate_processor_exact
  );
}
static Scheduler_simple_SMP_Context * _Scheduler_simple_SMP_Get_context( const Scheduler_Control *scheduler ) { return (Scheduler_simple_SMP_Context *) _Scheduler_Get_context( scheduler ); }
static Scheduler_priority_SMP_Context * _Scheduler_priority_SMP_Get_context( const Scheduler_Control *scheduler ) { return (Scheduler_priority_SMP_Context *) _Scheduler_Get_context( scheduler ); }