/*
 * Surrenders the thread queue to a new owner for sticky protocols.  The
 * priority and sticky level of both the previous and the new owner are
 * updated while thread dispatching is disabled.
 */
void _Thread_queue_Surrender_sticky(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  Per_CPU_Control *cpu_self;

  /* A surrender requires at least one waiting thread on the queue. */
  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );

  /* Let the queue discipline pick the new owner among the waiters. */
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;
  _Thread_queue_Make_ready_again( new_owner );

  /*
   * Disable thread dispatching before the queue lock is released so that the
   * priority and sticky updates below happen before any dispatch can occur.
   */
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  /* Previous owner drops one sticky level; new owner's level is unchanged. */
  _Thread_Priority_and_sticky_update( previous_owner, -1 );
  _Thread_Priority_and_sticky_update( new_owner, 0 );
  _Thread_Dispatch_enable( cpu_self );
}
/*
 * Enqueues the executing thread on the condition variable's thread queue
 * with thread dispatching disabled.
 *
 * Returns the per-CPU control of the executing processor so the caller can
 * re-enable thread dispatching after the wait completes.
 */
static Per_CPU_Control *_Condition_Do_wait(
  struct _Condition_Control *_condition,
  Watchdog_Interval          timeout,
  ISR_lock_Context          *lock_context
)
{
  Condition_Control *condition;
  Thread_Control    *executing;
  Per_CPU_Control   *cpu_self;

  condition = _Condition_Get( _condition );
  executing = _Condition_Queue_acquire_critical( condition, lock_context );
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  executing->Wait.return_code = 0;

  /*
   * Block on the condition queue; on timeout the wait status becomes
   * ETIMEDOUT.  NOTE(review): a timeout of 0 presumably means no timeout —
   * confirm against _Thread_queue_Enqueue_critical().
   */
  _Thread_queue_Enqueue_critical(
    &condition->Queue.Queue,
    CONDITION_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_CONDITION,
    timeout,
    ETIMEDOUT,
    lock_context
  );
  return cpu_self;
}
/*
 * Releases a new job of the rate monotonic period: arms the period watchdog
 * for the next interval and hands the resulting deadline to the scheduler.
 */
static void _Rate_monotonic_Release_job(
  Rate_monotonic_Control *the_period,
  Thread_Control         *owner,
  rtems_interval          next_length,
  ISR_lock_Context       *lock_context
)
{
  Per_CPU_Control      *cpu_self;
  Thread_queue_Context  queue_context;
  uint64_t              deadline;

  cpu_self = _Thread_Dispatch_disable_critical( lock_context );

  /* The watchdog expiration time is the deadline of the released job. */
  deadline = _Watchdog_Per_CPU_insert_ticks(
    &the_period->Timer,
    cpu_self,
    next_length
  );
  _Scheduler_Release_job(
    owner,
    &the_period->Priority,
    deadline,
    &queue_context
  );
  _Rate_monotonic_Release( the_period, lock_context );

  /* Apply any priority change requested by the scheduler job release. */
  _Thread_Priority_update( &queue_context );
  _Thread_Dispatch_enable( cpu_self );
}
/*
 * Starts a dormant thread with the given entry information.
 *
 * Returns true if the thread was dormant and has been started, and false
 * otherwise (the thread was already started).  The caller must hold the
 * thread state lock via lock_context.
 */
bool _Thread_Start(
  Thread_Control                 *the_thread,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
)
{
  Per_CPU_Control *cpu_self;

  _Thread_State_acquire_critical( the_thread, lock_context );

  /* Only dormant (never started) threads may be started. */
  if ( !_States_Is_dormant( the_thread->current_state ) ) {
    _Thread_State_release( the_thread, lock_context );
    return false;
  }

  the_thread->Start.Entry = *entry;
  _Thread_Load_environment( the_thread );

  /* Clearing all states makes the thread ready. */
  _Thread_Clear_state_locked( the_thread, STATES_ALL_SET );

  /* Keep dispatching disabled while the start extensions run. */
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Thread_State_release( the_thread, lock_context );
  _User_extensions_Thread_start( the_thread );
  _Thread_Dispatch_enable( cpu_self );
  return true;
}
/*
 * Sends signal sig to the given POSIX thread.
 *
 * Returns 0 on success, EINVAL for an invalid signal number, and ESRCH if no
 * thread exists for the given identifier.  Signals whose handler is SIG_IGN
 * are silently dropped.
 */
int pthread_kill( pthread_t thread, int sig )
{
  Thread_Control    *the_thread;
  ISR_lock_Context   lock_context;
  POSIX_API_Control *api;
  Per_CPU_Control   *cpu_self;

  if ( !is_valid_signo( sig ) ) {
    return EINVAL;
  }

  the_thread = _Thread_Get( thread, &lock_context );

  if ( the_thread == NULL ) {
    return ESRCH;
  }

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  /* Ignored signals are discarded without affecting the target thread. */
  if ( _POSIX_signals_Vectors[ sig ].sa_handler == SIG_IGN ) {
    _ISR_lock_ISR_enable( &lock_context );
    return 0;
  }

  /* XXX critical section */
  api->signals_pending |= signo_to_mask( sig );

  /*
   * Disable dispatching before re-enabling interrupts so the unblock below
   * cannot be preempted on this processor.
   */
  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _ISR_lock_ISR_enable( &lock_context );

  (void) _POSIX_signals_Unblock_thread( the_thread, sig, NULL );
  _Thread_Dispatch_enable( cpu_self );
  return 0;
}
/*
 * Sets the priority of the task to new_priority and returns the previous
 * priority via old_priority_p.  RTEMS_CURRENT_PRIORITY queries the priority
 * without changing it.
 */
rtems_status_code rtems_task_set_priority(
  rtems_id             id,
  rtems_task_priority  new_priority,
  rtems_task_priority *old_priority_p
)
{
  Thread_Control          *the_thread;
  ISR_lock_Context         lock_context;
  const Scheduler_Control *scheduler;
  Priority_Control         old_priority;
  rtems_status_code        status;

  if ( old_priority_p == NULL ) {
    return RTEMS_INVALID_ADDRESS;
  }

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
    /* The task may live on a remote node; forward the request. */
    return _RTEMS_tasks_MP_Set_priority( id, new_priority, old_priority_p );
#else
    return RTEMS_INVALID_ID;
#endif
  }

  if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
    RTEMS_tasks_Set_priority_context  context;
    Per_CPU_Control                  *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
    _ISR_lock_ISR_enable( &lock_context );

    /* The filter callback validates and applies the priority change. */
    context.new_priority = new_priority;
    _Thread_Change_priority(
      the_thread,
      0,
      &context,
      _RTEMS_tasks_Set_priority_filter,
      false
    );
    _Thread_Dispatch_enable( cpu_self );
    scheduler = context.scheduler;
    old_priority = context.old_priority;
    status = context.status;
  } else {
    /* Query only: read the current priority under the state lock. */
    _Thread_State_acquire_critical( the_thread, &lock_context );
    scheduler = _Scheduler_Get_own( the_thread );
    old_priority = _Thread_Get_priority( the_thread );
    _Thread_State_release( the_thread, &lock_context );
    status = RTEMS_SUCCESSFUL;
  }

  *old_priority_p = _RTEMS_Priority_From_core( scheduler, old_priority );
  return status;
}
/*
 * Blocks the thread on the thread queue using the sticky protocol: instead
 * of a real block, the thread raises its sticky level and busy-waits until
 * another processor satisfies or times out the request.
 *
 * Returns the wait status of the thread after the operation.
 */
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;

  _Thread_Wait_claim( the_thread, queue );

  /* Deadlock detection: bail out via the deadlock callout on failure. */
  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );
  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  /* Sticky enqueue is only valid with exactly one dispatch disable level. */
  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
  _Thread_Priority_update( queue_context );
  _Thread_Priority_and_sticky_update( the_thread, 1 );
  _Thread_Dispatch_enable( cpu_self );

  /*
   * Busy wait until another party resolves the request; the wait flag leaves
   * THREAD_QUEUE_INTEND_TO_BLOCK when it is satisfied or timed out.
   */
  while (
    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
/*
 * Assigns the task to the scheduler instance identified by scheduler_id with
 * the given priority.
 */
rtems_status_code rtems_task_set_scheduler(
  rtems_id            task_id,
  rtems_id            scheduler_id,
  rtems_task_priority priority
)
{
  const Scheduler_Control *scheduler;
  Thread_Control          *the_thread;
  Thread_queue_Context     queue_context;
  ISR_lock_Context         state_context;
  Per_CPU_Control         *cpu_self;
  bool                     valid;
  Priority_Control         core_priority;
  Status_Control           status;

  if ( !_Scheduler_Get_by_id( scheduler_id, &scheduler ) ) {
    return RTEMS_INVALID_ID;
  }

  /* Validate the priority in the target scheduler's priority range. */
  core_priority = _RTEMS_Priority_To_core( scheduler, priority, &valid );
  if ( !valid ) {
    return RTEMS_INVALID_PRIORITY;
  }

  _Thread_queue_Context_initialize( &queue_context );
  the_thread = _Thread_Get(
    task_id,
    &queue_context.Lock_context.Lock_context
  );

  if ( the_thread == NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( _Thread_MP_Is_remote( task_id ) ) {
      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
    }
#endif
    return RTEMS_INVALID_ID;
  }

  /*
   * The scheduler change is carried out with both the thread wait lock and
   * the thread state lock held while dispatching is disabled.
   */
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context.Lock_context.Lock_context
  );
  _Thread_Wait_acquire_critical( the_thread, &queue_context );
  _Thread_State_acquire_critical( the_thread, &state_context );
  status = _Scheduler_Set( scheduler, the_thread, core_priority );
  _Thread_State_release_critical( the_thread, &state_context );
  _Thread_Wait_release( the_thread, &queue_context );
  _Thread_Dispatch_enable( cpu_self );
  return _Status_Get( status );
}
/*
 * Surrenders the thread queue to a new owner selected by the queue
 * discipline and makes the new owner ready again, unblocking it if
 * necessary.
 */
void _Thread_queue_Surrender(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  bool             unblock;
  Per_CPU_Control *cpu_self;

  /* A surrender requires at least one waiting thread on the queue. */
  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;

#if defined(RTEMS_MULTIPROCESSING)
  /* For local new owners only: proxies are handled via the MP callout. */
  if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
#endif
  {
    _Thread_Resource_count_increment( new_owner );
  }

  unblock = _Thread_queue_Make_ready_again( new_owner );

  /* Disable dispatching before releasing the queue lock. */
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );
  _Thread_Priority_update( queue_context );

  if ( unblock ) {
    _Thread_Remove_timer_and_unblock( new_owner, queue );
  }

  _Thread_Dispatch_enable( cpu_self );
}
/*
 * Restarts the executing thread with the given entry information.  The real
 * priority is reset to the initial start priority.  This function does not
 * return: the restart is carried out during the final dispatch.
 */
void _Thread_Restart_self(
  Thread_Control                 *executing,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
)
{
  Per_CPU_Control      *cpu_self;
  Thread_queue_Context  queue_context;

  /* The thread must not have an active timer and must be runnable. */
  _Assert(
    _Watchdog_Get_state( &executing->Timer.Watchdog ) == WATCHDOG_INACTIVE
  );
  _Assert(
    executing->current_state == STATES_READY
      || executing->current_state == STATES_SUSPENDED
  );

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_State_acquire_critical( executing, lock_context );
  executing->Start.Entry = *entry;
  _Thread_Change_life_locked(
    executing,
    0,
    THREAD_LIFE_RESTARTING,
    THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED
  );
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Thread_State_release( executing, lock_context );

  /* Reset the real priority to the start priority under the wait lock. */
  _Thread_Wait_acquire_default( executing, lock_context );
  _Thread_Priority_change(
    executing,
    &executing->Real_priority,
    executing->Start.initial_priority,
    false,
    &queue_context
  );
  _Thread_Wait_release_default( executing, lock_context );
  _Thread_Priority_update( &queue_context );

  /* Enabling dispatch triggers the restart; control never returns here. */
  _Thread_Dispatch_enable( cpu_self );
  RTEMS_UNREACHABLE();
}
/*
 * Cancels (terminates) the_thread on behalf of executing and records the
 * exit value.  Must not be used for self-termination.
 */
void _Thread_Cancel(
  Thread_Control *the_thread,
  Thread_Control *executing,
  void           *exit_value
)
{
  ISR_lock_Context   lock_context;
  Thread_Life_state  previous;
  Per_CPU_Control   *cpu_self;
  Priority_Control   priority;

  /* Self-cancellation is handled elsewhere. */
  _Assert( the_thread != executing );

  _Thread_State_acquire( the_thread, &lock_context );
  _Thread_Set_exit_value( the_thread, exit_value );
  previous = _Thread_Change_life_locked(
    the_thread,
    0,
    THREAD_LIFE_TERMINATING,
    0
  );
  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  priority = _Thread_Get_priority( executing );

  if ( _States_Is_dormant( the_thread->current_state ) ) {
    /* Never-started threads can be turned into zombies directly. */
    _Thread_State_release( the_thread, &lock_context );
    _Thread_Make_zombie( the_thread );
  } else if ( _Thread_Is_life_change_allowed( previous ) ) {
    /* The life change may proceed now; wait for it to complete. */
    _Thread_Add_life_change_request( the_thread );
    _Thread_State_release( the_thread, &lock_context );
    _Thread_Finalize_life_change( the_thread, priority );
  } else {
    /*
     * The thread is currently protected from life changes: wake it up and
     * raise its priority so it reaches a cancellation point soon.
     */
    _Thread_Add_life_change_request( the_thread );
    _Thread_Clear_state_locked( the_thread, STATES_SUSPENDED );
    _Thread_State_release( the_thread, &lock_context );
    _Thread_Raise_real_priority( the_thread, priority );
    _Thread_Remove_life_change_request( the_thread );
  }

  _Thread_Dispatch_enable( cpu_self );
}
/*
 * Sets the scheduling priority of the given POSIX thread.
 *
 * Returns 0 on success, ESRCH if the thread does not exist, and EINVAL if
 * the priority is not valid for the thread's scheduler.
 */
int pthread_setschedprio( pthread_t thread, int prio )
{
  Thread_Control          *the_thread;
  Per_CPU_Control         *cpu_self;
  Thread_queue_Context     queue_context;
  const Scheduler_Control *scheduler;
  Priority_Control         new_priority;
  bool                     valid;

  the_thread = _Thread_Get(
    thread,
    &queue_context.Lock_context.Lock_context
  );

  if ( the_thread == NULL ) {
    return ESRCH;
  }

  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire_critical( the_thread, &queue_context );

  /* Validate the POSIX priority against the thread's own scheduler. */
  scheduler = _Scheduler_Get_own( the_thread );
  new_priority = _POSIX_Priority_To_core( scheduler, prio, &valid );

  if ( !valid ) {
    _Thread_Wait_release( the_thread, &queue_context );
    return EINVAL;
  }

  _Thread_Priority_change(
    the_thread,
    &the_thread->Real_priority,
    new_priority,
    true,
    &queue_context
  );
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context.Lock_context.Lock_context
  );
  _Thread_Wait_release( the_thread, &queue_context );
  _Thread_Priority_update( &queue_context );
  _Thread_Dispatch_enable( cpu_self );
  return 0;
}
/*
 * Blocks the owner of an active rate monotonic period until the period
 * expires.  Uses the intend-to-block/blocked wait-flag protocol to close
 * the race with a concurrent _Rate_monotonic_Timeout.
 */
static rtems_status_code _Rate_monotonic_Block_while_active(
  Rate_monotonic_Control *the_period,
  rtems_interval          length,
  Thread_Control         *executing,
  ISR_lock_Context       *lock_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  /*
   * Update statistics from the concluding period.
   */
  _Rate_monotonic_Update_statistics( the_period );

  /*
   * This tells the _Rate_monotonic_Timeout that this task is
   * in the process of blocking on the period and that we
   * may be changing the length of the next period.
   */
  the_period->next_length = length;
  executing->Wait.return_argument = the_period;
  _Thread_Wait_flags_set( executing, RATE_MONOTONIC_INTEND_TO_BLOCK );

  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Rate_monotonic_Release( the_period, lock_context );

  _Thread_Set_state( executing, STATES_WAITING_FOR_PERIOD );

  /*
   * If the flag no longer reads INTEND_TO_BLOCK, the timeout fired in the
   * meantime and we must undo the block ourselves.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    executing,
    RATE_MONOTONIC_INTEND_TO_BLOCK,
    RATE_MONOTONIC_BLOCKED
  );
  if ( !success ) {
    _Assert(
      _Thread_Wait_flags_get( executing ) == RATE_MONOTONIC_READY_AGAIN
    );
    _Thread_Unblock( executing );
  }

  _Thread_Dispatch_enable( cpu_self );
  return RTEMS_SUCCESSFUL;
}
/*
 * Deletes the task identified by id.  Self-deletion terminates via
 * _Thread_Exit and detaches the thread; deleting another task closes it on
 * behalf of the executing thread.
 */
rtems_status_code rtems_task_delete( rtems_id id )
{
  Thread_Control   *the_thread;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Per_CPU_Control  *cpu_self;

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( _Thread_MP_Is_remote( id ) ) {
      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
    }
#endif
    return RTEMS_INVALID_ID;
  }

  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _ISR_lock_ISR_enable( &lock_context );
  executing = _Per_CPU_Get_executing( cpu_self );

  if ( the_thread == executing ) {
    /*
     * The Classic tasks are neither detached nor joinable.  In case of
     * self deletion, they are detached, otherwise joinable by default.
     */
    _Thread_Exit(
      executing,
      THREAD_LIFE_TERMINATING | THREAD_LIFE_DETACHED,
      NULL
    );
  } else {
    _Thread_Close( the_thread, executing );
  }

  _Thread_Dispatch_enable( cpu_self );
  return RTEMS_SUCCESSFUL;
}
/*
 * Restarts a thread other than the executing thread with the given entry
 * information.
 *
 * Returns false if the thread is dormant (never started, so there is
 * nothing to restart), and true otherwise.
 */
bool _Thread_Restart_other(
  Thread_Control                 *the_thread,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
)
{
  Thread_Life_state  previous;
  Per_CPU_Control   *cpu_self;

  _Thread_State_acquire_critical( the_thread, lock_context );

  if ( _States_Is_dormant( the_thread->current_state ) ) {
    _Thread_State_release( the_thread, lock_context );
    return false;
  }

  the_thread->Start.Entry = *entry;
  previous = _Thread_Change_life_locked(
    the_thread,
    0,
    THREAD_LIFE_RESTARTING,
    0
  );
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );

  if ( _Thread_Is_life_change_allowed( previous ) ) {
    /* The restart may proceed now; wait for it to complete. */
    _Thread_Add_life_change_request( the_thread );
    _Thread_State_release( the_thread, lock_context );
    _Thread_Finalize_life_change(
      the_thread,
      the_thread->Start.initial_priority
    );
  } else {
    /*
     * The thread is protected from life changes: wake it so it reaches the
     * deferred restart point on its own.
     */
    _Thread_Clear_state_locked( the_thread, STATES_SUSPENDED );
    _Thread_State_release( the_thread, lock_context );
  }

  _Thread_Dispatch_enable( cpu_self );
  return true;
}
/*
 * Cancels the rate monotonic period: stops the period watchdog, marks the
 * period inactive, and reverts any scheduler job priority for the owner.
 *
 * NOTE(review): the owner parameter is not used here; _Scheduler_Cancel_job()
 * reads the_period->owner instead — confirm the callers pass the same thread.
 */
void _Rate_monotonic_Cancel(
  Rate_monotonic_Control *the_period,
  Thread_Control         *owner,
  ISR_lock_Context       *lock_context
)
{
  Per_CPU_Control *cpu_self;
  Thread_Control  *update_priority;

  _Rate_monotonic_Acquire_critical( the_period, lock_context );
  _Watchdog_Per_CPU_remove_relative( &the_period->Timer );
  the_period->state = RATE_MONOTONIC_INACTIVE;
  update_priority = _Scheduler_Cancel_job( the_period->owner );

  /* Disable dispatching before releasing the period lock. */
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Rate_monotonic_Release( the_period, lock_context );
  _Thread_Update_priority( update_priority );
  _Thread_Dispatch_enable( cpu_self );
}
/*
 * Releases the thread queue lock and, if requested, unblocks the thread
 * with thread dispatching disabled across the unblock.
 */
void _Thread_queue_Unblock_critical(
  bool                unblock,
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  ISR_lock_Context   *lock_context
)
{
  Per_CPU_Control *cpu_self;

  if ( !unblock ) {
    /* Nothing to wake up: just drop the queue lock. */
    _Thread_queue_Queue_release( queue, lock_context );
    return;
  }

  /*
   * Disable thread dispatching before the lock is released so the unblock
   * cannot be preempted on this processor.
   */
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Thread_queue_Queue_release( queue, lock_context );
  _Thread_Remove_timer_and_unblock( the_thread, queue );
  _Thread_Dispatch_enable( cpu_self );
}
/*
 * Atomically changes the life state of the executing thread and returns the
 * previous life state.
 */
Thread_Life_state _Thread_Change_life(
  Thread_Life_state clear,
  Thread_Life_state set,
  Thread_Life_state ignore
)
{
  ISR_lock_Context   lock_context;
  Per_CPU_Control   *cpu_self;
  Thread_Control    *the_executing;
  Thread_Life_state  old_life_state;

  the_executing = _Thread_State_acquire_for_executing( &lock_context );
  old_life_state = _Thread_Change_life_locked(
    the_executing,
    clear,
    set,
    ignore
  );

  /* Keep dispatching disabled until the state lock is released. */
  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _Thread_State_release( the_executing, &lock_context );
  _Thread_Dispatch_enable( cpu_self );

  return old_life_state;
}
/*
 * Restarts the executing thread with the given entry information.  This
 * variant resets the priority via _Thread_Set_priority().  This function
 * does not return: the restart is carried out during the final dispatch.
 */
void _Thread_Restart_self(
  Thread_Control                 *executing,
  const Thread_Entry_information *entry,
  ISR_lock_Context               *lock_context
)
{
  Per_CPU_Control  *cpu_self;
  Priority_Control  unused;

  /* The thread must not have an active timer and must be runnable. */
  _Assert(
    _Watchdog_Get_state( &executing->Timer.Watchdog ) == WATCHDOG_INACTIVE
  );
  _Assert(
    executing->current_state == STATES_READY
      || executing->current_state == STATES_SUSPENDED
  );

  _Thread_State_acquire_critical( executing, lock_context );
  executing->Start.Entry = *entry;
  _Thread_Change_life_locked(
    executing,
    0,
    THREAD_LIFE_RESTARTING,
    THREAD_LIFE_PROTECTED | THREAD_LIFE_CHANGE_DEFERRED
  );
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Thread_State_release( executing, lock_context );

  /* Reset to the start priority; the old priority is not needed. */
  _Thread_Set_priority(
    executing,
    executing->Start.initial_priority,
    &unused,
    true
  );

  /* Enabling dispatch triggers the restart; control never returns here. */
  _Thread_Dispatch_enable( cpu_self );
  RTEMS_UNREACHABLE();
}
/*
 * Releases one postponed job of the rate monotonic period using the latest
 * recorded deadline.
 *
 * NOTE(review): next_length is unused here — postponed jobs keep the
 * deadline stored in the_period->latest_deadline; confirm against callers.
 */
static void _Rate_monotonic_Release_postponed_job(
  Rate_monotonic_Control *the_period,
  Thread_Control         *owner,
  rtems_interval          next_length,
  ISR_lock_Context       *lock_context
)
{
  Per_CPU_Control      *cpu_self;
  Thread_queue_Context  queue_context;

  /* One fewer postponed job remains outstanding. */
  --the_period->postponed_jobs;
  _Scheduler_Release_job(
    owner,
    &the_period->Priority,
    the_period->latest_deadline,
    &queue_context
  );

  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Rate_monotonic_Release( the_period, lock_context );

  /* Apply any priority change requested by the scheduler job release. */
  _Thread_Priority_update( &queue_context );
  _Thread_Dispatch_enable( cpu_self );
}
/*
 * Changes the real priority of the_thread to new_priority (a Classic API
 * priority validated against the given scheduler).  The caller must hold
 * the thread wait lock via queue_context; it is released here on all paths.
 */
static rtems_status_code _RTEMS_tasks_Set_priority(
  Thread_Control          *the_thread,
  const Scheduler_Control *scheduler,
  Priority_Control         new_priority,
  Thread_queue_Context    *queue_context
)
{
  Priority_Control  core_new_priority;
  bool              valid;
  Per_CPU_Control  *cpu_self;

  core_new_priority = _RTEMS_Priority_To_core(
    scheduler,
    new_priority,
    &valid
  );

  if ( !valid ) {
    _Thread_Wait_release( the_thread, queue_context );
    return RTEMS_INVALID_PRIORITY;
  }

  _Thread_Priority_change(
    the_thread,
    &the_thread->Real_priority,
    core_new_priority,
    false,
    queue_context
  );

  /* Disable dispatching before releasing the wait lock. */
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_Wait_release( the_thread, queue_context );
  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_enable( cpu_self );
  return RTEMS_SUCCESSFUL;
}
/*
 * Common implementation of pthread_cond_wait() and pthread_cond_timedwait().
 * Atomically releases the mutex and blocks on the condition variable; on
 * wakeup the mutex is re-acquired before returning.
 *
 * Returns 0 on success or an errno-style error code.
 */
int _POSIX_Condition_variables_Wait_support(
  pthread_cond_t        *cond,
  pthread_mutex_t       *mutex,
  const struct timespec *abstime
)
{
  POSIX_Condition_variables_Control       *the_cond;
  Thread_queue_Context                     queue_context;
  int                                      error;
  int                                      mutex_error;
  Per_CPU_Control                         *cpu_self;
  Thread_Control                          *executing;
  Watchdog_Interval                        timeout;
  bool                                     already_timedout;
  TOD_Absolute_timeout_conversion_results  status;

  if ( mutex == NULL ) {
    return EINVAL;
  }

  the_cond = _POSIX_Condition_variables_Get( cond, &queue_context );

  if ( the_cond == NULL ) {
    return EINVAL;
  }

  already_timedout = false;

  if ( abstime != NULL ) {
    /*
     * POSIX requires that blocking calls with timeouts that take
     * an absolute timeout must ignore issues with the absolute
     * time provided if the operation would otherwise succeed.
     * So we check the abstime provided, and hold on to whether it
     * is valid or not.  If it isn't correct and in the future,
     * then we do a polling operation and convert the UNSATISFIED
     * status into the appropriate error.
     */
    _Assert( the_cond->clock );
    status = _TOD_Absolute_timeout_to_ticks(abstime, the_cond->clock, &timeout);
    if ( status == TOD_ABSOLUTE_TIMEOUT_INVALID )
      return EINVAL;

    if (
      status == TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST
        || status == TOD_ABSOLUTE_TIMEOUT_IS_NOW
    ) {
      already_timedout = true;
    } else {
      _Thread_queue_Context_set_relative_timeout( &queue_context, timeout );
    }
  } else {
    _Thread_queue_Context_set_no_timeout( &queue_context );
  }

  _POSIX_Condition_variables_Acquire_critical( the_cond, &queue_context );

  /* A condition variable may be bound to only one mutex at a time. */
  if (
    the_cond->mutex != POSIX_CONDITION_VARIABLES_NO_MUTEX
      && the_cond->mutex != *mutex
  ) {
    _POSIX_Condition_variables_Release( the_cond, &queue_context );
    return EINVAL;
  }

  the_cond->mutex = *mutex;

  cpu_self = _Thread_Dispatch_disable_critical( &queue_context.Lock_context );
  executing = _Per_CPU_Get_executing( cpu_self );

  if ( !already_timedout ) {
    _Thread_queue_Context_set_expected_level( &queue_context, 2 );
    _Thread_queue_Enqueue_critical(
      &the_cond->Wait_queue.Queue,
      POSIX_CONDITION_VARIABLES_TQ_OPERATIONS,
      executing,
      STATES_WAITING_FOR_CONDITION_VARIABLE,
      &queue_context
    );
  } else {
    /* The absolute timeout already expired: poll and report a timeout. */
    _POSIX_Condition_variables_Release( the_cond, &queue_context );
    executing->Wait.return_code = STATUS_TIMEOUT;
  }

  mutex_error = pthread_mutex_unlock( mutex );

  if ( mutex_error != 0 ) {
    /*
     * Historically, we ignored the unlock status since the behavior
     * is undefined by POSIX. But GNU/Linux returns EPERM in this
     * case, so we follow their lead.
     */
    _Assert( mutex_error == EINVAL || mutex_error == EPERM );
    _Thread_queue_Extract( executing );
    _Thread_Dispatch_enable( cpu_self );
    return EPERM;
  }

  /*
   * Switch ourself out because we blocked as a result of the
   * _Thread_queue_Enqueue_critical().
   */
  _Thread_Dispatch_enable( cpu_self );

  error = _POSIX_Get_error_after_wait( executing );

  /*
   * If the thread is interrupted, while in the thread queue, by
   * a POSIX signal, then pthread_cond_wait returns spuriously,
   * according to the POSIX standard. It means that pthread_cond_wait
   * returns a success status, except for the fact that it was not
   * woken up a pthread_cond_signal() or a pthread_cond_broadcast().
   */
  if ( error == EINTR ) {
    error = 0;
  }

  /*
   * When we get here the dispatch disable level is 0.
   */
  mutex_error = pthread_mutex_lock( mutex );

  if ( mutex_error != 0 ) {
    _Assert( mutex_error == EINVAL );
    return EINVAL;
  }

  return error;
}
/*
 * Blocks the thread on the thread queue with the given blocking state,
 * optionally arming a relative or absolute timeout.  The caller must hold
 * the queue lock via queue_context; it is released here on all paths.
 */
void _Thread_queue_Enqueue_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  States_Control                 state,
  Thread_queue_Context          *queue_context
)
{
  Thread_queue_Path  path;
  Per_CPU_Control   *cpu_self;
  bool               success;

#if defined(RTEMS_MULTIPROCESSING)
  /* Blocking on behalf of a remote node uses a proxy thread. */
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue, operations );

  /* Deadlock detection: bail out via the deadlock callout on failure. */
  if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  ( *operations->enqueue )( queue, the_thread, &path );
  _Thread_queue_Path_release( &path );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context );

  /* Detect callers that enqueued from an invalid dispatch state. */
  if (
    cpu_self->thread_dispatch_disable_level
      != queue_context->expected_thread_dispatch_disable_level
  ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
    );
  }

  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, state );

  /*
   * If the thread wants to timeout, then schedule its timer.
   */
  switch ( queue_context->timeout_discipline ) {
    case WATCHDOG_RELATIVE:
      /* A relative timeout of 0 is a special case indefinite (no) timeout */
      if ( queue_context->timeout != 0 ) {
        _Thread_Timer_insert_relative(
          the_thread,
          cpu_self,
          _Thread_Timeout,
          (Watchdog_Interval) queue_context->timeout
        );
      }
      break;
    case WATCHDOG_ABSOLUTE:
      _Thread_Timer_insert_absolute(
        the_thread,
        cpu_self,
        _Thread_Timeout,
        queue_context->timeout
      );
      break;
    default:
      break;
  }

  /*
   * At this point thread dispatching is disabled, however, we already released
   * the thread queue lock.  Thus, interrupts or threads on other processors
   * may already changed our state with respect to the thread queue object.
   * The request could be satisfied or timed out.  This situation is indicated
   * by the thread wait flags.  Other parties must not modify our thread state
   * as long as we are in the THREAD_QUEUE_INTEND_TO_BLOCK thread wait state,
   * thus we have to cancel the blocking operation ourself if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );

  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Update_priority( path.update_priority );
  _Thread_Dispatch_enable( cpu_self );
}
/*
 * Wakes up to count threads waiting on the condition variable.
 *
 * Returns the number of threads extracted from the wait queue.  Threads
 * that need an explicit unblock are collected on a local chain first, so
 * the queue lock is not held while unblocking them.
 */
static int _Condition_Wake( struct _Condition_Control *_condition, int count )
{
  Condition_Control  *condition;
  ISR_lock_Context    lock_context;
  Thread_queue_Heads *heads;
  Chain_Control       unblock;
  Chain_Node         *node;
  Chain_Node         *tail;
  int                 woken;

  condition = _Condition_Get( _condition );
  _ISR_lock_ISR_disable( &lock_context );
  _Condition_Queue_acquire_critical( condition, &lock_context );

  /*
   * In common uses cases of condition variables there are normally no threads
   * on the queue, so check this condition early.
   */
  heads = condition->Queue.Queue.heads;
  if ( __predict_true( heads == NULL ) ) {
    _Condition_Queue_release( condition, &lock_context );
    return 0;
  }

  woken = 0;
  _Chain_Initialize_empty( &unblock );

  while ( count > 0 && heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *first;
    bool                           do_unblock;

    operations = CONDITION_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    /* Extract the first waiter; do_unblock tells us if it must be woken. */
    do_unblock = _Thread_queue_Extract_locked(
      &condition->Queue.Queue,
      operations,
      first
    );
    if (do_unblock) {
      _Chain_Append_unprotected( &unblock, &first->Wait.Node.Chain );
    }

    ++woken;
    --count;
    heads = condition->Queue.Queue.heads;
  }

  node = _Chain_First( &unblock );
  tail = _Chain_Tail( &unblock );

  if ( node != tail ) {
    Per_CPU_Control *cpu_self;

    /* Disable dispatching before the queue lock is released. */
    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
    _Condition_Queue_release( condition, &lock_context );

    do {
      Thread_Control *thread;
      Chain_Node     *next;

      /* Read the successor first; the unblock may reuse the chain node. */
      next = _Chain_Next( node );
      thread = THREAD_CHAIN_NODE_TO_THREAD( node );
      _Watchdog_Remove_ticks( &thread->Timer );
      _Thread_Unblock( thread );
      node = next;
    } while ( node != tail );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Condition_Queue_release( condition, &lock_context );
  }

  return woken;
}
/*
 * Sets the priority associated with the semaphore (the ceiling priority for
 * priority-ceiling mutexes, the per-scheduler priority for MrsP semaphores)
 * and returns the previous value via old_priority_p.
 * RTEMS_CURRENT_PRIORITY queries the priority without changing it.
 */
static rtems_status_code _Semaphore_Set_priority(
  Semaphore_Control       *the_semaphore,
  const Scheduler_Control *scheduler,
  rtems_task_priority      new_priority,
  rtems_task_priority     *old_priority_p,
  Thread_queue_Context    *queue_context
)
{
  rtems_status_code  sc;
  bool               valid;
  Priority_Control   core_priority;
  Priority_Control   old_priority;
  Per_CPU_Control   *cpu_self;

  /* Validation is skipped for the query-only case. */
  core_priority = _RTEMS_Priority_To_core( scheduler, new_priority, &valid );
  if ( new_priority != RTEMS_CURRENT_PRIORITY && !valid ) {
    return RTEMS_INVALID_PRIORITY;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_queue_Acquire_critical(
    &the_semaphore->Core_control.Wait_queue,
    queue_context
  );

  switch ( the_semaphore->variant ) {
    case SEMAPHORE_VARIANT_MUTEX_PRIORITY_CEILING:
      sc = _Semaphore_Is_scheduler_valid(
        &the_semaphore->Core_control.Mutex,
        scheduler
      );
      old_priority = _CORE_ceiling_mutex_Get_priority(
        &the_semaphore->Core_control.Mutex
      );

      if ( sc == RTEMS_SUCCESSFUL && new_priority != RTEMS_CURRENT_PRIORITY ) {
        _CORE_ceiling_mutex_Set_priority(
          &the_semaphore->Core_control.Mutex,
          core_priority,
          queue_context
        );
      }
      break;
#if defined(RTEMS_SMP)
    case SEMAPHORE_VARIANT_MRSP:
      /* MrsP keeps one ceiling priority per scheduler instance. */
      old_priority = _MRSP_Get_priority(
        &the_semaphore->Core_control.MRSP,
        scheduler
      );

      if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
        _MRSP_Set_priority(
          &the_semaphore->Core_control.MRSP,
          scheduler,
          core_priority
        );
      }

      sc = RTEMS_SUCCESSFUL;
      break;
#endif
    default:
      /* The remaining variants have no priority to set. */
      _Assert(
        the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_INHERIT_PRIORITY
          || the_semaphore->variant == SEMAPHORE_VARIANT_MUTEX_NO_PROTOCOL
          || the_semaphore->variant == SEMAPHORE_VARIANT_SIMPLE_BINARY
          || the_semaphore->variant == SEMAPHORE_VARIANT_COUNTING
      );
      old_priority = 0;
      sc = RTEMS_NOT_DEFINED;
      break;
  }

  /* Disable dispatching before releasing the wait queue lock. */
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Release(
    &the_semaphore->Core_control.Wait_queue,
    queue_context
  );
  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_enable( cpu_self );

  *old_priority_p = _RTEMS_Priority_From_core( scheduler, old_priority );
  return sc;
}
/*
 * Sets the scheduling policy and parameters of the given POSIX thread.
 *
 * Returns 0 on success, EINVAL for a NULL param, ESRCH if the thread does
 * not exist, or the error produced by the policy/parameter translation.
 */
int pthread_setschedparam(
  pthread_t           thread,
  int                 policy,
  struct sched_param *param
)
{
  Thread_Control                      *the_thread;
  Per_CPU_Control                     *cpu_self;
  POSIX_API_Control                   *api;
  Thread_CPU_budget_algorithms         budget_algorithm;
  Thread_CPU_budget_algorithm_callout  budget_callout;
  int                                  eno;
  Priority_Control                     unused;
  ISR_lock_Context                     lock_context;
  Priority_Control                     new_priority;

  /*
   * Check all the parameters
   */
  if ( param == NULL ) {
    return EINVAL;
  }

  /* Rejects unsupported policies and invalid parameter combinations. */
  eno = _POSIX_Thread_Translate_sched_param(
    policy,
    param,
    &budget_algorithm,
    &budget_callout
  );

  if ( eno != 0 ) {
    return eno;
  }

  the_thread = _Thread_Get_interrupt_disable( thread, &lock_context );

  if ( the_thread == NULL ) {
    return ESRCH;
  }

  /*
   * Actually change the scheduling policy and parameters
   */
  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _Thread_State_acquire_critical( the_thread, &lock_context );

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  /* A pending sporadic replenishment timer must not survive the change. */
  if ( api->schedpolicy == SCHED_SPORADIC ) {
    _Watchdog_Per_CPU_remove_relative( &api->Sporadic_timer );
  }

  api->schedpolicy = policy;
  api->schedparam = *param;
  api->Attributes.schedpolicy = policy;
  api->Attributes.schedparam = *param;
  the_thread->budget_algorithm = budget_algorithm;
  the_thread->budget_callout = budget_callout;

  switch ( policy ) {
    case SCHED_OTHER:
    case SCHED_FIFO:
    case SCHED_RR:
      the_thread->cpu_time_budget =
        rtems_configuration_get_ticks_per_timeslice();
      new_priority = _POSIX_Priority_To_core( api->schedparam.sched_priority );
      break;
    case SCHED_SPORADIC:
      api->ss_high_priority = api->schedparam.sched_priority;
      break;
  }

  _Thread_State_release( the_thread, &lock_context );

  /* The priority change / sporadic restart happens outside the state lock. */
  switch ( policy ) {
    case SCHED_OTHER:
    case SCHED_FIFO:
    case SCHED_RR:
      _Thread_Set_priority( the_thread, new_priority, &unused, true );
      break;
    case SCHED_SPORADIC:
      /* Re-arm the sporadic server via its timer service routine. */
      _POSIX_Threads_Sporadic_budget_TSR( &api->Sporadic_timer );
      break;
  }

  _Thread_Dispatch_enable( cpu_self );
  return 0;
}
/*
 * Blocks the thread on the thread queue using the context-provided thread
 * state, enqueue callout, and timeout discipline.  The caller must hold the
 * queue lock via queue_context; it is released here on all paths.
 */
void _Thread_queue_Enqueue(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  /* The context must be fully prepared by the caller. */
  _Assert( queue_context->enqueue_callout != NULL );
  _Assert( (uint8_t) queue_context->timeout_discipline != 0x7f );

#if defined(RTEMS_MULTIPROCESSING)
  /* Blocking on behalf of a remote node uses a proxy thread. */
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  /* Deadlock detection: bail out via the deadlock callout on failure. */
  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );
  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  ( *queue_context->enqueue_callout )( queue, the_thread, queue_context );

  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   * If the thread wants to timeout, then schedule its timer.
   */
  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );

  /*
   * At this point thread dispatching is disabled, however, we already released
   * the thread queue lock.  Thus, interrupts or threads on other processors
   * may already changed our state with respect to the thread queue object.
   * The request could be satisfied or timed out.  This situation is indicated
   * by the thread wait flags.  Other parties must not modify our thread state
   * as long as we are in the THREAD_QUEUE_INTEND_TO_BLOCK thread wait state,
   * thus we have to cancel the blocking operation ourself if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );

  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}