void _Semaphore_Wait( struct _Semaphore_Control *_sem ) { Semaphore_Control *sem ; Thread_queue_Context queue_context; Thread_Control *executing; unsigned int count; sem = _Semaphore_Get( _sem ); _Thread_queue_Context_initialize( &queue_context ); executing = _Semaphore_Queue_acquire( sem, &queue_context ); count = sem->count; if ( count > 0 ) { sem->count = count - 1; _Semaphore_Queue_release( sem, &queue_context ); } else { _Thread_queue_Context_set_expected_level( &queue_context, 1 ); _Thread_queue_Context_set_no_timeout( &queue_context ); _Thread_queue_Enqueue_critical( &sem->Queue.Queue, SEMAPHORE_TQ_OPERATIONS, executing, STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE, &queue_context ); } }
/*
 * Acquire a self-contained recursive mutex (ISR-level variant).
 *
 * Interrupts are disabled via the queue context before the mutex
 * queue lock is taken, and the saved ISR level is threaded through
 * to the release/slow paths so it is restored exactly once.
 *
 * Three cases, decided while holding the queue lock:
 *  - no owner: the executing thread takes ownership (fast path,
 *    hinted likely via __predict_true) and its resource count is
 *    incremented;
 *  - owner is the executing thread: recursive acquisition, only the
 *    nesting level is bumped;
 *  - owned by another thread: block in _Mutex_Acquire_slow() with no
 *    timeout (the slow path consumes the queue lock and ISR level).
 */
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  ISR_Level                level;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Uncontended: claim ownership and account for the held resource. */
    mutex->Mutex.Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
  } else if ( owner == executing ) {
    /* Recursive acquisition by the current owner. */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
  } else {
    /* Contended: wait (forever) for the owner to release. */
    _Thread_queue_Context_set_no_timeout( &queue_context );
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );
  }
}
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex ) { Mutex_recursive_Control *mutex; Thread_queue_Context queue_context; Thread_Control *executing; Thread_Control *owner; mutex = _Mutex_recursive_Get( _mutex ); _Thread_queue_Context_initialize( &queue_context ); executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context ); owner = mutex->Mutex.Queue.Queue.owner; if ( __predict_true( owner == NULL ) ) { mutex->Mutex.Queue.Queue.owner = executing; ++executing->resource_count; _Mutex_Queue_release( &mutex->Mutex, &queue_context ); } else if ( owner == executing ) { ++mutex->nest_level; _Mutex_Queue_release( &mutex->Mutex, &queue_context ); } else { _Thread_queue_Context_set_no_timeout( &queue_context ); _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context ); } }
/*
 * Close a thread on behalf of the executing thread.
 *
 * Joins with the_thread (blocking the executing thread in
 * STATES_WAITING_FOR_JOIN with no timeout) and then cancels it.
 * The thread state lock is acquired before the join and the queue
 * context's expected level is set to 2; presumably this accounts for
 * the two lock levels held at enqueue time -- TODO confirm against
 * _Thread_queue_Enqueue_critical's contract.
 *
 * NOTE(review): the call order (initialize context -> configure ->
 * acquire state lock -> join -> cancel) is the entire contract here;
 * do not reorder.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing )
{
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_expected_level( &queue_context, 2 );
  _Thread_queue_Context_set_no_timeout( &queue_context );
  _Thread_State_acquire( the_thread, &queue_context.Lock_context.Lock_context );
  _Thread_Join( the_thread, STATES_WAITING_FOR_JOIN, executing, &queue_context );
  /* Cancel after the join; NULL is the exit value argument here. */
  _Thread_Cancel( the_thread, executing, NULL );
}
/*
 * Close a thread on behalf of the executing thread (context-based
 * variant).
 *
 * Records the thread to cancel in the caller-provided close context,
 * installs _Thread_Close_enqueue_callout to run as part of the
 * enqueue (presumably it performs the actual cancel once the join
 * enqueues -- TODO confirm against the callout's definition, which is
 * outside this view), and then joins with the_thread in
 * STATES_WAITING_FOR_JOIN with no timeout.
 *
 * NOTE(review): the state lock is acquired with the *_critical
 * variant, so the caller is expected to have already disabled
 * interrupts / set up the lock context in context->Base -- verify
 * against callers.
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing, Thread_Close_context *context )
{
  context->cancel = the_thread;
  _Thread_queue_Context_set_enqueue_callout( &context->Base, _Thread_Close_enqueue_callout );
  _Thread_queue_Context_set_no_timeout( &context->Base );
  _Thread_State_acquire_critical( the_thread, &context->Base.Lock_context.Lock_context );
  _Thread_Join( the_thread, STATES_WAITING_FOR_JOIN, executing, &context->Base );
}
/*
 * Lock an API mutex, protecting the executing thread's life state
 * while the mutex is held.
 *
 * Life protection is raised *before* the seize so the thread cannot
 * be deleted while it owns (or is blocking on) the mutex.  The
 * previous life state is saved into the mutex only when nest_level is
 * 0 after the seize, i.e. on the outermost acquisition, so that the
 * matching outermost unlock can restore it; nested acquisitions leave
 * the stored state untouched.
 *
 * The seize runs with interrupts disabled via the queue context's
 * lock context, blocks with no timeout, and permits recursive
 * acquisition (_CORE_recursive_mutex_Seize_nested).
 */
void _API_Mutex_Lock( API_Mutex_Control *the_mutex )
{
  Thread_Life_state    previous_thread_life_state;
  Thread_queue_Context queue_context;

  previous_thread_life_state = _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
  _Thread_queue_Context_initialize( &queue_context );
  _ISR_lock_ISR_disable( &queue_context.Lock_context );
  _Thread_queue_Context_set_no_timeout( &queue_context );
  _CORE_recursive_mutex_Seize(
    &the_mutex->Mutex,
    _Thread_Executing,
    true,
    _CORE_recursive_mutex_Seize_nested,
    &queue_context
  );

  /* Outermost acquisition: remember the life state to restore on the
   * final unlock. */
  if ( the_mutex->Mutex.nest_level == 0 ) {
    the_mutex->previous_thread_life_state = previous_thread_life_state;
  }
}
/*
 * Common implementation of pthread_cond_wait() / pthread_cond_timedwait().
 *
 * @param cond    the condition variable (validated via _POSIX_Condition_variables_Get)
 * @param mutex   the mutex the caller holds; must not be NULL and must match
 *                the mutex already bound to the condition variable, if any
 * @param abstime absolute timeout, or NULL for an untimed wait
 *
 * @return 0 on success (including spurious wakeup by signal, see below),
 *         EINVAL for bad arguments/mutex mismatch/invalid abstime,
 *         EPERM if the caller did not own the mutex,
 *         ETIMEDOUT (via _POSIX_Get_error_after_wait, presumably -- the
 *         helper's mapping is outside this view) on timeout.
 *
 * NOTE(review): the enqueue / mutex-unlock / dispatch-enable sequence is
 * order-critical; do not reorder.
 */
int _POSIX_Condition_variables_Wait_support(
  pthread_cond_t        *cond,
  pthread_mutex_t       *mutex,
  const struct timespec *abstime
)
{
  POSIX_Condition_variables_Control      *the_cond;
  Thread_queue_Context                    queue_context;
  int                                     error;
  int                                     mutex_error;
  Per_CPU_Control                        *cpu_self;
  Thread_Control                         *executing;
  Watchdog_Interval                       timeout;
  bool                                    already_timedout;
  TOD_Absolute_timeout_conversion_results status;

  if ( mutex == NULL ) {
    return EINVAL;
  }

  the_cond = _POSIX_Condition_variables_Get( cond, &queue_context );
  if ( the_cond == NULL ) {
    return EINVAL;
  }

  already_timedout = false;

  if ( abstime != NULL ) {
    /*
     * POSIX requires that blocking calls with timeouts that take
     * an absolute timeout must ignore issues with the absolute
     * time provided if the operation would otherwise succeed.
     * So we check the abstime provided, and hold on to whether it
     * is valid or not.  If it isn't correct and in the future,
     * then we do a polling operation and convert the UNSATISFIED
     * status into the appropriate error.
     */
    _Assert( the_cond->clock );
    status = _TOD_Absolute_timeout_to_ticks(abstime, the_cond->clock, &timeout);
    if ( status == TOD_ABSOLUTE_TIMEOUT_INVALID )
      return EINVAL;

    if ( status == TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST || status == TOD_ABSOLUTE_TIMEOUT_IS_NOW ) {
      /* Already expired: don't block, report timeout below. */
      already_timedout = true;
    } else {
      _Thread_queue_Context_set_relative_timeout( &queue_context, timeout );
    }
  } else {
    _Thread_queue_Context_set_no_timeout( &queue_context );
  }

  _POSIX_Condition_variables_Acquire_critical( the_cond, &queue_context );

  /* A condition variable may only ever be paired with one mutex. */
  if ( the_cond->mutex != POSIX_CONDITION_VARIABLES_NO_MUTEX && the_cond->mutex != *mutex ) {
    _POSIX_Condition_variables_Release( the_cond, &queue_context );
    return EINVAL;
  }

  the_cond->mutex = *mutex;

  /* Keep the dispatch disabled across the unlock of the user mutex so
   * that a signal cannot be lost between enqueue and unlock. */
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context.Lock_context );
  executing = _Per_CPU_Get_executing( cpu_self );

  if ( !already_timedout ) {
    _Thread_queue_Context_set_expected_level( &queue_context, 2 );
    _Thread_queue_Enqueue_critical(
      &the_cond->Wait_queue.Queue,
      POSIX_CONDITION_VARIABLES_TQ_OPERATIONS,
      executing,
      STATES_WAITING_FOR_CONDITION_VARIABLE,
      &queue_context
    );
  } else {
    /* Timeout already expired: release the lock and record the
     * timeout status without ever enqueuing. */
    _POSIX_Condition_variables_Release( the_cond, &queue_context );
    executing->Wait.return_code = STATUS_TIMEOUT;
  }

  mutex_error = pthread_mutex_unlock( mutex );
  if ( mutex_error != 0 ) {
    /*
     * Historically, we ignored the unlock status since the behavior
     * is undefined by POSIX.  But GNU/Linux returns EPERM in this
     * case, so we follow their lead.
     */
    _Assert( mutex_error == EINVAL || mutex_error == EPERM );
    /* Unwind: pull ourselves back off the wait queue before failing. */
    _Thread_queue_Extract( executing );
    _Thread_Dispatch_enable( cpu_self );
    return EPERM;
  }

  /*
   * Switch ourself out because we blocked as a result of the
   * _Thread_queue_Enqueue_critical().
   */
  _Thread_Dispatch_enable( cpu_self );

  error = _POSIX_Get_error_after_wait( executing );

  /*
   * If the thread is interrupted, while in the thread queue, by
   * a POSIX signal, then pthread_cond_wait returns spuriously,
   * according to the POSIX standard.  It means that pthread_cond_wait
   * returns a success status, except for the fact that it was not
   * woken up a pthread_cond_signal() or a pthread_cond_broadcast().
   */
  if ( error == EINTR ) {
    error = 0;
  }

  /*
   * When we get here the dispatch disable level is 0.
   */
  mutex_error = pthread_mutex_lock( mutex );
  if ( mutex_error != 0 ) {
    _Assert( mutex_error == EINVAL );
    return EINVAL;
  }

  return error;
}