/*
 * Maps an object identifier to its control block.
 *
 * On success the object is returned with *location set to OBJECTS_LOCAL and
 * interrupts still disabled via lock_context; the caller is responsible for
 * re-enabling them.  On a failed local lookup interrupts are re-enabled
 * before returning NULL with *location set to OBJECTS_ERROR.  Identifiers
 * outside the local range are forwarded to the multiprocessing layer when
 * RTEMS_MULTIPROCESSING is configured.
 */
Objects_Control *_Objects_Get_isr_disable(
  Objects_Information *information,
  Objects_Id           id,
  Objects_Locations   *location,
  ISR_lock_Context    *lock_context
)
{
  uint32_t index = id - information->minimum_id + 1;

  if ( index <= information->maximum ) {
    Objects_Control *the_object;

    _ISR_lock_ISR_disable( lock_context );

    the_object = information->local_table[ index ];
    if ( the_object != NULL ) {
      *location = OBJECTS_LOCAL;
      /* NOTE: returns with interrupts disabled on purpose */
      return the_object;
    }

    /* Slot is empty: leave the critical section before reporting the error */
    _ISR_lock_ISR_enable( lock_context );
    *location = OBJECTS_ERROR;
    return NULL;
  }

  *location = OBJECTS_ERROR;

#if defined(RTEMS_MULTIPROCESSING)
  {
    Objects_Control *the_object;

    /* May overwrite *location if the object turns out to be remote */
    _Objects_MP_Is_remote( information, id, location, &the_object );
    return the_object;
  }
#else
  return NULL;
#endif
}
/*
 * Exercises the ISR lock API: checks that static and dynamic lock
 * initialization agree, and that each acquire/release variant leaves the
 * interrupt level in the expected state.
 */
static void test_isr_locks( void )
{
  ISR_Level normal_interrupt_level = _ISR_Get_level();
  ISR_lock_Control initialized = ISR_LOCK_INITIALIZER("test");
  ISR_lock_Control lock;
  ISR_lock_Context lock_context;

  _ISR_lock_Initialize( &lock, "test" );

  /*
   * A run-time initialized lock must be bit-identical to a statically
   * initialized one (lock_size is defined elsewhere in this test —
   * presumably the significant portion of ISR_lock_Control).
   */
  rtems_test_assert( memcmp( &lock, &initialized, lock_size ) == 0 );

  /* The combined disable-and-acquire variant must raise the interrupt level */
  _ISR_lock_ISR_disable_and_acquire( &lock, &lock_context );
  rtems_test_assert( normal_interrupt_level != _ISR_Get_level() );

  /* Flash briefly opens the section but must return with interrupts disabled */
  _ISR_lock_Flash( &lock, &lock_context );
  rtems_test_assert( normal_interrupt_level != _ISR_Get_level() );

  _ISR_lock_Release_and_ISR_enable( &lock, &lock_context );
  rtems_test_assert( normal_interrupt_level == _ISR_Get_level() );

  /* Plain ISR disable/enable without touching the lock */
  _ISR_lock_ISR_disable( &lock_context );
  rtems_test_assert( normal_interrupt_level != _ISR_Get_level() );

  _ISR_lock_ISR_enable( &lock_context );
  rtems_test_assert( normal_interrupt_level == _ISR_Get_level() );

  /* Acquire/Release without the ISR variants must not change the level */
  _ISR_lock_Acquire( &lock, &lock_context );
  rtems_test_assert( normal_interrupt_level == _ISR_Get_level() );

  _ISR_lock_Release( &lock, &lock_context );
  rtems_test_assert( normal_interrupt_level == _ISR_Get_level() );

  _ISR_lock_Destroy( &lock );
  _ISR_lock_Destroy( &initialized );
}
/*
 * Blocks the executing thread on the condition variable.  The caller must
 * hold _mutex; it is released while waiting and reacquired before return.
 */
void _Condition_Wait(
  struct _Condition_Control *_condition,
  struct _Mutex_Control *_mutex
)
{
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu_self;

  _ISR_lock_ISR_disable( &lock_context );
  /*
   * Enqueue the executing thread (timeout of 0 — presumably "no timeout";
   * confirm against _Condition_Do_wait).  Appears to return with thread
   * dispatching disabled so the release below cannot lose a wakeup.
   */
  cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );

  /* Release the mutex only after the thread is registered as a waiter */
  _Mutex_Release( _mutex );
  /* Re-enabling dispatch performs the actual block until a wake-up */
  _Thread_Dispatch_enable( cpu_self );
  _Mutex_Acquire( _mutex );
}
/*
 * Disables interrupts and acquires the semaphore's thread queue lock.
 * Returns the executing thread; the caller must eventually release the
 * queue lock via the same queue context.
 */
static Thread_Control *_Semaphore_Queue_acquire(
  Semaphore_Control    *sem,
  Thread_queue_Context *queue_context
)
{
  ISR_lock_Context *lock_context;
  Thread_Control   *executing;

  lock_context = &queue_context->Lock_context.Lock_context;

  _ISR_lock_ISR_disable( lock_context );

  /* Read the executing thread only after interrupts are off */
  executing = _Thread_Executing;

  _Thread_queue_Queue_acquire_critical(
    &sem->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}
/*
 * Disables interrupts and acquires the mutex's thread queue lock.
 * Returns the executing thread; the caller must release the queue lock
 * with the same lock context.
 */
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  Thread_Control *current_thread;

  _ISR_lock_ISR_disable( lock_context );

  /* Sample the executing thread inside the interrupt-disabled section */
  current_thread = _Thread_Executing;

  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &current_thread->Potpourri_stats,
    lock_context
  );

  return current_thread;
}
/*
 * This is the operation that is run when a timer expires.
 *
 * Watchdog service routine for a POSIX timer: counts the expiration,
 * re-arms the timer if it is periodic, and delivers the configured signal.
 */
void _POSIX_Timer_TSR( Watchdog_Control *the_watchdog )
{
  POSIX_Timer_Control *ptimer;
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu;

  /* Recover the timer control block embedding this watchdog */
  ptimer = RTEMS_CONTAINER_OF( the_watchdog, POSIX_Timer_Control, Timer );

  _ISR_lock_ISR_disable( &lock_context );
  cpu = _POSIX_Timer_Acquire_critical( ptimer, &lock_context );

  /* Increment the number of expirations. */
  ptimer->overrun = ptimer->overrun + 1;

  /* The timer must be reprogrammed */
  if ( ( ptimer->timer_data.it_interval.tv_sec != 0 ) ||
       ( ptimer->timer_data.it_interval.tv_nsec != 0 ) ) {
    /* Periodic timer: schedule the next expiration */
    _POSIX_Timer_Insert( ptimer, cpu, ptimer->ticks );
  } else {
    /* Indicates that the timer is stopped */
    ptimer->state = POSIX_TIMER_STATE_CREATE_STOP;
  }

  _POSIX_Timer_Release( cpu, &lock_context );

  /*
   * The sending of the signal to the process running the handling function
   * specified for that signal is simulated.
   *
   * NOTE(review): ptimer fields (thread_id, inf.sigev_signo, overrun) are
   * accessed below after the lock was released — presumably safe because
   * the timer is canceled before its thread is deleted; confirm.
   */
  if ( pthread_kill ( ptimer->thread_id, ptimer->inf.sigev_signo ) ) {
    _Assert( FALSE );
    /*
     * TODO: What if an error happens at run-time? This should never
     * occur because the timer should be canceled if the thread
     * is deleted. This method is being invoked from the Clock
     * Tick ISR so even if we decide to take action on an error,
     * we don't have many options. We shouldn't shut the system down.
     */
  }

  /* After the signal handler returns, the count of expirations of the
   * timer must be set to 0. */
  ptimer->overrun = 0;
}
/*
 * Watchdog service routine invoked when a rate monotonic period expires.
 * If the owner is waiting on this period it is readied (and unblocked if it
 * already blocked); otherwise only the deadline is renewed.
 */
void _Rate_monotonic_Timeout( Watchdog_Control *the_watchdog )
{
  Rate_monotonic_Control *the_period;
  Thread_Control *owner;
  ISR_lock_Context lock_context;
  Thread_Wait_flags wait_flags;

  /* Recover the period control block embedding this watchdog */
  the_period = RTEMS_CONTAINER_OF( the_watchdog, Rate_monotonic_Control, Timer );
  owner = the_period->owner;

  _ISR_lock_ISR_disable( &lock_context );
  _Rate_monotonic_Acquire_critical( the_period, &lock_context );

  wait_flags = _Thread_Wait_flags_get( owner );

  /* Only act if the owner is waiting on *this* period object */
  if ( ( wait_flags & THREAD_WAIT_CLASS_PERIOD ) != 0 &&
       owner->Wait.return_argument == the_period ) {
    bool unblock;
    bool success;

    owner->Wait.return_argument = NULL;

    /*
     * Race with the owner's block path: if it still only intends to block,
     * flipping the flags here prevents the block entirely; otherwise it has
     * already blocked and must be explicitly unblocked below.
     */
    success = _Thread_Wait_flags_try_change_release(
      owner,
      RATE_MONOTONIC_INTEND_TO_BLOCK,
      RATE_MONOTONIC_READY_AGAIN
    );
    if ( success ) {
      unblock = false;
    } else {
      _Assert( _Thread_Wait_flags_get( owner ) == RATE_MONOTONIC_BLOCKED );
      _Thread_Wait_flags_set( owner, RATE_MONOTONIC_READY_AGAIN );
      unblock = true;
    }

    /* Restart the period; presumably releases the lock — confirm */
    _Rate_monotonic_Restart( the_period, owner, &lock_context );

    if ( unblock ) {
      _Thread_Unblock( owner );
    }
  } else {
    /* Owner not waiting on this period: just push the deadline forward */
    _Rate_monotonic_Renew_deadline( the_period, &lock_context );
  }
}
/*
 * Blocks the executing thread on the condition variable using a recursive
 * mutex.  The mutex's nesting level is saved, cleared for the fully-released
 * wait, and restored after the mutex is reacquired.
 */
void _Condition_Wait_recursive(
  struct _Condition_Control *_condition,
  struct _Mutex_recursive_Control *_mutex
)
{
  ISR_lock_Context lock_context;
  Per_CPU_Control *cpu_self;
  unsigned int nest_level;

  _ISR_lock_ISR_disable( &lock_context );
  /* Enqueue the executing thread; 0 presumably means no timeout — confirm */
  cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );

  /* Fully release the recursive mutex regardless of nesting depth */
  nest_level = _mutex->_nest_level;
  _mutex->_nest_level = 0;
  _Mutex_recursive_Release( _mutex );
  /* Re-enabling dispatch performs the actual block until a wake-up */
  _Thread_Dispatch_enable( cpu_self );
  _Mutex_recursive_Acquire( _mutex );
  /* Restore the caller's nesting depth */
  _mutex->_nest_level = nest_level;
}
int _Condition_Wait_recursive_timed( struct _Condition_Control *_condition, struct _Mutex_recursive_Control *_mutex, const struct timespec *abstime ) { ISR_lock_Context lock_context; Per_CPU_Control *cpu_self; Thread_Control *executing; int eno; unsigned int nest_level; Watchdog_Interval ticks; _ISR_lock_ISR_disable( &lock_context ); switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) { case TOD_ABSOLUTE_TIMEOUT_INVALID: _ISR_lock_ISR_enable( &lock_context ); return EINVAL; case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST: case TOD_ABSOLUTE_TIMEOUT_IS_NOW: _ISR_lock_ISR_enable( &lock_context ); return ETIMEDOUT; default: break; } cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context ); nest_level = _mutex->_nest_level; _mutex->_nest_level = 0; _Mutex_recursive_Release( _mutex ); executing = cpu_self->executing; _Thread_Dispatch_enable( cpu_self ); eno = (int) executing->Wait.return_code; _Mutex_recursive_Acquire( _mutex ); _mutex->_nest_level = nest_level; return eno; }
/*
 * Acquires an internal API mutex with thread life protection enabled.
 * On the outermost acquisition (nest_count == 1) the previous protection
 * state is stored in the mutex so that unlock can restore it.
 */
void _API_Mutex_Lock( API_Mutex_Control *the_mutex )
{
  bool previous_thread_life_protection;
  ISR_lock_Context lock_context;

  /* Protect the holder from deletion while it owns this mutex */
  previous_thread_life_protection = _Thread_Set_life_protection( true );

  _ISR_lock_ISR_disable( &lock_context );
  /* Seize consumes the lock context; presumably blocks forever (timeout 0) */
  _CORE_mutex_Seize(
    &the_mutex->Mutex,
    _Thread_Executing,
    the_mutex->Object.id,
    true,
    0,
    &lock_context
  );

  /* Only the outermost acquisition records the prior protection state */
  if ( the_mutex->Mutex.nest_count == 1 ) {
    the_mutex->previous_thread_life_protection =
      previous_thread_life_protection;
  }
}
/*
 * Acquires an internal API mutex with thread life protection enabled.
 * On the outermost acquisition (nest_level == 0 after seize) the previous
 * life state is stored in the mutex so that unlock can restore it.
 */
void _API_Mutex_Lock( API_Mutex_Control *the_mutex )
{
  Thread_Life_state previous_thread_life_state;
  Thread_queue_Context queue_context;

  /* Protect the holder from deletion while it owns this mutex */
  previous_thread_life_state =
    _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );

  _Thread_queue_Context_initialize( &queue_context );
  _ISR_lock_ISR_disable( &queue_context.Lock_context );
  /* Block without a timeout until the mutex is available */
  _Thread_queue_Context_set_no_timeout( &queue_context );
  _CORE_recursive_mutex_Seize(
    &the_mutex->Mutex,
    _Thread_Executing,
    true,
    _CORE_recursive_mutex_Seize_nested,
    &queue_context
  );

  /* Only the outermost acquisition records the prior life state */
  if ( the_mutex->Mutex.nest_level == 0 ) {
    the_mutex->previous_thread_life_state = previous_thread_life_state;
  }
}
/*
 * Wakes up to count threads waiting on the condition variable and returns
 * the number of threads woken.  Threads are first extracted under the queue
 * lock, collected on a local chain, and only unblocked after the lock is
 * released (with thread dispatching disabled to keep the set stable).
 */
static int _Condition_Wake( struct _Condition_Control *_condition, int count )
{
  Condition_Control *condition;
  ISR_lock_Context lock_context;
  Thread_queue_Heads *heads;
  Chain_Control unblock;
  Chain_Node *node;
  Chain_Node *tail;
  int woken;

  condition = _Condition_Get( _condition );
  _ISR_lock_ISR_disable( &lock_context );
  _Condition_Queue_acquire_critical( condition, &lock_context );

  /*
   * In common uses cases of condition variables there are normally no threads
   * on the queue, so check this condition early.
   */
  heads = condition->Queue.Queue.heads;
  if ( __predict_true( heads == NULL ) ) {
    _Condition_Queue_release( condition, &lock_context );
    return 0;
  }

  woken = 0;
  _Chain_Initialize_empty( &unblock );

  /* Extract up to count waiters while still holding the queue lock */
  while ( count > 0 && heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control *first;
    bool do_unblock;

    operations = CONDITION_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    /*
     * do_unblock is false when the extraction alone suffices (presumably
     * the thread had not fully blocked yet — confirm against
     * _Thread_queue_Extract_locked); it still counts as woken.
     */
    do_unblock = _Thread_queue_Extract_locked(
      &condition->Queue.Queue,
      operations,
      first
    );
    if (do_unblock) {
      _Chain_Append_unprotected( &unblock, &first->Wait.Node.Chain );
    }

    ++woken;
    --count;
    heads = condition->Queue.Queue.heads;
  }

  node = _Chain_First( &unblock );
  tail = _Chain_Tail( &unblock );

  if ( node != tail ) {
    Per_CPU_Control *cpu_self;

    /*
     * Disable dispatching before dropping the queue lock so the collected
     * threads cannot run until every one of them has been unblocked.
     */
    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
    _Condition_Queue_release( condition, &lock_context );

    do {
      Thread_Control *thread;
      Chain_Node *next;

      /* Fetch next before unblocking: the node is reused once awake */
      next = _Chain_Next( node );
      thread = THREAD_CHAIN_NODE_TO_THREAD( node );
      /* Cancel any pending wait timeout before unblocking */
      _Watchdog_Remove_ticks( &thread->Timer );
      _Thread_Unblock( thread );

      node = next;
    } while ( node != tail );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Condition_Queue_release( condition, &lock_context );
  }

  return woken;
}