static RBTree_Compare_result node_cmp(
  const RBTree_Node *n1,
  const RBTree_Node *n2
)
{
  int key1 = RTEMS_CONTAINER_OF( n1, test_rtems_rbtree_node, node )->data.key;
  int key2 = RTEMS_CONTAINER_OF( n2, test_rtems_rbtree_node, node )->data.key;

  return key1 - key2;
}
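/*
 * Not RTEMS code: a minimal sketch of the container_of pattern that every
 * function in this listing relies on.  Given a pointer to a member embedded
 * in a structure, subtracting the member's offset recovers a pointer to the
 * enclosing structure.  The macro, struct, and variable names below are
 * illustrative, not the RTEMS definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define CONTAINER_OF( _m, _type, _member ) \
  ( (_type *) ( (char *) ( _m ) - offsetof( _type, _member ) ) )

typedef struct {
  int key;  /* payload */
  int node; /* stands in for an embedded node such as RBTree_Node */
} demo_item;

int main( void )
{
  demo_item  item = { .key = 42, .node = 0 };
  int       *member = &item.node;
  demo_item *recovered = CONTAINER_OF( member, demo_item, node );

  printf( "key = %d\n", recovered->key ); /* prints 42 */
  return 0;
}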
/*
 * Obtain the identifier of the object on which the thread is currently
 * waiting, or 0 if it waits on no object.
 */
Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread )
{
  States_Control current_state;

  current_state = the_thread->current_state;

#if defined(RTEMS_MULTIPROCESSING)
  if ( ( current_state & STATES_WAITING_FOR_RPC_REPLY ) != 0 ) {
    return the_thread->Wait.remote_id;
  }
#endif

  if ( ( current_state & THREAD_WAIT_QUEUE_OBJECT_STATES ) != 0 ) {
    const Thread_Wait_queue_object *queue_object;

    queue_object = RTEMS_CONTAINER_OF(
      the_thread->Wait.queue,
      Thread_Wait_queue_object,
      Wait_queue.Queue
    );

    return queue_object->Object.id;
  }

  return 0;
}
void _Timer_Routine_adaptor( Watchdog_Control *the_watchdog )
{
  Timer_Control   *the_timer;
  Per_CPU_Control *cpu;

  the_timer = RTEMS_CONTAINER_OF( the_watchdog, Timer_Control, Ticker );
  cpu = _Watchdog_Get_CPU( &the_timer->Ticker );
  the_timer->stop_time = _Timer_Get_CPU_ticks( cpu );

  ( *the_timer->routine )( the_timer->Object.id, the_timer->user_data );
}
/*
 *  This is the operation that is run when a timer expires.
 */
void _POSIX_Timer_TSR( Watchdog_Control *the_watchdog )
{
  POSIX_Timer_Control *ptimer;
  ISR_lock_Context     lock_context;
  Per_CPU_Control     *cpu;

  ptimer = RTEMS_CONTAINER_OF( the_watchdog, POSIX_Timer_Control, Timer );
  _ISR_lock_ISR_disable( &lock_context );
  cpu = _POSIX_Timer_Acquire_critical( ptimer, &lock_context );

  /* Increment the number of expirations. */
  ptimer->overrun = ptimer->overrun + 1;

  /* The timer must be reprogrammed */
  if ( ( ptimer->timer_data.it_interval.tv_sec != 0 ) ||
       ( ptimer->timer_data.it_interval.tv_nsec != 0 ) ) {
    _POSIX_Timer_Insert( ptimer, cpu, ptimer->ticks );
  } else {
    /* Indicates that the timer is stopped */
    ptimer->state = POSIX_TIMER_STATE_CREATE_STOP;
  }

  _POSIX_Timer_Release( cpu, &lock_context );

  /*
   * The sending of the signal to the process running the handling function
   * specified for that signal is simulated
   */
  if ( pthread_kill( ptimer->thread_id, ptimer->inf.sigev_signo ) ) {
    _Assert( FALSE );
    /*
     * TODO: What if an error happens at run-time?  This should never
     *       occur because the timer should be canceled if the thread
     *       is deleted.  This method is being invoked from the Clock
     *       Tick ISR so even if we decide to take action on an error,
     *       we don't have many options.  We shouldn't shut the system down.
     */
  }

  /*
   * After the signal handler returns, the count of expirations of the
   * timer must be set to 0.
   */
  ptimer->overrun = 0;
}
void _Rate_monotonic_Timeout( Watchdog_Control *the_watchdog )
{
  Rate_monotonic_Control *the_period;
  Thread_Control         *owner;
  ISR_lock_Context        lock_context;
  Thread_Wait_flags       wait_flags;

  the_period = RTEMS_CONTAINER_OF( the_watchdog, Rate_monotonic_Control, Timer );
  owner = the_period->owner;

  _ISR_lock_ISR_disable( &lock_context );
  _Rate_monotonic_Acquire_critical( the_period, &lock_context );
  wait_flags = _Thread_Wait_flags_get( owner );

  if (
    ( wait_flags & THREAD_WAIT_CLASS_PERIOD ) != 0
      && owner->Wait.return_argument == the_period
  ) {
    bool unblock;
    bool success;

    owner->Wait.return_argument = NULL;

    success = _Thread_Wait_flags_try_change_release(
      owner,
      RATE_MONOTONIC_INTEND_TO_BLOCK,
      RATE_MONOTONIC_READY_AGAIN
    );
    if ( success ) {
      unblock = false;
    } else {
      _Assert( _Thread_Wait_flags_get( owner ) == RATE_MONOTONIC_BLOCKED );
      _Thread_Wait_flags_set( owner, RATE_MONOTONIC_READY_AGAIN );
      unblock = true;
    }

    _Rate_monotonic_Restart( the_period, owner, &lock_context );

    if ( unblock ) {
      _Thread_Unblock( owner );
    }
  } else {
    _Rate_monotonic_Renew_deadline( the_period, &lock_context );
  }
}
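/*
 * Not RTEMS code: a minimal C11 sketch of the wait-flags handshake that
 * _Rate_monotonic_Timeout performs above.  The timeout handler races with
 * the period owner.  A successful compare-exchange from INTEND_TO_BLOCK to
 * READY_AGAIN means the owner never actually blocked, so no wakeup is
 * needed; otherwise the owner is already blocked and must be unblocked
 * explicitly.  All names here are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum { INTEND_TO_BLOCK = 1, BLOCKED = 2, READY_AGAIN = 3 };

static bool timeout_needs_unblock( atomic_uint *wait_flags )
{
  unsigned int expected = INTEND_TO_BLOCK;

  if ( atomic_compare_exchange_strong( wait_flags, &expected, READY_AGAIN ) ) {
    /* The waiter will observe READY_AGAIN before it blocks. */
    return false;
  }

  /* The waiter already blocked: mark it ready and request a wakeup. */
  atomic_store( wait_flags, READY_AGAIN );
  return true;
}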
static Status_Control _POSIX_Mutex_Lock_nested(
  CORE_recursive_mutex_Control *the_recursive_mutex
)
{
  POSIX_Mutex_Control *the_mutex;

  the_mutex = RTEMS_CONTAINER_OF(
    the_recursive_mutex,
    POSIX_Mutex_Control,
    Mutex.Recursive
  );

  if ( the_mutex->is_recursive ) {
    return _CORE_recursive_mutex_Seize_nested( the_recursive_mutex );
  } else {
    return STATUS_NESTING_NOT_ALLOWED;
  }
}
static void _Thread_queue_Queue_extract(
  Thread_queue_Queue *queue,
  Thread_queue_Heads *heads,
  Thread_Control     *the_thread,
  void             ( *extract )( Thread_queue_Heads *, Thread_Control * )
)
{
  _Assert( heads != NULL );

  /*
   * Replenish the extracted thread's spare heads from the queue's free
   * chain.  If the chain is thereby emptied, the last waiter is gone and
   * the queue has no heads anymore.
   */
  the_thread->Wait.spare_heads = RTEMS_CONTAINER_OF(
    _Chain_Get_first_unprotected( &heads->Free_chain ),
    Thread_queue_Heads,
    Free_node
  );

  if ( _Chain_Is_empty( &heads->Free_chain ) ) {
    queue->heads = NULL;
  }

  ( *extract )( heads, the_thread );
}
/*
 * Decides, for each waiting thread in turn, whether it may acquire the
 * RWLock during a flush and updates the lock state accordingly; a NULL
 * result leaves the thread blocked.
 */
static Thread_Control *_CORE_RWLock_Flush_filter(
  Thread_Control       *the_thread,
  Thread_queue_Queue   *queue,
  Thread_queue_Context *queue_context
)
{
  CORE_RWLock_Control *the_rwlock;

  the_rwlock = RTEMS_CONTAINER_OF(
    queue,
    CORE_RWLock_Control,
    Wait_queue.Queue
  );

  switch ( the_rwlock->current_state ) {
    case CORE_RWLOCK_LOCKED_FOR_READING:
      if ( _CORE_RWLock_Is_waiting_for_reading( the_thread ) ) {
        the_rwlock->number_of_readers += 1;
      } else {
        the_thread = NULL;
      }
      break;
    case CORE_RWLOCK_LOCKED_FOR_WRITING:
      the_thread = NULL;
      break;
    default:
      _Assert( the_rwlock->current_state == CORE_RWLOCK_UNLOCKED );

      if ( _CORE_RWLock_Is_waiting_for_reading( the_thread ) ) {
        the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_READING;
        the_rwlock->number_of_readers = 1;
      } else {
        the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_WRITING;
      }
      break;
  }

  return the_thread;
}