/*
 * Turn a terminating thread into a zombie: close its object, mark it
 * STATES_ZOMBIE, detach it from any thread queue and its tick watchdog,
 * and append it to the global zombie chain under the zombie ISR lock.
 *
 * A thread that still owns resources (e.g. held mutexes) must not
 * terminate, so this raises a fatal internal error instead.
 */
static void _Thread_Make_zombie( Thread_Control *the_thread )
{
  ISR_lock_Context       lock_context;
  Thread_Zombie_control *zombies = &_Thread_Zombies;

  if ( _Thread_Owns_resources( the_thread ) ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_RESOURCE_IN_USE
    );
  }

  /* Remove the thread from the object ID/name lookup tables. */
  _Objects_Close(
    _Objects_Get_information_id( the_thread->Object.id ),
    &the_thread->Object
  );

  _Thread_Set_state( the_thread, STATES_ZOMBIE );
  _Thread_queue_Extract_with_proxy( the_thread );
  _Watchdog_Remove_ticks( &the_thread->Timer );

  /*
   * The zombie chain is shared system state; the append is protected by
   * the zombie lock with interrupts disabled (matters for SMP).
   */
  _ISR_lock_ISR_disable_and_acquire( &zombies->Lock, &lock_context );
  _Chain_Append_unprotected( &zombies->Chain, &the_thread->Object.Node );
  _ISR_lock_Release_and_ISR_enable( &zombies->Lock, &lock_context );
}
/*
 * Benchmark task: record the dispatch time measured so far, suspend
 * itself, then hand-pick the next executing thread directly from the
 * scheduler's ready queues and time two raw context switches.
 *
 * NOTE(review): this deliberately bypasses the scheduler by writing
 * _Thread_Executing and _Thread_Dispatch_necessary directly — only valid
 * inside this timing test, never in application code.
 */
rtems_task Middle_task( rtems_task_argument argument )
{
  Chain_Control *ready_queues;

  thread_dispatch_no_fp_time = benchmark_timer_read();

  _Thread_Set_state( _Thread_Executing, STATES_SUSPENDED );

  Middle_tcb = _Thread_Executing;

  /* assumes the priority scheduler's per-priority ready chains — TODO confirm */
  ready_queues = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
    (Thread_Control *) _Chain_First( &ready_queues[ LOW_PRIORITY ] );

  /* do not force context switch */
  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  /* Time a switch to the chosen thread, then a switch to Low_tcb. */
  benchmark_timer_initialize();
  _Context_Switch( &Middle_tcb->Registers, &_Thread_Executing->Registers );

  benchmark_timer_initialize();
  _Context_Switch( &Middle_tcb->Registers, &Low_tcb->Registers );
}
/*
 * Turn a terminating thread into a zombie: close its object, mark it
 * STATES_ZOMBIE, detach it from any thread queue and its timer, put it on
 * the zombie chain, and wake any threads joining it.
 *
 * Raises a fatal internal error if the thread still owns resources.
 */
static void _Thread_Make_zombie( Thread_Control *the_thread )
{
  if ( _Thread_Owns_resources( the_thread ) ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_RESOURCE_IN_USE
    );
  }

  /* Remove the thread from the object ID/name lookup tables. */
  _Objects_Close(
    _Objects_Get_information_id( the_thread->Object.id ),
    &the_thread->Object
  );

  _Thread_Set_state( the_thread, STATES_ZOMBIE );
  _Thread_queue_Extract_with_proxy( the_thread );
  _Thread_Timer_remove( the_thread );

  /*
   * Add the thread to the thread zombie chain before we wake up joining
   * threads, so that they are able to clean up the thread immediately.  This
   * matters for SMP configurations.
   */
  _Thread_Add_to_zombie_chain( the_thread );
  _Thread_Wake_up_joining_threads( the_thread );
}
/*
 * Benchmark task (SMP-aware variant): record the dispatch time, suspend
 * itself, pick the next executing thread straight out of the priority
 * scheduler's ready queues, and time two raw context switches.
 *
 * NOTE(review): bypasses the scheduler via set_thread_executing() /
 * set_thread_dispatch_necessary() — only valid inside this timing test.
 */
rtems_task Middle_task( rtems_task_argument argument )
{
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );

  thread_dispatch_no_fp_time = benchmark_timer_read();

  _Thread_Set_state( _Thread_Get_executing(), STATES_SUSPENDED );

  Middle_tcb = _Thread_Get_executing();

  set_thread_executing(
    (Thread_Control *) _Chain_First( &scheduler_context->Ready[ LOW_PRIORITY ] )
  );

  /* do not force context switch */
  set_thread_dispatch_necessary( false );

  _Thread_Dispatch_disable();

  /* Time a switch to the chosen thread, then a switch to Low_tcb. */
  benchmark_timer_initialize();
  _Context_Switch( &Middle_tcb->Registers, &_Thread_Get_executing()->Registers );

  benchmark_timer_initialize();
  _Context_Switch( &Middle_tcb->Registers, &Low_tcb->Registers );
}
/*
 * Benchmark task (oldest variant): record the dispatch time, suspend
 * itself, pick the next executing thread from the global ready chain, and
 * time two raw context switches.
 *
 * NOTE(review): bypasses the scheduler by writing _Thread_Executing and
 * _Context_Switch_necessary directly — only valid in this timing test.
 */
rtems_task Middle_task( rtems_task_argument argument )
{
  thread_dispatch_no_fp_time = benchmark_timer_read();

  _Thread_Set_state( _Thread_Executing, STATES_SUSPENDED );

  Middle_tcb = _Thread_Executing;

  _Thread_Executing =
    (Thread_Control *) _Thread_Ready_chain[ LOW_PRIORITY ].first;

  /* do not force context switch */
  _Context_Switch_necessary = false;

  _Thread_Disable_dispatch();

  /* Time a switch to the chosen thread, then a switch to Low_tcb. */
  benchmark_timer_initialize();
  _Context_Switch( &Middle_tcb->Registers, &_Thread_Executing->Registers );

  benchmark_timer_initialize();
  _Context_Switch( &Middle_tcb->Registers, &Low_tcb->Registers );
}
/*
 * Benchmark task: measure the cost of the basic interrupt and dispatch
 * primitives (disable/flash/enable interrupts, disable/enable dispatch,
 * set state), then trigger a dispatch so Middle_task can time it.
 */
rtems_task High_task( rtems_task_argument argument )
{
  rtems_interrupt_level level;

  benchmark_timer_initialize();
    rtems_interrupt_disable( level );
  isr_disable_time = benchmark_timer_read();

  benchmark_timer_initialize();
    rtems_interrupt_flash( level );
  isr_flash_time = benchmark_timer_read();

  benchmark_timer_initialize();
    rtems_interrupt_enable( level );
  isr_enable_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Disable_dispatch();
  thread_disable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Enable_dispatch();
  thread_enable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Set_state( _Thread_Executing, STATES_SUSPENDED );
  thread_set_state_time = benchmark_timer_read();

  /* Force a dispatch so the switch into Middle_task can be timed. */
  _Context_Switch_necessary = true;

  benchmark_timer_initialize();
  _Thread_Dispatch();           /* dispatches Middle_task */
}
/*
 * Sleep the calling task for @a ticks clock ticks; a count of zero yields
 * the processor instead.  Always returns RTEMS_SUCCESSFUL.
 */
rtems_status_code rtems_task_wake_after(
  rtems_interval ticks
)
{
  /*
   * It is critical to obtain the executing thread after thread dispatching is
   * disabled on SMP configurations.
   */
  Thread_Control  *executing;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
  executing = _Thread_Executing;

  if ( ticks == 0 ) {
    _Thread_Yield( executing );
  } else {
    /* Block, mark the wait state, and arm the per-thread tick timer. */
    _Thread_Set_state( executing, STATES_DELAYING );
    _Thread_Wait_flags_set( executing, THREAD_WAIT_STATE_BLOCKED );
    _Watchdog_Initialize(
      &executing->Timer,
      _Thread_Timeout,
      0,
      executing
    );
    _Watchdog_Insert_ticks( &executing->Timer, ticks );
  }

  /* Re-enabling dispatch performs the actual context switch. */
  _Thread_Dispatch_enable( cpu_self );
  return RTEMS_SUCCESSFUL;
}
/*
 * Close a thread on behalf of @a executing.  A dormant thread is turned
 * into a zombie immediately; otherwise a terminating life change is
 * requested and, unless the executing thread is itself terminating, the
 * executing thread blocks until the victim has terminated.
 *
 * Pre-condition: the executing thread's life must be protected (asserted).
 */
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing )
{
  _Assert( _Thread_Is_life_protected( executing->Life.state ) );

  if ( _States_Is_dormant( the_thread->current_state ) ) {
    /* Never started, so it can be reaped without a life-change protocol. */
    _Thread_Make_zombie( the_thread );
  } else {
    if (
      the_thread != executing
        && !_Thread_Is_life_terminating( executing->Life.state )
    ) {
      /*
       * Wait for termination of victim thread.  If the executing thread is
       * also terminated, then do not wait.  This avoids potential cyclic
       * dependencies and thus dead lock.
       */
      the_thread->Life.terminator = executing;
      _Thread_Set_state( executing, STATES_WAITING_FOR_TERMINATION );
    }

    /* Ask the victim to run its termination handlers and exit. */
    _Thread_Request_life_change(
      the_thread,
      executing,
      executing->current_priority,
      THREAD_LIFE_TERMINATING
    );
  }
}
/*
 * Turn a terminating thread into a zombie: close its object, mark it
 * STATES_ZOMBIE, detach it from any thread queue and its timer, put it on
 * the zombie chain, and wake any joining threads.
 *
 * When resource counting is enabled, a thread that still owns resources
 * triggers a fatal internal error instead.
 */
static void _Thread_Make_zombie( Thread_Control *the_thread )
{
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
  if ( _Thread_Owns_resources( the_thread ) ) {
    _Internal_error( INTERNAL_ERROR_RESOURCE_IN_USE );
  }
#endif

  /* Remove the thread from the object ID/name lookup tables. */
  _Objects_Close(
    _Objects_Get_information_id( the_thread->Object.id ),
    &the_thread->Object
  );

  _Thread_Set_state( the_thread, STATES_ZOMBIE );
  _Thread_queue_Extract_with_proxy( the_thread );
  _Thread_Timer_remove( the_thread );

  /*
   * Add the thread to the thread zombie chain before we wake up joining
   * threads, so that they are able to clean up the thread immediately.  This
   * matters for SMP configurations.
   */
  _Thread_Add_to_zombie_chain( the_thread );
  _Thread_Wake_up_joining_threads( the_thread );
}
/*
 * Set @a state on @a thread.  When PREVENT_SMP_ASSERT_FAILURES is
 * configured, the call is wrapped in a dispatch-disabled section so SMP
 * assertions inside _Thread_Set_state() are satisfied.
 */
static void thread_set_state( Thread_Control *thread, States_Control state )
{
#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _Thread_Disable_dispatch();
  _Thread_Set_state( thread, state );
  _Thread_Unnest_dispatch();
#else
  _Thread_Set_state( thread, state );
#endif
}
/*
 * Block the executing thread on @a the_thread_queue, optionally arming a
 * timeout watchdog that fires @a handler, then run the discipline-specific
 * enqueue.  If the blocking operation was already satisfied or timed out
 * while we set up, the cancel path unblocks the thread.
 */
void _Thread_queue_Enqueue_with_handler(
  Thread_queue_Control          *the_thread_queue,
  Watchdog_Interval              timeout,
  Thread_queue_Timeout_callout   handler
)
{
  Thread_Control                   *the_thread;
  ISR_Level                         level;
  Thread_blocking_operation_States  sync_state;
  Thread_blocking_operation_States (*enqueue_p)(
    Thread_queue_Control *,
    Thread_Control *,
    ISR_Level *
  );

  the_thread = _Thread_Executing;

  /*
   * NOTE: the `else` below pairs with the MP `if` across the #endif, so in
   * multiprocessing builds a remote receive thread gets a proxy INSTEAD of
   * having its state set here.
   */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet )
    the_thread = _Thread_MP_Allocate_proxy( the_thread_queue->state );
  else
#endif
  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, the_thread_queue->state );

  /*
   * If the thread wants to timeout, then schedule its timer.
   */
  if ( timeout ) {
    _Watchdog_Initialize(
      &the_thread->Timer,
      handler,
      the_thread->Object.id,
      NULL
    );
    _Watchdog_Insert_ticks( &the_thread->Timer, timeout );
  }

  /*
   * Now enqueue the thread per the discipline for this thread queue.
   */
  if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_PRIORITY )
    enqueue_p = _Thread_queue_Enqueue_priority;
  else /* must be THREAD_QUEUE_DISCIPLINE_FIFO */
    enqueue_p = _Thread_queue_Enqueue_fifo;

  sync_state = (*enqueue_p)( the_thread_queue, the_thread, &level );

  /* Anything but NOTHING_HAPPENED means an ISR satisfied/timed-out the wait. */
  if ( sync_state != THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED )
    _Thread_blocking_operation_Cancel( sync_state, the_thread, level );
}
/*
 * Benchmark task (SMP-aware variant): measure the cost of local interrupt
 * disable/flash/enable, dispatch disable/enable, and set state, then force
 * a dispatch so Middle_task can time it.
 */
rtems_task High_task( rtems_task_argument argument )
{
  rtems_interrupt_level level;

  _Thread_Dispatch_disable();

  benchmark_timer_initialize();
    rtems_interrupt_local_disable( level );
  isr_disable_time = benchmark_timer_read();

  benchmark_timer_initialize();
#if defined(RTEMS_SMP)
    /* SMP has no interrupt flash primitive; emulate it with enable+disable. */
    rtems_interrupt_local_enable( level );
    rtems_interrupt_local_disable( level );
#else
    rtems_interrupt_flash( level );
#endif
  isr_flash_time = benchmark_timer_read();

  benchmark_timer_initialize();
    rtems_interrupt_local_enable( level );
  isr_enable_time = benchmark_timer_read();

  _Thread_Dispatch_enable( _Per_CPU_Get() );

  benchmark_timer_initialize();
    _Thread_Dispatch_disable();
  thread_disable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Dispatch_enable( _Per_CPU_Get() );
  thread_enable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Set_state( _Thread_Get_executing(), STATES_SUSPENDED );
  thread_set_state_time = benchmark_timer_read();

  /* Force a dispatch so the switch into Middle_task can be timed. */
  set_thread_dispatch_necessary( true );

  benchmark_timer_initialize();
  _Thread_Dispatch();           /* dispatches Middle_task */
}
/*
 * Conclude the current period of an active rate monotonic period object
 * and block the owner until the period expires.
 *
 * Uses the INTEND_TO_BLOCK/BLOCKED wait-flag protocol so a timeout firing
 * on another processor between releasing the period lock and blocking is
 * detected and undone via _Thread_Unblock().
 *
 * @param the_period   the active period object (its lock is held on entry
 *                     via @a lock_context and released here).
 * @param length       length of the next period in ticks.
 * @param executing    the executing (owner) thread.
 * @param lock_context lock context used to acquire the period lock.
 *
 * @return RTEMS_SUCCESSFUL always.
 */
static rtems_status_code _Rate_monotonic_Block_while_active(
  Rate_monotonic_Control *the_period,
  rtems_interval          length,
  Thread_Control         *executing,
  ISR_lock_Context       *lock_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  /*
   * Update statistics from the concluding period.
   */
  _Rate_monotonic_Update_statistics( the_period );

  /*
   * This tells the _Rate_monotonic_Timeout that this task is
   * in the process of blocking on the period and that we
   * may be changing the length of the next period.
   */
  the_period->next_length = length;
  executing->Wait.return_argument = the_period;
  _Thread_Wait_flags_set( executing, RATE_MONOTONIC_INTEND_TO_BLOCK );

  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Rate_monotonic_Release( the_period, lock_context );

  _Thread_Set_state( executing, STATES_WAITING_FOR_PERIOD );

  success = _Thread_Wait_flags_try_change_acquire(
    executing,
    RATE_MONOTONIC_INTEND_TO_BLOCK,
    RATE_MONOTONIC_BLOCKED
  );
  if ( !success ) {
    /* The period timeout already fired; undo the block. */
    _Assert(
      _Thread_Wait_flags_get( executing ) == RATE_MONOTONIC_READY_AGAIN
    );
    _Thread_Unblock( executing );
  }

  _Thread_Dispatch_enable( cpu_self );
  return RTEMS_SUCCESSFUL;
}
/*
 * Begin a life change (restart/terminate) for @a the_thread: reset its
 * start-time attributes, put it in STATES_RESTARTING, detach it from any
 * thread queue and watchdog, raise its priority if needed, queue the
 * life-change post-switch action, and make it ready again.
 */
static void _Thread_Start_life_change(
  Thread_Control          *the_thread,
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  /* Restore the attributes the thread was originally started with. */
  the_thread->is_preemptible   = the_thread->Start.is_preemptible;
  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
  the_thread->budget_callout   = the_thread->Start.budget_callout;
  the_thread->real_priority    = priority;

  _Thread_Set_state( the_thread, STATES_RESTARTING );
  _Thread_queue_Extract_with_proxy( the_thread );
  _Watchdog_Remove( &the_thread->Timer );
  _Scheduler_Set_priority_if_higher( scheduler, the_thread, priority );

  /* The actual life change runs as a post-switch action in the thread. */
  _Thread_Add_post_switch_action( the_thread, &the_thread->Life.Action );
  _Thread_Ready( the_thread );
  _Thread_Request_dispatch_if_executing( the_thread );
}
/*
 * Terminate a POSIX thread, delivering @a value_ptr to joiners.
 *
 * For a joinable thread: hand the exit value to every queued joiner, or —
 * if nobody has joined yet — block in WAITING_FOR_JOIN_AT_EXIT until a
 * joiner arrives.  Finally close the thread.  Life protection is raised
 * around the whole sequence so the thread is not torn down mid-exit.
 */
void _POSIX_Thread_Exit(
  Thread_Control *the_thread,
  void           *value_ptr
)
{
  Thread_Control    *unblocked;
  POSIX_API_Control *api;
  bool               previous_life_protection;

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  _Assert( _Debug_Is_thread_dispatching_allowed() );

  previous_life_protection = _Thread_Set_life_protection( true );
  _Thread_Disable_dispatch();

  the_thread->Wait.return_argument = value_ptr;

  /*
   * Process join
   */
  if ( api->detachstate == PTHREAD_CREATE_JOINABLE ) {
    unblocked = _Thread_queue_Dequeue( &api->Join_List );
    if ( unblocked ) {
      /* Give the exit value to every waiting joiner. */
      do {
        *(void **) unblocked->Wait.return_argument = value_ptr;
      } while ( ( unblocked = _Thread_queue_Dequeue( &api->Join_List ) ) );
    } else {
      /* No joiner yet: block here until one arrives. */
      _Thread_Set_state( the_thread, STATES_WAITING_FOR_JOIN_AT_EXIT );
      _Thread_Enable_dispatch();
      /* now waiting for thread to arrive */
      _Thread_Disable_dispatch();
    }
  }

  /*
   * Now shut down the thread
   */
  _Thread_Close( the_thread, _Thread_Executing );

  _Thread_Enable_dispatch();
  _Thread_Set_life_protection( previous_life_protection );
}
/*
 * Sleep the calling task for @a ticks clock ticks; a count of zero yields
 * the processor instead.  Always returns RTEMS_SUCCESSFUL.
 */
rtems_status_code rtems_task_wake_after(
  rtems_interval ticks
)
{
  _Thread_Disable_dispatch();
  if ( ticks == 0 ) {
    _Thread_Yield_processor();
  } else {
    /* Block and arm the per-thread tick timer to wake us back up. */
    _Thread_Set_state( _Thread_Executing, STATES_DELAYING );
    _Watchdog_Initialize(
      &_Thread_Executing->Timer,
      _Thread_Delay_ended,
      _Thread_Executing->Object.id,
      NULL
    );
    _Watchdog_Insert_ticks( &_Thread_Executing->Timer, ticks );
  }
  /* Re-enabling dispatch performs the actual context switch. */
  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
/*
 * Begin a life change (restart/terminate) for @a the_thread: reset its
 * start-time attributes, put it in STATES_RESTARTING, detach it from any
 * thread queue and watchdog, raise its real priority if needed, queue the
 * life-change action, and make it ready again.
 *
 * NOTE(review): @a scheduler is currently unused in this variant.
 */
static void _Thread_Start_life_change(
  Thread_Control          *the_thread,
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  /* Restore the attributes the thread was originally started with. */
  the_thread->is_preemptible   = the_thread->Start.is_preemptible;
  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
  the_thread->budget_callout   = the_thread->Start.budget_callout;

  _Thread_Set_state( the_thread, STATES_RESTARTING );
  _Thread_queue_Extract_with_proxy( the_thread );
  _Watchdog_Remove_ticks( &the_thread->Timer );

  /* Only raise (never lower) the real priority via the filter. */
  _Thread_Change_priority(
    the_thread,
    priority,
    NULL,
    _Thread_Raise_real_priority_filter,
    false
  );

  _Thread_Add_life_change_action( the_thread );
  _Thread_Ready( the_thread );
}
/*
 * Sleep the calling task until the wall-clock time in @a time_buffer.
 *
 * Returns RTEMS_NOT_DEFINED if the TOD has never been set,
 * RTEMS_INVALID_ADDRESS for a NULL buffer, RTEMS_INVALID_CLOCK for an
 * invalid or non-future time, RTEMS_SUCCESSFUL otherwise.
 */
rtems_status_code rtems_task_wake_when(
  rtems_time_of_day *time_buffer
)
{
  Watchdog_Interval seconds;

  if ( !_TOD_Is_set() )
    return RTEMS_NOT_DEFINED;

  if ( !time_buffer )
    return RTEMS_INVALID_ADDRESS;

  /* Sub-second resolution is not supported by this directive. */
  time_buffer->ticks = 0;

  if ( !_TOD_Validate( time_buffer ) )
    return RTEMS_INVALID_CLOCK;

  seconds = _TOD_To_seconds( time_buffer );

  /* The wake-up time must be in the future. */
  if ( seconds <= _TOD_Seconds_since_epoch() )
    return RTEMS_INVALID_CLOCK;

  _Thread_Disable_dispatch();
    _Thread_Set_state( _Thread_Executing, STATES_WAITING_FOR_TIME );
    _Watchdog_Initialize(
      &_Thread_Executing->Timer,
      _Thread_Delay_ended,
      _Thread_Executing->Object.id,
      NULL
    );
    /* Arm a seconds watchdog for the remaining interval. */
    _Watchdog_Insert_seconds(
      &_Thread_Executing->Timer,
      seconds - _TOD_Seconds_since_epoch()
    );
  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
/*
 * ITRON delay-task service: sleep the calling task for @a dlytim
 * milliseconds; a delay that converts to zero ticks yields the processor
 * instead.  Always returns E_OK.
 */
ER dly_tsk(
  DLYTIME dlytim
)
{
  Watchdog_Interval ticks;

  /* ITRON expresses the delay in milliseconds; convert to clock ticks. */
  ticks = TOD_MILLISECONDS_TO_TICKS( dlytim );

  _Thread_Disable_dispatch();
  if ( ticks == 0 ) {
    _Thread_Yield_processor();
  } else {
    /* Block and arm the per-thread tick timer to wake us back up. */
    _Thread_Set_state( _Thread_Executing, STATES_DELAYING );
    _Watchdog_Initialize(
      &_Thread_Executing->Timer,
      _Thread_Delay_ended,
      _Thread_Executing->Object.id,
      NULL
    );
    _Watchdog_Insert_ticks( &_Thread_Executing->Timer, ticks );
  }
  _Thread_Enable_dispatch();
  return E_OK;
}
/*
 * Final phase of the benchmark: time _Thread_Resume/_Thread_Unblock/
 * _Thread_Ready on Middle_tcb, time _Thread_Get/_Semaphore_Get loops
 * (minus measured loop overhead), patch the dispatcher bookkeeping back
 * into a consistent state, print all collected times, and exit the test.
 */
void complete_test( void )
{
  uint32_t index;
  rtems_id task_id;

  benchmark_timer_initialize();
    _Thread_Resume( Middle_tcb, true );
  thread_resume_time = benchmark_timer_read();

  _Thread_Set_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE );

  benchmark_timer_initialize();
    _Thread_Unblock( Middle_tcb );
  thread_unblock_time = benchmark_timer_read();

  _Thread_Set_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE );

  benchmark_timer_initialize();
    _Thread_Ready( Middle_tcb );
  thread_ready_time = benchmark_timer_read();

  /* Measure the empty-loop overhead so it can be reported separately. */
  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) benchmark_timer_empty_function();
  overhead = benchmark_timer_read();

  task_id = Middle_tcb->Object.id;

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Thread_Get( task_id, &location );
  thread_get_time = benchmark_timer_read();

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Semaphore_Get( Semaphore_id, &location );
  semaphore_get_time = benchmark_timer_read();

  /* 0x3 is deliberately not a valid object ID for the invalid-id path. */
  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Thread_Get( 0x3, &location );
  thread_get_invalid_time = benchmark_timer_read();

  /*
   * This is the running task and we have tricked RTEMS out enough where
   * we need to set some internal tracking information to match this.
   */
  _Thread_Heir = _Thread_Executing;
  _Context_Switch_necessary = false;
  _Thread_Dispatch_disable_level = 0;

  /*
   * Now dump all the times
   */
  put_time( "_ISR_Disable", isr_disable_time, 1, 0, 0 );
  put_time( "_ISR_Flash", isr_flash_time, 1, 0, 0 );
  put_time( "_ISR_Enable", isr_enable_time, 1, 0, 0 );
  put_time( "_Thread_Disable_dispatch", thread_disable_dispatch_time, 1, 0, 0 );
  put_time( "_Thread_Enable_dispatch", thread_enable_dispatch_time, 1, 0, 0 );
  put_time( "_Thread_Set_state", thread_set_state_time, 1, 0, 0 );
  put_time( "_Thread_Dispatch (NO FP)", thread_dispatch_no_fp_time, 1, 0, 0 );
  put_time(
    "context switch: no floating point contexts",
    context_switch_no_fp_time,
    1,
    0,
    0
  );
  put_time( "context switch: self", context_switch_self_time, 1, 0, 0 );
  put_time(
    "context switch: to another task",
    context_switch_another_task_time,
    1,
    0,
    0
  );

#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
  put_time(
    "fp context switch: restore 1st FP task",
    context_switch_restore_1st_fp_time,
    1,
    0,
    0
  );
  put_time(
    "fp context switch: save idle, restore initialized",
    context_switch_save_idle_restore_initted_time,
    1,
    0,
    0
  );
  put_time(
    "fp context switch: save idle, restore idle",
    context_switch_save_restore_idle_time,
    1,
    0,
    0
  );
  put_time(
    "fp context switch: save initialized, restore initialized",
    context_switch_save_restore_initted_time,
    1,
    0,
    0
  );
#else
  puts( "fp context switch: restore 1st FP task - NA" );
  puts( "fp context switch: save idle, restore initialized - NA" );
  puts( "fp context switch: save idle, restore idle - NA" );
  puts( "fp context switch: save initialized, restore initialized - NA" );
#endif

  put_time( "_Thread_Resume", thread_resume_time, 1, 0, 0 );
  put_time( "_Thread_Unblock", thread_unblock_time, 1, 0, 0 );
  put_time( "_Thread_Ready", thread_ready_time, 1, 0, 0 );
  put_time( "_Thread_Get", thread_get_time, OPERATION_COUNT, 0, 0 );
  put_time( "_Semaphore_Get", semaphore_get_time, OPERATION_COUNT, 0, 0 );
  put_time(
    "_Thread_Get: invalid id",
    thread_get_invalid_time,
    OPERATION_COUNT,
    0,
    0
  );

  puts( "*** END OF TEST 26 ***" );
  rtems_test_exit( 0 );
}
/*
 * Block @a the_thread on @a the_thread_queue in @a state, optionally
 * arming a timeout watchdog, then complete the enqueue under the thread
 * queue lock — unless an ISR already satisfied or timed out the blocking
 * operation, in which case it is finalized (undone) instead.
 */
void _Thread_queue_Enqueue(
  Thread_queue_Control *the_thread_queue,
  Thread_Control       *the_thread,
  States_Control        state,
  Watchdog_Interval     timeout
)
{
  ISR_lock_Context                 lock_context;
  Thread_blocking_operation_States sync_state;

  /*
   * NOTE: the `else` below pairs with the MP `if` across the #endif, so in
   * multiprocessing builds a remote receive thread gets a proxy INSTEAD of
   * having its state set here.
   */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet )
    the_thread = _Thread_MP_Allocate_proxy( state );
  else
#endif
  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, state );

  /*
   * If the thread wants to timeout, then schedule its timer.
   */
  if ( timeout ) {
    _Watchdog_Initialize(
      &the_thread->Timer,
      _Thread_queue_Timeout,
      the_thread->Object.id,
      NULL
    );
    _Watchdog_Insert_ticks( &the_thread->Timer, timeout );
  }

  /*
   * Now initiate the enqueuing and checking if the blocking operation
   * should be completed or the thread has had its blocking condition
   * satisfied before we got here.
   */
  _Thread_queue_Acquire( &lock_context );

  sync_state = the_thread_queue->sync_state;
  the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;

  if ( sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED ) {
    /*
     * Invoke the discipline specific enqueue method.
     */
    if ( the_thread_queue->discipline == THREAD_QUEUE_DISCIPLINE_FIFO ) {
      _Chain_Append_unprotected(
        &the_thread_queue->Queues.Fifo,
        &the_thread->Object.Node
      );
    } else { /* must be THREAD_QUEUE_DISCIPLINE_PRIORITY */
      /* Install a handler so priority changes requeue the thread. */
      _Thread_Lock_set( the_thread, &_Thread_queue_Lock );
      _Thread_Priority_set_change_handler(
        the_thread,
        _Thread_queue_Requeue_priority,
        the_thread_queue
      );
      _RBTree_Insert(
        &the_thread_queue->Queues.Priority,
        &the_thread->RBNode,
        _Thread_queue_Compare_priority,
        false
      );
    }

    the_thread->Wait.queue = the_thread_queue;
    the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;
    _Thread_queue_Release( &lock_context );
  } else {
    /* Cancel a blocking operation due to ISR */
    _Assert(
      sync_state == THREAD_BLOCKING_OPERATION_TIMEOUT ||
        sync_state == THREAD_BLOCKING_OPERATION_SATISFIED
    );
    _Thread_blocking_operation_Finalize( the_thread, &lock_context );
  }
}
/*
 *  14.2.5 High Resolution Sleep, P1003.1b-1993, p. 269
 *
 *  Sleep for the interval in @a rqtp.  A zero-length request yields the
 *  processor.  On wake-up the unslept remainder is stored in @a rmtp (if
 *  non-NULL); when POSIX is enabled a non-zero remainder means the sleep
 *  was interrupted by a signal and EINTR is returned.
 */
int nanosleep(
  const struct timespec *rqtp,
  struct timespec       *rmtp
)
{
  /*
   * It is critical to obtain the executing thread after thread dispatching is
   * disabled on SMP configurations.
   */
  Thread_Control    *executing;
  Watchdog_Interval  ticks;
  Watchdog_Interval  elapsed;

  /*
   *  Return EINVAL if the delay interval is negative.
   *
   *  NOTE:  This behavior is beyond the POSIX specification.
   *         FSU and GNU/Linux pthreads shares this behavior.
   */
  if ( !_Timespec_Is_valid( rqtp ) )
    rtems_set_errno_and_return_minus_one( EINVAL );

  /*
   * Convert the timespec delay into the appropriate number of clock ticks.
   */
  ticks = _Timespec_To_ticks( rqtp );

  /*
   *  A nanosleep for zero time is implemented as a yield.
   *  This behavior is also beyond the POSIX specification but is
   *  consistent with the RTEMS API and yields desirable behavior.
   */
  if ( !ticks ) {
    _Thread_Disable_dispatch();
      executing = _Thread_Executing;
      _Thread_Yield( executing );
    _Thread_Enable_dispatch();
    if ( rmtp ) {
      rmtp->tv_sec = 0;
      rmtp->tv_nsec = 0;
    }
    return 0;
  }

  /*
   *  Block for the desired amount of time
   */
  _Thread_Disable_dispatch();
    executing = _Thread_Executing;
    /* INTERRUPTIBLE_BY_SIGNAL lets a POSIX signal end the sleep early. */
    _Thread_Set_state(
      executing,
      STATES_DELAYING | STATES_INTERRUPTIBLE_BY_SIGNAL
    );
    _Watchdog_Initialize(
      &executing->Timer,
      _Thread_Delay_ended,
      0,
      executing
    );
    _Watchdog_Insert_ticks( &executing->Timer, ticks );
  _Thread_Enable_dispatch();

  /*
   * Calculate the time that passed while we were sleeping and how
   * much remains from what we requested.
   */
  elapsed = executing->Timer.stop_time - executing->Timer.start_time;
  if ( elapsed >= ticks )
    ticks = 0;
  else
    ticks -= elapsed;

  /*
   * If the user wants the time remaining, do the conversion.
   */
  if ( rmtp ) {
    _Timespec_From_ticks( ticks, rmtp );
  }

  /*
   *  Only when POSIX is enabled, can a sleep be interrupted.
   */
  #if defined(RTEMS_POSIX_API)
    /*
     *  If there is time remaining, then we were interrupted by a signal.
     */
    if ( ticks )
      rtems_set_errno_and_return_minus_one( EINTR );
  #endif

  return 0;
}
/*
 * Terminate a POSIX thread (allocator-mutex era variant), delivering
 * @a value_ptr to joiners, then close and free the thread.
 *
 * For a joinable thread: hand the exit value to every queued joiner, or —
 * if nobody has joined yet — block in WAITING_FOR_JOIN_AT_EXIT (with
 * TRANSIENT) until a joiner arrives.
 */
void _POSIX_Thread_Exit(
  Thread_Control *the_thread,
  void           *value_ptr
)
{
  Objects_Information *the_information;
  Thread_Control      *unblocked;
  POSIX_API_Control   *api;

  the_information = _Objects_Get_information_id( the_thread->Object.id );

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  /*
   * The_information has to be non-NULL.  Otherwise, we couldn't be
   * running in a thread of this API and class.
   *
   * NOTE: Lock and unlock in different order so we do not throw a
   *       fatal error when locking the allocator mutex.  And after
   *       we unlock, we want to defer the context switch until we
   *       are ready to be switched out.  Otherwise, an ISR could
   *       occur and preempt us out while we still hold the
   *       allocator mutex.
   */
  _RTEMS_Lock_allocator();
  _Thread_Disable_dispatch();

  the_thread->Wait.return_argument = value_ptr;

  /*
   * Process join
   */
  if ( api->detachstate == PTHREAD_CREATE_JOINABLE ) {
    unblocked = _Thread_queue_Dequeue( &api->Join_List );
    if ( unblocked ) {
      /* Give the exit value to every waiting joiner. */
      do {
        *(void **) unblocked->Wait.return_argument = value_ptr;
      } while ( ( unblocked = _Thread_queue_Dequeue( &api->Join_List ) ) );
    } else {
      /* No joiner yet: drop both locks and block until one arrives. */
      _Thread_Set_state(
        the_thread,
        STATES_WAITING_FOR_JOIN_AT_EXIT | STATES_TRANSIENT
      );
      _RTEMS_Unlock_allocator();
      _Thread_Enable_dispatch();
      /* now waiting for thread to arrive */
      _RTEMS_Lock_allocator();
      _Thread_Disable_dispatch();
    }
  }

  /*
   * Now shut down the thread
   */
  _Thread_Close( the_information, the_thread );

  _POSIX_Threads_Free( the_thread );

  _RTEMS_Unlock_allocator();
  _Thread_Enable_dispatch();
}
/**
 * @brief Timer server body.
 *
 * This is the server for task based timers.  This task executes whenever a
 * task-based timer should fire.  It services both "after" and "when" timers.
 * It is not created automatically but must be created explicitly by the
 * application before task-based timers may be initiated.  The parameter
 * @a arg points to the corresponding timer server control block.
 */
static rtems_task _Timer_server_Body(
  rtems_task_argument arg
)
{
  Timer_server_Control *ts = (Timer_server_Control *) arg;
  Chain_Control         insert_chain;
  Chain_Control         fire_chain;

  _Chain_Initialize_empty( &insert_chain );
  _Chain_Initialize_empty( &fire_chain );

  /* The server loops forever: fire due watchdogs, or block until one is due. */
  while ( true ) {
    _Timer_server_Get_watchdogs_that_fire_now( ts, &insert_chain, &fire_chain );

    if ( !_Chain_Is_empty( &fire_chain ) ) {
      /*
       * Fire the watchdogs.
       */
      while ( true ) {
        Watchdog_Control *watchdog;
        ISR_Level         level;

        /*
         * It is essential that interrupts are disable here since an interrupt
         * service routine may remove a watchdog from the chain.
         */
        _ISR_Disable( level );
        watchdog = (Watchdog_Control *) _Chain_Get_unprotected( &fire_chain );
        if ( watchdog != NULL ) {
          watchdog->state = WATCHDOG_INACTIVE;
          _ISR_Enable( level );
        } else {
          _ISR_Enable( level );
          break;
        }

        /*
         * The timer server may block here and wait for resources or time.
         * The system watchdogs are inactive and will remain inactive since
         * the active flag of the timer server is true.
         */
        (*watchdog->routine)( watchdog->id, watchdog->user_data );
      }
    } else {
      ts->active = false;

      /*
       * Block until there is something to do.
       */
      _Thread_Disable_dispatch();
        _Thread_Set_state( ts->thread, STATES_DELAYING );
        _Timer_server_Reset_interval_system_watchdog( ts );
        _Timer_server_Reset_tod_system_watchdog( ts );
      _Thread_Enable_dispatch();

      ts->active = true;

      /*
       * Maybe an interrupt did reset the system timers, so we have to stop
       * them here.  Since we are active now, there will be no more resets
       * until we are inactive again.
       */
      _Timer_server_Stop_interval_system_watchdog( ts );
      _Timer_server_Stop_tod_system_watchdog( ts );
    }
  }
}
/*
 * Attempt to seize the events in @a event_in for the executing thread.
 *
 * Satisfied immediately (ANY option, or all requested events pending):
 * clears the seized events and returns them in @a event_out.  NO_WAIT and
 * unsatisfied: returns RTEMS_UNSATISFIED via Wait.return_code.  Otherwise
 * blocks in STATES_WAITING_FOR_EVENT, with an optional tick timeout, using
 * the _Event_Sync_state protocol to detect an ISR that satisfies or times
 * out the request while we are in the process of blocking.
 */
void _Event_Seize(
  rtems_event_set  event_in,
  rtems_option     option_set,
  rtems_interval   ticks,
  rtems_event_set *event_out
)
{
  Thread_Control                   *executing;
  rtems_event_set                   seized_events;
  rtems_event_set                   pending_events;
  ISR_Level                         level;
  RTEMS_API_Control                *api;
  Thread_blocking_operation_States  sync_state;

  executing = _Thread_Executing;
  executing->Wait.return_code = RTEMS_SUCCESSFUL;

  api = executing->API_Extensions[ THREAD_API_RTEMS ];

  _ISR_Disable( level );
  pending_events = api->pending_events;
  seized_events  = _Event_sets_Get( pending_events, event_in );

  /* Immediately satisfied: all requested events pending, or ANY option. */
  if ( !_Event_sets_Is_empty( seized_events ) &&
       ( seized_events == event_in || _Options_Is_any( option_set ) ) ) {
    api->pending_events =
      _Event_sets_Clear( pending_events, seized_events );
    _ISR_Enable( level );
    *event_out = seized_events;
    return;
  }

  if ( _Options_Is_no_wait( option_set ) ) {
    _ISR_Enable( level );
    executing->Wait.return_code = RTEMS_UNSATISFIED;
    *event_out = seized_events;
    return;
  }

  /*
   *  Note what we are waiting for BEFORE we enter the critical section.
   *  The interrupt critical section management code needs this to be
   *  set properly when we are marked as in the event critical section.
   *
   *  NOTE: Since interrupts are disabled, this isn't that much of an
   *        issue but better safe than sorry.
   */
  executing->Wait.option          = (uint32_t) option_set;
  executing->Wait.count           = (uint32_t) event_in;
  executing->Wait.return_argument = event_out;

  _Event_Sync_state = THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;

  _ISR_Enable( level );

  if ( ticks ) {
    _Watchdog_Initialize(
      &executing->Timer,
      _Event_Timeout,
      executing->Object.id,
      NULL
    );
    _Watchdog_Insert_ticks( &executing->Timer, ticks );
  }

  _Thread_Set_state( executing, STATES_WAITING_FOR_EVENT );

  /* Check whether an ISR completed the request while we were blocking. */
  _ISR_Disable( level );

  sync_state = _Event_Sync_state;
  _Event_Sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;
  if ( sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED ) {
    _ISR_Enable( level );
    return;
  }

  /*
   *  An interrupt completed the thread's blocking request.
   *  The blocking thread was satisfied by an ISR or timed out.
   *
   *  WARNING! Returning with interrupts disabled!
   */
  _Thread_blocking_operation_Cancel( sync_state, executing, level );
}
/*
 * Close and tear down @a the_thread: invalidate its ID, run the user
 * delete extensions, close the object, set it dormant, detach it from any
 * thread queue or watchdog, and free its scheduler data, FP context,
 * stack, extensions, and TLS area.
 *
 * Pre-condition: caller holds the allocator mutex and is inside a
 * dispatch-disabled section (see the unnest/re-disable dance below).
 */
void _Thread_Close(
  Objects_Information  *information,
  Thread_Control       *the_thread
)
{
  /*
   * Now we are in a dispatching critical section again and we
   * can take the thread OUT of the published set.  It is invalid
   * to use this thread's Id after this call.  This will prevent
   * any other task from attempting to initiate a call on this task.
   */
  _Objects_Invalidate_Id( information, &the_thread->Object );

  /*
   * We assume the Allocator Mutex is locked when we get here.
   * This provides sufficient protection to let the user extensions
   * run but as soon as we get back, we will make the thread
   * disappear and set a transient state on it.  So we temporarily
   * unnest dispatching.
   */
  _Thread_Unnest_dispatch();

  _User_extensions_Thread_delete( the_thread );

  _Thread_Disable_dispatch();

  /*
   * Now we are in a dispatching critical section again and we
   * can take the thread OUT of the published set.  It is invalid
   * to use this thread's Id OR name after this call.
   */
  _Objects_Close( information, &the_thread->Object );

  /*
   * By setting the dormant state, the thread will not be considered
   * for scheduling when we remove any blocking states.
   */
  _Thread_Set_state( the_thread, STATES_DORMANT );

  if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
    if ( _Watchdog_Is_active( &the_thread->Timer ) )
      (void) _Watchdog_Remove( &the_thread->Timer );
  }

  /*
   * Free the per-thread scheduling information.
   */
  _Scheduler_Free( the_thread );

  /*
   * The thread might have been FP.  So deal with that.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif
  the_thread->fp_context = NULL;

  _Workspace_Free( the_thread->Start.fp_context );
#endif

  /*
   * Free the rest of the memory associated with this task
   * and set the associated pointers to NULL for safety.
   */
  _Thread_Stack_Free( the_thread );
  the_thread->Start.stack = NULL;

  _Workspace_Free( the_thread->extensions );
  the_thread->extensions = NULL;

  _Workspace_Free( the_thread->Start.tls_area );
}
int nanosleep( const struct timespec *rqtp, struct timespec *rmtp ) { Watchdog_Interval ticks; /* * Return EINVAL if the delay interval is negative. * * NOTE: This behavior is beyond the POSIX specification. * FSU and GNU/Linux pthreads shares this behavior. */ if ( !_Timespec_Is_valid( rqtp ) ) rtems_set_errno_and_return_minus_one( EINVAL ); ticks = _Timespec_To_ticks( rqtp ); /* * A nanosleep for zero time is implemented as a yield. * This behavior is also beyond the POSIX specification but is * consistent with the RTEMS API and yields desirable behavior. */ if ( !ticks ) { _Thread_Disable_dispatch(); _Scheduler_Yield(); _Thread_Enable_dispatch(); if ( rmtp ) { rmtp->tv_sec = 0; rmtp->tv_nsec = 0; } return 0; } /* * Block for the desired amount of time */ _Thread_Disable_dispatch(); _Thread_Set_state( _Thread_Executing, STATES_DELAYING | STATES_INTERRUPTIBLE_BY_SIGNAL ); _Watchdog_Initialize( &_Thread_Executing->Timer, _Thread_Delay_ended, _Thread_Executing->Object.id, NULL ); _Watchdog_Insert_ticks( &_Thread_Executing->Timer, ticks ); _Thread_Enable_dispatch(); /* calculate time remaining */ if ( rmtp ) { ticks -= _Thread_Executing->Timer.stop_time - _Thread_Executing->Timer.start_time; _Timespec_From_ticks( ticks, rmtp ); /* * Only when POSIX is enabled, can a sleep be interrupted. */ #if defined(RTEMS_POSIX_API) /* * If there is time remaining, then we were interrupted by a signal. */ if ( ticks ) rtems_set_errno_and_return_minus_one( EINTR ); #endif } return 0; }
/*
 *  _Thread_queue_Enqueue
 *
 *  Enqueues the thread on the thread queue and blocks it, honoring the
 *  timeout in the queue context.  The caller must hold the queue lock
 *  (released here) and have registered an enqueue callout.
 *
 *  On deadlock detection the thread is not enqueued; the deadlock callout
 *  is invoked instead and the function returns.
 */
void _Thread_queue_Enqueue(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  _Assert( queue_context->enqueue_callout != NULL );
  _Assert( (uint8_t) queue_context->timeout_discipline != 0x7f );

#if defined(RTEMS_MULTIPROCESSING)
  /* a remote receive thread blocks via a local proxy object */
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  /*
   * Acquire the lock path towards the queue owner.  Failure indicates a
   * deadlock: undo the claim, release all locks, and report via callout.
   */
  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  /*
   * Announce the intent to block BEFORE dropping the queue lock, so that
   * a concurrent satisfier/timeout can only transition our wait flags,
   * never miss the wakeup.
   */
  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  ( *queue_context->enqueue_callout )( queue, the_thread, queue_context );

  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   * If the thread wants to timeout, then schedule its timer.
   */
  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );

  /*
   * At this point thread dispatching is disabled, however, we already
   * released the thread queue lock.  Thus, interrupts or threads on other
   * processors may already changed our state with respect to the thread
   * queue object.  The request could be satisfied or timed out.  This
   * situation is indicated by the thread wait flags.  Other parties must
   * not modify our thread state as long as we are in the
   * THREAD_QUEUE_INTEND_TO_BLOCK thread wait state, thus we have to cancel
   * the blocking operation ourself if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    /* the request was already satisfied or timed out: undo the block */
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}
void complete_test( void ) { uint32_t index; rtems_id task_id; ISR_lock_Context lock_context; Thread_queue_Context queue_context; benchmark_timer_initialize(); thread_resume( Middle_tcb ); thread_resume_time = benchmark_timer_read(); _Thread_Set_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE ); benchmark_timer_initialize(); _Thread_Unblock( Middle_tcb ); thread_unblock_time = benchmark_timer_read(); _Thread_Set_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE ); benchmark_timer_initialize(); _Thread_Clear_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE ); thread_ready_time = benchmark_timer_read(); benchmark_timer_initialize(); for ( index=1 ; index <= OPERATION_COUNT ; index++ ) (void) benchmark_timer_empty_function(); overhead = benchmark_timer_read(); task_id = Middle_tcb->Object.id; benchmark_timer_initialize(); for ( index=1 ; index <= OPERATION_COUNT ; index++ ) { (void) _Thread_Get( task_id, &lock_context ); _ISR_lock_ISR_enable( &lock_context ); } thread_get_time = benchmark_timer_read(); benchmark_timer_initialize(); for ( index=1 ; index <= OPERATION_COUNT ; index++ ) { (void) _Semaphore_Get( Semaphore_id, &queue_context ); _ISR_lock_ISR_enable( &queue_context.Lock_context ); } semaphore_get_time = benchmark_timer_read(); benchmark_timer_initialize(); for ( index=1 ; index <= OPERATION_COUNT ; index++ ) { (void) _Thread_Get( 0x3, &lock_context ); _ISR_lock_ISR_enable( &lock_context ); } thread_get_invalid_time = benchmark_timer_read(); /* * This is the running task and we have tricked RTEMS out enough where * we need to set some internal tracking information to match this. 
*/ set_thread_heir( _Thread_Get_executing() ); set_thread_dispatch_necessary( false ); /* * Now dump all the times */ put_time( "rtems interrupt: _ISR_Local_disable", isr_disable_time, 1, 0, 0 ); put_time( "rtems interrupt: _ISR_Local_flash", isr_flash_time, 1, 0, 0 ); put_time( "rtems interrupt: _ISR_Local_enable", isr_enable_time, 1, 0, 0 ); put_time( "rtems internal: _Thread_Dispatch_disable", thread_disable_dispatch_time, 1, 0, 0 ); put_time( "rtems internal: _Thread_Dispatch_enable", thread_enable_dispatch_time, 1, 0, 0 ); put_time( "rtems internal: _Thread_Set_state", thread_set_state_time, 1, 0, 0 ); put_time( "rtems internal: _Thread_Dispatch NO FP", thread_dispatch_no_fp_time, 1, 0, 0 ); put_time( "rtems internal: context switch: no floating point contexts", context_switch_no_fp_time, 1, 0, 0 ); put_time( "rtems internal: context switch: self", context_switch_self_time, 1, 0, 0 ); put_time( "rtems internal: context switch to another task", context_switch_another_task_time, 1, 0, 0 ); #if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1) put_time( "rtems internal: fp context switch restore 1st FP task", context_switch_restore_1st_fp_time, 1, 0, 0 ); put_time( "rtems internal: fp context switch save idle and restore initialized", context_switch_save_idle_restore_initted_time, 1, 0, 0 ); put_time( "rtems internal: fp context switch save idle, restore idle", context_switch_save_restore_idle_time, 1, 0, 0 ); put_time( "rtems internal: fp context switch save initialized, restore initialized", context_switch_save_restore_initted_time, 1, 0, 0 ); #else puts( "rtems internal: fp context switch restore 1st FP task - NA\n" "rtems internal: fp context switch save idle restore initialized - NA\n" "rtems internal: fp context switch save idle restore idle - NA\n" "rtems internal: fp context switch save initialized\n" " restore initialized - NA" ); #endif put_time( "rtems internal: _Thread_Resume", thread_resume_time, 1, 0, 0 ); put_time( "rtems internal: _Thread_Unblock", 
thread_unblock_time, 1, 0, 0 ); put_time( "rtems internal: _Thread_Ready", thread_ready_time, 1, 0, 0 ); put_time( "rtems internal: _Thread_Get", thread_get_time, OPERATION_COUNT, 0, 0 ); put_time( "rtems internal: _Semaphore_Get", semaphore_get_time, OPERATION_COUNT, 0, 0 ); put_time( "rtems internal: _Thread_Get: invalid id", thread_get_invalid_time, OPERATION_COUNT, 0, 0 ); TEST_END(); rtems_test_exit( 0 ); }
/*
 *  rtems_rate_monotonic_period
 *
 *  Initiates or concludes a period for the rate monotonic object @a id.
 *  With RTEMS_PERIOD_STATUS, reports the current state without blocking.
 *  Otherwise: starts an INACTIVE period, blocks on an ACTIVE one until its
 *  watchdog fires, or restarts an EXPIRED one returning RTEMS_TIMEOUT.
 *  Only the period's owner thread may call this.
 */
rtems_status_code rtems_rate_monotonic_period(
  rtems_id       id,
  rtems_interval length
)
{
  Rate_monotonic_Control              *the_period;
  Objects_Locations                    location;
  rtems_status_code                    return_value;
  rtems_rate_monotonic_period_states   local_state;
  ISR_Level                            level;

  the_period = _Rate_monotonic_Get( id, &location );

  switch ( location ) {
    case OBJECTS_LOCAL:
      /* only the owning thread may operate on the period */
      if ( !_Thread_Is_executing( the_period->owner ) ) {
        _Thread_Enable_dispatch();
        return RTEMS_NOT_OWNER_OF_RESOURCE;
      }

      /* status query: map the period state to a status code, no blocking */
      if ( length == RTEMS_PERIOD_STATUS ) {
        switch ( the_period->state ) {
          case RATE_MONOTONIC_INACTIVE:
            return_value = RTEMS_NOT_DEFINED;
            break;
          case RATE_MONOTONIC_EXPIRED:
          case RATE_MONOTONIC_EXPIRED_WHILE_BLOCKING:
            return_value = RTEMS_TIMEOUT;
            break;
          case RATE_MONOTONIC_ACTIVE:
          default:              /* unreached -- only to remove warnings */
            return_value = RTEMS_SUCCESSFUL;
            break;
        }
        _Thread_Enable_dispatch();
        return( return_value );
      }

      _ISR_Disable( level );

      /* first activation: arm the period watchdog and start statistics */
      if ( the_period->state == RATE_MONOTONIC_INACTIVE ) {
        _ISR_Enable( level );

        the_period->next_length = length;

        /*
         *  Baseline statistics information for the beginning of a period.
         */
        _Rate_monotonic_Initiate_statistics( the_period );

        the_period->state = RATE_MONOTONIC_ACTIVE;
        _Watchdog_Initialize(
          &the_period->Timer,
          _Rate_monotonic_Timeout,
          id,
          NULL
        );

        _Watchdog_Insert_ticks( &the_period->Timer, length );
        _Thread_Enable_dispatch();
        return RTEMS_SUCCESSFUL;
      }

      /* period still running: conclude it and block until the timeout */
      if ( the_period->state == RATE_MONOTONIC_ACTIVE ) {
        /*
         *  Update statistics from the concluding period.
         */
        _Rate_monotonic_Update_statistics( the_period );

        /*
         *  This tells the _Rate_monotonic_Timeout that this task is
         *  in the process of blocking on the period and that we
         *  may be changing the length of the next period.
         */
        the_period->state = RATE_MONOTONIC_OWNER_IS_BLOCKING;
        the_period->next_length = length;

        _ISR_Enable( level );

        _Thread_Executing->Wait.id = the_period->Object.id;
        _Thread_Set_state( _Thread_Executing, STATES_WAITING_FOR_PERIOD );

        /*
         *  Did the watchdog timer expire while we were actually blocking
         *  on it?  Re-read the state under ISR protection to find out.
         */
        _ISR_Disable( level );
          local_state = the_period->state;
          the_period->state = RATE_MONOTONIC_ACTIVE;
        _ISR_Enable( level );

        /*
         *  If it did, then we want to unblock ourself and continue as
         *  if nothing happen.  The period was reset in the timeout routine.
         */
        if ( local_state == RATE_MONOTONIC_EXPIRED_WHILE_BLOCKING )
          _Thread_Clear_state( _Thread_Executing, STATES_WAITING_FOR_PERIOD );

        _Thread_Enable_dispatch();
        return RTEMS_SUCCESSFUL;
      }

      /* deadline was missed: restart the period and report the overrun */
      if ( the_period->state == RATE_MONOTONIC_EXPIRED ) {
        /*
         *  Update statistics from the concluding period
         */
        _Rate_monotonic_Update_statistics( the_period );

        _ISR_Enable( level );

        the_period->state = RATE_MONOTONIC_ACTIVE;
        the_period->next_length = length;

        _Watchdog_Insert_ticks( &the_period->Timer, length );
        _Scheduler_Release_job(the_period->owner, the_period->next_length);
        _Thread_Enable_dispatch();
        return RTEMS_TIMEOUT;
      }

      /*
       *  These should never happen so just return invalid Id.
       *    - RATE_MONOTONIC_OWNER_IS_BLOCKING:
       *    - RATE_MONOTONIC_EXPIRED_WHILE_BLOCKING:
       */

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:            /* should never return this */
#endif
    case OBJECTS_ERROR:
      break;
  }

  return RTEMS_INVALID_ID;
}