/*
 * Turn a terminating thread into a zombie: close its object identifier,
 * move it to STATES_ZOMBIE, detach it from any thread queue and watchdog,
 * then publish it on the zombie chain and wake joining threads.
 */
static void _Thread_Make_zombie( Thread_Control *the_thread )
{
#if defined(RTEMS_SCORE_THREAD_ENABLE_RESOURCE_COUNT)
  /* A thread still holding counted resources must not terminate. */
  if ( _Thread_Owns_resources( the_thread ) ) {
    _Internal_error( INTERNAL_ERROR_RESOURCE_IN_USE );
  }
#endif

  /* Close the object so the thread identifier is no longer valid. */
  _Objects_Close(
    _Objects_Get_information_id( the_thread->Object.id ),
    &the_thread->Object
  );

  _Thread_Set_state( the_thread, STATES_ZOMBIE );

  /* Remove the thread from any thread queue it is enqueued on. */
  _Thread_queue_Extract_with_proxy( the_thread );

  /* Cancel any pending timeout associated with the thread. */
  _Thread_Timer_remove( the_thread );

  /*
   * Add the thread to the thread zombie chain before we wake up joining
   * threads, so that they are able to clean up the thread immediately.  This
   * matters for SMP configurations.
   */
  _Thread_Add_to_zombie_chain( the_thread );
  _Thread_Wake_up_joining_threads( the_thread );
}
static void rtems_libio_init( void ) { uint32_t i; rtems_libio_t *iop; int eno; if (rtems_libio_number_iops > 0) { iop = rtems_libio_iop_free_head = &rtems_libio_iops[0]; for (i = 0 ; (i + 1) < rtems_libio_number_iops ; i++, iop++) iop->data1 = iop + 1; iop->data1 = NULL; rtems_libio_iop_free_tail = &iop->data1; } /* * Create the posix key for user environment. */ eno = pthread_key_create( &rtems_current_user_env_key, rtems_libio_free_user_env ); if (eno != 0) { _Internal_error( INTERNAL_ERROR_LIBIO_USER_ENV_KEY_CREATE_FAILED ); } }
/*
 * Enqueue the thread on the thread queue using the sticky protocol
 * (SMP synchronization via a busy wait instead of blocking the scheduler
 * node).  On a detected deadlock the deadlock callout is invoked and the
 * resulting wait status is returned; otherwise the caller spins until the
 * INTEND_TO_BLOCK state is resolved by the surrendering thread.
 *
 * @param queue         The thread queue to enqueue on.
 * @param operations    The thread queue operations (enqueue callout, etc.).
 * @param the_thread    The executing thread to enqueue.
 * @param queue_context The queue context; its lock context must hold the
 *                      queue lock on entry (released before return).
 *
 * @return The thread wait status after the enqueue completed or failed.
 */
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;

  _Thread_Wait_claim( the_thread, queue );

  /* Deadlock detection: try to acquire the full resource/owner path. */
  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    /* Deadlock: undo the wait claim, release locks, notify the callout. */
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );

  /* Disable thread dispatching before dropping the queue lock. */
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  /*
   * The sticky protocol requires that we were called with thread
   * dispatching enabled (disable level exactly one now).
   */
  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  /* Arm the optional timeout and propagate priority changes. */
  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
  _Thread_Priority_update( queue_context );
  _Thread_Priority_and_sticky_update( the_thread, 1 );
  _Thread_Dispatch_enable( cpu_self );

  /*
   * Busy wait until the thread owning the resource resolves our
   * INTEND_TO_BLOCK state (acquire semantics pair with the releasing
   * store on the other processor).
   */
  while (
    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
void *_Workspace_Allocate_or_fatal_error( size_t size ) { void *memory; memory = _Heap_Allocate( &_Workspace_Area, size ); #if defined(DEBUG_WORKSPACE) printk( "Workspace_Allocate_or_fatal_error(%d) from %p/%p -> %p\n", size, __builtin_return_address( 0 ), __builtin_return_address( 1 ), memory ); #endif if ( memory == NULL ) _Internal_error( INTERNAL_ERROR_WORKSPACE_ALLOCATION ); return memory; }
/*
 * Initialize the workspace heap from the supplied memory areas.  The first
 * suitable area initializes the heap; subsequent areas are added via the
 * extend handler.  Raises a fatal error if the configured workspace size
 * cannot be satisfied by the available areas.
 *
 * @param areas      Array of memory areas available for the workspace.
 * @param area_count Number of entries in areas.
 * @param extend     Handler used to add further areas to the heap after
 *                   the first one initialized it.
 */
void _Workspace_Handler_initialization(
  Heap_Area *areas,
  size_t area_count,
  Heap_Initialization_or_extend_handler extend
)
{
  /* First area initializes the heap; afterwards we switch to extend. */
  Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
  /* Workspace bytes still to be satisfied from the areas. */
  uintptr_t remaining = rtems_configuration_get_work_space_size();
  bool do_zero = rtems_configuration_get_do_zero_of_workspace();
  bool unified = rtems_configuration_get_unified_work_area();
  uintptr_t page_size = CPU_HEAP_ALIGNMENT;
  uintptr_t overhead = _Heap_Area_overhead( page_size );
  uintptr_t tls_size = _TLS_Get_size();
  size_t i;

  /*
   * In case we have a non-zero TLS size, then we need a TLS area for each
   * thread.  These areas are allocated from the workspace.  Ensure that the
   * workspace is large enough to fulfill all requests known at configuration
   * time (so excluding the unlimited option).  It is not possible to estimate
   * the TLS size in the configuration at compile-time.  The TLS size is
   * determined at application link-time.
   */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    /*
     * Memory allocated with an alignment constraint is allocated from the end
     * of a free block.  The last allocation may need one free block of minimum
     * size.
     */
    remaining += _Heap_Min_block_size( page_size );

    remaining += _Get_maximum_thread_count()
      * _Heap_Size_with_overhead( page_size, tls_alloc, tls_align );
  }

  for (i = 0; i < area_count; ++i) {
    Heap_Area *area = &areas [i];

    if ( do_zero ) {
      memset( area->begin, 0, area->size );
    }

    /* Skip areas too small to hold anything beyond the heap overhead. */
    if ( area->size > overhead ) {
      uintptr_t space_available;
      uintptr_t size;

      if ( unified ) {
        /* Unified work area: hand the whole area to the workspace heap. */
        size = area->size;
      } else {
        if ( remaining > 0 ) {
          /* Take only what is still needed (plus overhead), capped by area. */
          size = remaining < area->size - overhead ?
            remaining + overhead : area->size;
        } else {
          size = 0;
        }
      }

      space_available = (*init_or_extend)(
        &_Workspace_Area,
        area->begin,
        size,
        page_size
      );

      /* Consume the used prefix so leftover memory remains in the area. */
      area->begin = (char *) area->begin + size;
      area->size -= size;

      if ( space_available < remaining ) {
        remaining -= space_available;
      } else {
        remaining = 0;
      }

      init_or_extend = extend;
    }
  }

  /* The configured workspace size could not be satisfied: fatal error. */
  if ( remaining > 0 ) {
    _Internal_error( INTERNAL_ERROR_TOO_LITTLE_WORKSPACE );
  }

  _Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
}
/*
 * Entry point wrapper executed the first time a thread runs.  It completes
 * the first-dispatch environment (context, ISR level, FP context, dispatch
 * level), runs the thread-begin extensions, then invokes the user entry
 * point via the API-specific adaptor.  If the entry point ever returns,
 * the thread-exitted extensions run and a fatal error is raised.
 */
void _Thread_Handler( void )
{
  Thread_Control *executing;
  ISR_Level       level;
  Per_CPU_Control *cpu_self;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();
  executing = _Thread_Executing;

  /*
   * have to put level into a register for those cpu's that use
   * inline asm here
   */
  level = executing->Start.isr_level;
  _ISR_Set_level( level );

  /*
   * Initialize the floating point context because we do not come
   * through _Thread_Dispatch on our first invocation.  So the normal
   * code path for performing the FP context switch is not hit.
   */
  _Thread_Restore_fp( executing );

  /*
   * Do not use the level of the thread control block, since it has a
   * different format.
   */
  _ISR_Local_disable( level );

  /*
   * At this point, the dispatch disable level BETTER be 1.
   */
  cpu_self = _Per_CPU_Get();
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

  /*
   * Make sure we lose no thread dispatch necessary update and execute the
   * post-switch actions.  As a side-effect change the thread dispatch level
   * from one to zero.  Do not use _Thread_Enable_dispatch() since there is no
   * valid thread dispatch necessary indicator in this context.
   */
  _Thread_Do_dispatch( cpu_self, level );

  /*
   * Invoke the thread begin extensions in the context of the thread entry
   * function with thread dispatching enabled.  This enables use of dynamic
   * memory allocation, creation of POSIX keys and use of C++ thread local
   * storage.  Blocking synchronization primitives are allowed also.
   */
  _User_extensions_Thread_begin( executing );

  /*
   * RTEMS supports multiple APIs and each API can define a different
   * thread/task prototype.  The following code supports invoking the
   * user thread entry point using the prototype expected.
   */
  ( *executing->Start.Entry.adaptor )( executing );

  /*
   * In the call above, the return code from the user thread body which
   * returned something was placed in return_argument.  This assumed that if
   * it returned anything (which is not supported in all APIs), then it would
   * be able to fit in a (void *).
   */

  _User_extensions_Thread_exitted( executing );

  /* Returning from the thread body is a fatal error. */
  _Internal_error( INTERNAL_ERROR_THREAD_EXITTED );
}
/*
 * Deadlock callout variant that treats a detected thread queue deadlock as
 * a fatal error and does not return.
 *
 * @param the_thread The thread involved in the deadlock; unused here, the
 *                   parameter exists to satisfy the deadlock callout
 *                   signature.
 */
void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  /* Explicitly discard the parameter to avoid -Wunused-parameter noise. */
  (void) the_thread;

  _Internal_error( INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
}
/*
 * Initialize an object class information record: register it in the global
 * object information table, compute the allocation parameters and the
 * minimum object identifier, and (if a non-zero maximum was configured)
 * pre-allocate the first block of objects.
 *
 * @param information         The information record to initialize.
 * @param the_api             The API index (Classic, POSIX, ...).
 * @param the_class           The class index within the API.
 * @param maximum             Configured maximum object count; may carry the
 *                            unlimited flag (auto-extend mode).
 * @param size                Size in bytes of one object of this class.
 * @param is_string           True if object names are strings (only stored
 *                            when string names are enabled).
 * @param maximum_name_length Maximum name length in bytes.
 * @param extract             (multiprocessing only) callout used to extract
 *                            a thread waiting on a global object.
 */
void _Objects_Do_initialize_information(
  Objects_Information *information,
  Objects_APIs the_api,
  uint16_t the_class,
  uint32_t maximum,
  uint16_t size,
  bool is_string,
  uint32_t maximum_name_length
#if defined(RTEMS_MULTIPROCESSING)
  ,
  Objects_Thread_queue_Extract_callout extract
#endif
)
{
  /* Shared empty local table used until real objects exist. */
  static Objects_Control *null_local_table = NULL;
  uint32_t minimum_index;
  Objects_Maximum maximum_per_allocation;

  information->the_api            = the_api;
  information->the_class          = the_class;
  information->size               = size;
  information->local_table        = 0;
  information->inactive_per_block = 0;
  information->object_blocks      = 0;
  information->inactive           = 0;
#if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES)
  information->is_string          = is_string;
#endif

  /*
   * Set the maximum value to 0.  It will be updated when objects are
   * added to the inactive set from _Objects_Extend_information()
   */
  information->maximum = 0;

  /*
   * Register this Object Class in the Object Information Table.
   */
  _Objects_Information_table[ the_api ][ the_class ] = information;

  /*
   * Are we operating in limited or unlimited (e.g. auto-extend) mode.
   */
  information->auto_extend = _Objects_Is_unlimited( maximum );
  maximum_per_allocation   = _Objects_Maximum_per_allocation( maximum );

  /*
   * Unlimited and maximum of zero is illogical.
   */
  if ( information->auto_extend && maximum_per_allocation == 0) {
    _Internal_error( INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0 );
  }

  /*
   * The allocation unit is the maximum value
   */
  information->allocation_size = maximum_per_allocation;

  /*
   * Provide a null local table entry for the case of any empty table.
   */
  information->local_table = &null_local_table;

  /*
   * Calculate minimum and maximum Id's
   */
  minimum_index = (maximum_per_allocation == 0) ? 0 : 1;
  information->minimum_id = _Objects_Build_id(
    the_api,
    the_class,
    _Objects_Local_node,
    minimum_index
  );

  /*
   * Calculate the maximum name length
   *
   * NOTE: Either 4 bytes for Classic API names or an arbitrary
   *       number for POSIX names which are strings that may be
   *       an odd number of bytes.
   */
  information->name_length = maximum_name_length;

  _Chain_Initialize_empty( &information->Inactive );

  /*
   * Initialize objects .. if there are any
   */
  if ( maximum_per_allocation ) {
    /*
     * Always have the maximum size available so the current performance
     * figures are met.  If the user moves past the maximum number then
     * a performance hit is taken.
     */
    _Objects_Extend_information( information );
  }

  /*
   * Take care of multiprocessing
   */
#if defined(RTEMS_MULTIPROCESSING)
  information->extract = extract;
  _RBTree_Initialize_empty( &information->Global_by_id );
  _RBTree_Initialize_empty( &information->Global_by_name );
#endif
}