void _ITRON_Task_Initialize_user_tasks_body( void )
{
  uint32_t                          index;
  uint32_t                          maximum;
  ER                                return_value;
  itron_initialization_tasks_table *user_tasks;

  user_tasks = Configuration_ITRON_API.User_initialization_tasks_table;
  maximum    = Configuration_ITRON_API.number_of_initialization_tasks;

  if ( !user_tasks || maximum == 0 )
    return;

  for ( index = 0 ; index < maximum ; index++ ) {
    return_value = cre_tsk(
      user_tasks[ index ].id,
      &user_tasks[ index ].attributes
    );
    if ( return_value != E_OK )
      _Internal_error_Occurred( INTERNAL_ERROR_ITRON_API, true, return_value );

    return_value = sta_tsk( user_tasks[ index ].id, 0 );
    if ( return_value != E_OK )
      _Internal_error_Occurred( INTERNAL_ERROR_ITRON_API, true, return_value );
  }
}
void _MPCI_Internal_packets_Process_packet(
  MP_packet_Prefix *the_packet_prefix
)
{
  MPCI_Internal_packet *the_packet;
  uint32_t              maximum_nodes;
  uint32_t              maximum_global_objects;

  the_packet = (MPCI_Internal_packet *) the_packet_prefix;

  switch ( the_packet->operation ) {

    case MPCI_PACKETS_SYSTEM_VERIFY:

      maximum_nodes          = the_packet->maximum_nodes;
      maximum_global_objects = the_packet->maximum_global_objects;
      if ( maximum_nodes != _Objects_Maximum_nodes ||
           maximum_global_objects != _Objects_MP_Maximum_global_objects ) {
        _MPCI_Return_packet( the_packet_prefix );
        _Internal_error_Occurred(
          INTERNAL_ERROR_CORE,
          true,
          INTERNAL_ERROR_INCONSISTENT_MP_INFORMATION
        );
      }

      _MPCI_Return_packet( the_packet_prefix );
      break;
  }
}
void rtems_fatal( rtems_fatal_source source, rtems_fatal_code error )
{
  _Internal_error_Occurred( source, false, error );
}
/*
 *  _Workspace_Allocate_or_fatal_error
 */
void *_Workspace_Allocate_or_fatal_error(
  size_t size
)
{
  void *memory;

  memory = _Heap_Allocate( &_Workspace_Area, size );
  #if defined(DEBUG_WORKSPACE)
    printk(
      "Workspace_Allocate_or_fatal_error(%d) from %p/%p -> %p\n",
      size,
      __builtin_return_address( 0 ),
      __builtin_return_address( 1 ),
      memory
    );
  #endif

  if ( memory == NULL )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_WORKSPACE_ALLOCATION
    );

  return memory;
}
uint32_t _Nios2_ISR_Set_level( uint32_t new_level, uint32_t status )
{
  switch ( _Nios2_ISR_Get_status_mask() ) {
    case NIOS2_ISR_STATUS_MASK_IIC:
      if ( new_level == 0 ) {
        status |= NIOS2_STATUS_PIE;
      } else {
        status &= ~NIOS2_STATUS_PIE;
      }
      break;
    case NIOS2_ISR_STATUS_MASK_EIC_IL:
      status &= ~NIOS2_STATUS_IL_MASK;
      status |= (new_level << NIOS2_STATUS_IL_OFFSET) & NIOS2_STATUS_IL_MASK;
      break;
    case NIOS2_ISR_STATUS_MASK_EIC_RSIE:
      if ( new_level == 0 ) {
        status |= NIOS2_STATUS_RSIE;
      } else {
        status &= ~NIOS2_STATUS_RSIE;
      }
      break;
    default:
      /* FIXME */
      _Internal_error_Occurred( INTERNAL_ERROR_CORE, false, 0xdeadbeef );
      break;
  }

  return status;
}
void rtems_shutdown_executive( uint32_t result )
{
  if ( _System_state_Is_up( _System_state_Get() ) ) {
    #if defined(RTEMS_SMP)
      _SMP_Request_other_cores_to_shutdown();
    #endif

    _Per_CPU_Information[0].idle->Wait.return_code = result;
    _System_state_Set( SYSTEM_STATE_SHUTDOWN );
    _Thread_Stop_multitasking();

    /*******************************************************************
     *******************************************************************
     ******     RETURN TO RTEMS_INITIALIZE_START_MULTITASKING()   ******
     ******                 AND THEN TO BOOT_CARD()               ******
     *******************************************************************
     *******************************************************************/
  }
  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP
  );
}
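/*
 * Illustrative sketch (not part of the sources above): a Classic API
 * Init task that runs the application and then terminates the
 * executive through rtems_shutdown_executive().  run_application()
 * and the exit status 0 are assumptions for illustration only.
 */
void run_application( void );   /* hypothetical application body */

rtems_task Init( rtems_task_argument argument )
{
  run_application();
  rtems_shutdown_executive( 0 );   /* does not return */
}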
void *rtems_gxx_getspecific(__gthread_key_t key)
{
  rtems_status_code  status;
  void              *p = 0;

  /* register with RTEMS the buffer that will hold the key values */
  status = rtems_task_variable_get( RTEMS_SELF, (void **)key, &p );
  if ( status == RTEMS_SUCCESSFUL ) {
    /* We do not have to do this, but what the heck ! */
    p = key->val;
  } else {
    /* first time: always set to zero, since the value the other threads
     * are using at the moment of this call is unknown */
    status = rtems_task_variable_add( RTEMS_SELF, (void **)key, key->dtor );
    if ( status != RTEMS_SUCCESSFUL ) {
      _Internal_error_Occurred(
        INTERNAL_ERROR_CORE,
        true,
        INTERNAL_ERROR_GXX_KEY_ADD_FAILED
      );
    }
    key->val = (void *)0;
  }

  #ifdef DEBUG_GXX_WRAPPERS
    printk(
      "gxx_wrappers: getspecific key=%x, ptr=%x, id=%x\n",
      key,
      p,
      rtems_task_self()
    );
  #endif
  return p;
}
/*
 * MUTEX support
 */
void rtems_gxx_mutex_init (__gthread_mutex_t *mutex)
{
  rtems_status_code status;

  #ifdef DEBUG_GXX_WRAPPERS
    printk( "gxx_wrappers: mutex init =%X\n", *mutex );
  #endif

  status = rtems_semaphore_create(
    rtems_build_name( 'G', 'C', 'C', '2' ),
    1,
    RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE |
      RTEMS_INHERIT_PRIORITY | RTEMS_NO_PRIORITY_CEILING | RTEMS_LOCAL,
    0,
    (rtems_id *)mutex
  );
  if ( status != RTEMS_SUCCESSFUL ) {
    #ifdef DEBUG_GXX_WRAPPERS
      printk(
        "gxx_wrappers: mutex init failed %s (%d)\n",
        rtems_status_text(status),
        status
      );
    #endif
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_GXX_MUTEX_INIT_FAILED
    );
  }

  #ifdef DEBUG_GXX_WRAPPERS
    printk( "gxx_wrappers: mutex init complete =%X\n", *mutex );
  #endif
}
void rtems_fatal_error_occurred( uint32_t the_error )
{
  _Internal_error_Occurred( INTERNAL_ERROR_RTEMS_API, FALSE, the_error );

  /* will not return from this routine */
}
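/*
 * Illustrative sketch: an application reporting a fatal,
 * application-defined error code through the Classic API entry point
 * above.  APP_FATAL_NO_DISK, its value, and app_mount_root() are
 * assumptions for illustration, not RTEMS-defined names.
 */
#define APP_FATAL_NO_DISK 0xDEAD0001

void app_mount_root( bool disk_present )
{
  if ( !disk_present )
    rtems_fatal_error_occurred( APP_FATAL_NO_DISK );  /* never returns */
}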
void _RTEMS_tasks_Initialize_user_tasks_body( void )
{
  uint32_t                          index;
  uint32_t                          maximum;
  rtems_id                          id;
  rtems_status_code                 return_value;
  rtems_initialization_tasks_table *user_tasks;

  /*
   *  Move information into local variables
   */
  user_tasks = Configuration_RTEMS_API.User_initialization_tasks_table;
  maximum    = Configuration_RTEMS_API.number_of_initialization_tasks;

  /*
   *  Verify that we have a set of user tasks to iterate
   */
  if ( !user_tasks )
    return;

  /*
   *  Now iterate over the initialization tasks and create/start them.
   */
  for ( index = 0 ; index < maximum ; index++ ) {
    return_value = rtems_task_create(
      user_tasks[ index ].name,
      user_tasks[ index ].initial_priority,
      user_tasks[ index ].stack_size,
      user_tasks[ index ].mode_set,
      user_tasks[ index ].attribute_set,
      &id
    );
    if ( !rtems_is_status_successful( return_value ) )
      _Internal_error_Occurred( INTERNAL_ERROR_RTEMS_API, true, return_value );

    return_value = rtems_task_start(
      id,
      user_tasks[ index ].entry_point,
      user_tasks[ index ].argument
    );
    if ( !rtems_is_status_successful( return_value ) )
      _Internal_error_Occurred( INTERNAL_ERROR_RTEMS_API, true, return_value );
  }
}
Thread _MPCI_Receive_server(
  uint32_t ignored
)
{
  MP_packet_Prefix      *the_packet;
  MPCI_Packet_processor  the_function;
  Thread_Control        *executing;

  executing = _Thread_Get_executing();

  for ( ; ; ) {

    executing->receive_packet = NULL;

    _Thread_Disable_dispatch();
    _CORE_semaphore_Seize(
      &_MPCI_Semaphore,
      executing,
      0,
      true,
      WATCHDOG_NO_TIMEOUT
    );
    _Thread_Enable_dispatch();

    for ( ; ; ) {
      the_packet = _MPCI_Receive_packet();

      if ( !the_packet )
        break;

      executing->receive_packet = the_packet;

      if ( !_Mp_packet_Is_valid_packet_class( the_packet->the_class ) )
        break;

      the_function = _MPCI_Packet_processors[ the_packet->the_class ];

      if ( !the_function )
        _Internal_error_Occurred(
          INTERNAL_ERROR_CORE,
          true,
          INTERNAL_ERROR_BAD_PACKET
        );

      (*the_function)( the_packet );
    }
  }

  return 0;   /* unreached - only to remove warnings */
}
void _ISR_Handler_initialization( void )
{
  _ISR_Nest_level = 0;

  #if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
    _ISR_Vector_table = _Workspace_Allocate_or_fatal_error(
      sizeof(ISR_Handler_entry) * ISR_NUMBER_OF_VECTORS
    );

    _CPU_Initialize_vectors();
  #endif

  #if ( CPU_ALLOCATE_INTERRUPT_STACK == TRUE )
    {
      size_t stack_size = rtems_configuration_get_interrupt_stack_size();

      if ( !_Stack_Is_enough( stack_size ) )
        _Internal_error_Occurred(
          INTERNAL_ERROR_CORE,
          true,
          INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL
        );

      _CPU_Interrupt_stack_low =
        _Workspace_Allocate_or_fatal_error( stack_size );

      _CPU_Interrupt_stack_high = _Addresses_Add_offset(
        _CPU_Interrupt_stack_low,
        stack_size
      );
    }

    #if (CPU_STACK_ALIGNMENT != 0)
      _CPU_Interrupt_stack_high = (void *)
        ((uintptr_t) _CPU_Interrupt_stack_high & ~(CPU_STACK_ALIGNMENT - 1));
    #endif

    /* Interrupt stack might have to be aligned and/or setup
     * in a specific way.
     */
    #if defined(_CPU_Interrupt_stack_setup)
      _CPU_Interrupt_stack_setup(
        _CPU_Interrupt_stack_low,
        _CPU_Interrupt_stack_high
      );
    #endif
  #endif

  #if ( CPU_HAS_HARDWARE_INTERRUPT_STACK == TRUE )
    _CPU_Install_interrupt_stack();
  #endif
}
void _Workspace_Handler_initialization(
  void   *starting_address,
  size_t  size
)
{
  uint32_t *zero_out_array;
  uint32_t  index;
  uint32_t  memory_available;

  if ( !starting_address || !_Addresses_Is_aligned( starting_address ) )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      TRUE,
      INTERNAL_ERROR_INVALID_WORKSPACE_ADDRESS
    );

  if ( _CPU_Table.do_zero_of_workspace ) {
    for( zero_out_array = (uint32_t *) starting_address, index = 0 ;
         index < size / sizeof( uint32_t ) ;
         index++ )
      zero_out_array[ index ] = 0;
  }

  memory_available = _Heap_Initialize(
    &_Workspace_Area,
    starting_address,
    size,
    CPU_HEAP_ALIGNMENT
  );

  if ( memory_available == 0 )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      TRUE,
      INTERNAL_ERROR_TOO_LITTLE_WORKSPACE
    );
}
void _MPCI_Handler_initialization(
  uint32_t timeout_status
)
{
  CORE_semaphore_Attributes  attributes;
  MPCI_Control              *users_mpci_table;

  users_mpci_table = _Configuration_MP_table->User_mpci_table;

  if ( _System_state_Is_multiprocessing && !users_mpci_table )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_NO_MPCI
    );

  _MPCI_table = users_mpci_table;

  if ( !_System_state_Is_multiprocessing )
    return;

  /*
   *  Register the MP Process Packet routine.
   */
  _MPCI_Register_packet_processor(
    MP_PACKET_MPCI_INTERNAL,
    _MPCI_Internal_packets_Process_packet
  );

  /*
   *  Create the counting semaphore used by the MPCI Receive Server.
   */
  attributes.discipline = CORE_SEMAPHORE_DISCIPLINES_FIFO;

  _CORE_semaphore_Initialize(
    &_MPCI_Semaphore,
    &attributes,      /* the_semaphore_attributes */
    0                 /* initial_value */
  );

  _Thread_queue_Initialize(
    &_MPCI_Remote_blocked_threads,
    THREAD_QUEUE_DISCIPLINE_FIFO,
    STATES_WAITING_FOR_RPC_REPLY,
    timeout_status
  );
}
void *_Workspace_Allocate_or_fatal_error(
  size_t size
)
{
  void *memory;

  memory = _Workspace_Allocate( size );

  if ( memory == NULL )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      TRUE,
      INTERNAL_ERROR_WORKSPACE_ALLOCATION
    );

  return memory;
}
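/*
 * Illustrative sketch: managers that must not run out of memory during
 * initialization use the or-fatal-error variant above, so the NULL case
 * never reaches the caller.  Example_Control, example_table, and
 * example_manager_initialization() are assumptions for illustration only.
 */
typedef struct {
  uint32_t id;                           /* hypothetical per-object state */
} Example_Control;

static Example_Control *example_table;   /* hypothetical manager table */

void example_manager_initialization( uint32_t maximum )
{
  example_table = _Workspace_Allocate_or_fatal_error(
    maximum * sizeof( Example_Control )  /* fatal error on exhaustion */
  );
}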
Thread_Control *_Thread_MP_Allocate_proxy(
  States_Control the_state
)
{
  Thread_Control       *the_thread;
  Thread_Proxy_control *the_proxy;

  the_thread = (Thread_Control *)_Chain_Get( &_Thread_MP_Inactive_proxies );

  if ( !_Thread_Is_null( the_thread ) ) {

    the_proxy = (Thread_Proxy_control *) the_thread;

    _Thread_Executing->Wait.return_code = THREAD_STATUS_PROXY_BLOCKING;

    the_proxy->receive_packet = _MPCI_Receive_server_tcb->receive_packet;

    the_proxy->Object.id =
      _MPCI_Receive_server_tcb->receive_packet->source_tid;

    the_proxy->current_priority =
      _MPCI_Receive_server_tcb->receive_packet->source_priority;

    the_proxy->current_state = _States_Set( STATES_DORMANT, the_state );

    the_proxy->Wait = _Thread_Executing->Wait;

    _Chain_Append( &_Thread_MP_Active_proxies, &the_proxy->Active );

    return the_thread;
  }

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_OUT_OF_PROXIES
  );

  /*
   *  NOTE: The following return ensures that the compiler will
   *        think that all paths return a value.
   */
  return NULL;
}
void _POSIX_Threads_Initialize_user_threads_body(void)
{
  int                                 status;
  uint32_t                            index;
  uint32_t                            maximum;
  posix_initialization_threads_table *user_threads;
  pthread_t                           thread_id;
  pthread_attr_t                      attr;

  user_threads = Configuration_POSIX_API.User_initialization_threads_table;
  maximum      = Configuration_POSIX_API.number_of_initialization_threads;

  if ( !user_threads || maximum == 0 )
    return;

  /*
   *  Be careful: if the default attribute set changes, this may need to
   *  change as well.
   *
   *  Setting the attributes explicitly is critical, since we don't want
   *  to inherit the idle task's attributes.
   */
  for ( index = 0 ; index < maximum ; index++ ) {
    /*
     *  There is no way for these calls to fail in this situation.
     */
    (void) pthread_attr_init( &attr );
    (void) pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    (void) pthread_attr_setstacksize( &attr, user_threads[ index ].stack_size );

    status = pthread_create(
      &thread_id,
      &attr,
      user_threads[ index ].thread_entry,
      NULL
    );
    if ( status )
      _Internal_error_Occurred( INTERNAL_ERROR_POSIX_API, true, status );
  }
}
MP_packet_Prefix *_MPCI_Get_packet( void )
{
  MP_packet_Prefix *the_packet;

  (*_MPCI_table->get_packet)( &the_packet );

  if ( the_packet == NULL )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_OUT_OF_PACKETS
    );

  /*
   *  Put in a default timeout that will be used for
   *  all packets that do not otherwise have a timeout.
   */
  the_packet->timeout = MPCI_DEFAULT_TIMEOUT;

  return the_packet;
}
/*
 *  _Workspace_Handler_initialization
 */
void _Workspace_Handler_initialization(void)
{
  uintptr_t memory_available = 0;
  void     *starting_address = rtems_configuration_get_work_space_start();
  uintptr_t size             = rtems_configuration_get_work_space_size();

  if ( rtems_configuration_get_do_zero_of_workspace() )
    memset( starting_address, 0, size );

  memory_available = _Heap_Initialize(
    &_Workspace_Area,
    starting_address,
    size,
    CPU_HEAP_ALIGNMENT
  );

  if ( memory_available == 0 )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_TOO_LITTLE_WORKSPACE
    );
}
uint32_t _CPU_ISR_Get_level( void )
{
  uint32_t status = _Nios2_Get_ctlreg_status();
  uint32_t level = 0;

  switch ( _Nios2_ISR_Get_status_mask() ) {
    case NIOS2_ISR_STATUS_MASK_IIC:
      level = (status & NIOS2_STATUS_PIE) == 0;
      break;
    case NIOS2_ISR_STATUS_MASK_EIC_IL:
      level = (status & NIOS2_STATUS_IL_MASK) >> NIOS2_STATUS_IL_OFFSET;
      break;
    case NIOS2_ISR_STATUS_MASK_EIC_RSIE:
      level = (status & NIOS2_STATUS_RSIE) == 0;
      break;
    default:
      /* FIXME */
      _Internal_error_Occurred( INTERNAL_ERROR_CORE, false, 0xdeadbeef );
      break;
  }

  return level;
}
void _Thread_Handler( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  #if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
    static char doneConstructors;
    char doneCons;
  #endif

  executing = _Thread_Executing;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();

  /*
   * Have to put level into a register for those CPUs that use
   * inline asm here.
   */
  level = executing->Start.isr_level;
  _ISR_Set_level( level );

  #if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
    doneCons = doneConstructors;
    doneConstructors = 1;
  #endif

  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    #if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
      if ( (executing->fp_context != NULL) &&
           !_Thread_Is_allocated_fp( executing ) ) {
        if ( _Thread_Allocated_fp != NULL )
          _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
        _Thread_Allocated_fp = executing;
      }
    #endif
  #endif

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatch
   * disabled until all 'begin' extensions complete.
   */
  _User_extensions_Thread_begin( executing );

  /*
   * At this point, the dispatch disable level BETTER be 1.
   */
  _Thread_Enable_dispatch();

  #if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
    /*
     * _init could be a weak symbol and we SHOULD test it but it isn't
     * in any configuration I know of and it generates a warning on every
     * RTEMS target configuration. --joel (12 May 2007)
     */
    if (!doneCons) /* && (volatile void *)_init) */ {
      INIT_NAME ();
    }
  #endif

  if ( executing->Start.prototype == THREAD_START_NUMERIC ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
  }
  #if defined(RTEMS_POSIX_API)
    else if ( executing->Start.prototype == THREAD_START_POINTER ) {
      executing->Wait.return_argument =
        (*(Thread_Entry_pointer) executing->Start.entry_point)(
          executing->Start.pointer_argument
        );
    }
  #endif
  #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
    else if ( executing->Start.prototype == THREAD_START_BOTH_POINTER_FIRST ) {
      executing->Wait.return_argument =
        (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
          executing->Start.pointer_argument,
          executing->Start.numeric_argument
        );
    }
    else if ( executing->Start.prototype == THREAD_START_BOTH_NUMERIC_FIRST ) {
      executing->Wait.return_argument =
        (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
          executing->Start.numeric_argument,
          executing->Start.pointer_argument
        );
    }
  #endif

  /*
   * In the switch above, the return code from the user thread body was
   * placed in return_argument.  This assumed that if it returned anything
   * (which is not supported in all APIs), then it would be able to fit in
   * a (void *).
   */

  _User_extensions_Thread_exitted( executing );

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
int pthread_key_create(
  pthread_key_t  *key,
  void          (*destructor)( void * )
)
{
  POSIX_Keys_Control *the_key;
  void               *table;
  uint32_t            the_api;
  uint32_t            bytes_to_allocate;

  _Thread_Disable_dispatch();

  the_key = _POSIX_Keys_Allocate();

  if ( !the_key ) {
    _Thread_Enable_dispatch();
    return EAGAIN;
  }

  the_key->destructor = destructor;

  /*
   *  This is a bit more complex than one might initially expect because
   *  APIs are optional.
   *
   *  NOTE: Currently RTEMS Classic API tasks are always enabled.
   */
  for ( the_api = 1; the_api <= OBJECTS_APIS_LAST; the_api++ ) {
    the_key->Values[ the_api ] = NULL;

    #if defined(RTEMS_DEBUG)
      /*
       *  Since the removal of ITRON, this cannot occur.
       */
      if ( !_Objects_Information_table[ the_api ] )
        continue;

      /*
       *  Currently all managers are installed if the API is installed.
       *  This would be a horrible implementation error.
       */
      if ( _Objects_Information_table[ the_api ][ 1 ] == NULL )
        _Internal_error_Occurred(
          INTERNAL_ERROR_CORE,
          true,
          INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY
        );
    #endif

    bytes_to_allocate = sizeof( void * ) *
      (_Objects_Information_table[ the_api ][ 1 ]->maximum + 1);
    table = _Workspace_Allocate( bytes_to_allocate );
    if ( !table ) {
      _POSIX_Keys_Free_memory( the_key );
      _POSIX_Keys_Free( the_key );
      _Thread_Enable_dispatch();
      return ENOMEM;
    }

    the_key->Values[ the_api ] = table;
    memset( table, '\0', bytes_to_allocate );
  }

  _Objects_Open_u32( &_POSIX_Keys_Information, &the_key->Object, 0 );

  *key = the_key->Object.id;

  _Thread_Enable_dispatch();

  return 0;
}
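/*
 * Illustrative sketch: standard POSIX usage of the service above.
 * example_key, example_destructor(), and example_key_setup() are
 * assumptions for illustration only.  The error codes checked for are
 * EAGAIN and ENOMEM, matching the returns in the implementation above.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t example_key;

static void example_destructor( void *value )
{
  free( value );   /* reclaim the per-thread value on thread exit */
}

static void example_key_setup( void )
{
  if ( pthread_key_create( &example_key, example_destructor ) != 0 ) {
    /* EAGAIN (no key available) or ENOMEM (workspace exhausted) */
  }
}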
boolean _Heap_Walk(
  Heap_Control *the_heap,
  int           source,
  boolean       do_dump
)
{
  Heap_Block *the_block = the_heap->start;
  Heap_Block *const end = the_heap->final;
  Heap_Block *const tail = _Heap_Tail(the_heap);
  int error = 0;
  int passes = 0;

  do_dump = FALSE;

  /*
   * We don't want to allow walking the heap until we have
   * transferred control to the user task so we watch the
   * system state.
   */
  /*
  if ( !_System_state_Is_up( _System_state_Get() ) )
    return TRUE;
  */

  if (source < 0)
    source = the_heap->stats.instance;

  if (do_dump == TRUE)
    printk("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
      source, the_block, end,
      _Heap_First(the_heap), _Heap_Last(the_heap),
      the_heap->begin, the_heap->end);

  /*
   * Handle the 1st block
   */
  if (!_Heap_Is_prev_used(the_block)) {
    printk("PASS: %d !HEAP_PREV_USED flag of 1st block isn't set\n", source);
    error = 1;
  }

  if (the_block->prev_size != the_heap->page_size) {
    printk("PASS: %d !prev_size of 1st block isn't page_size\n", source);
    error = 1;
  }

  while ( the_block != end ) {
    uint32_t const the_size = _Heap_Block_size(the_block);
    Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
    boolean prev_used = _Heap_Is_prev_used(the_block);

    if (do_dump) {
      printk("PASS: %d block %p size %d(%c)",
        source, the_block, the_size, (prev_used ? 'U' : 'F'));
      if (prev_used)
        printk(" prev_size %d", the_block->prev_size);
      else
        printk(" (prev_size) %d", the_block->prev_size);
    }

    if (!_Heap_Is_block_in(the_heap, next_block)) {
      if (do_dump) printk("\n");
      printk("PASS: %d !block %p is out of heap\n", source, next_block);
      error = 1;
      break;
    }

    if (!_Heap_Is_prev_used(next_block)) {
      if (do_dump)
        printk( " prev %p next %p", the_block->prev, the_block->next);
      if (_Heap_Block_size(the_block) != next_block->prev_size) {
        if (do_dump) printk("\n");
        printk("PASS: %d !front and back sizes don't match", source);
        error = 1;
      }
      if (!prev_used) {
        if (do_dump || error) printk("\n");
        printk("PASS: %d !two consecutive blocks are free", source);
        error = 1;
      }

      { /* Check if 'the_block' is in the free block list */
        Heap_Block* block = _Heap_First(the_heap);
        while(block != the_block && block != tail)
          block = block->next;
        if(block != the_block) {
          if (do_dump || error) printk("\n");
          printk("PASS: %d !the_block not in the free list", source);
          error = 1;
        }
      }
    }
    if (do_dump || error) printk("\n");

    if (the_size < the_heap->min_block_size) {
      printk("PASS: %d !block size is too small\n", source);
      error = 1;
      break;
    }
    if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
      printk("PASS: %d !block size is misaligned\n", source);
      error = 1;
    }

    if (++passes > (do_dump ? 10 : 0) && error)
      break;

    the_block = next_block;
  }

  if (the_block != end) {
    printk("PASS: %d !last block address isn't equal to 'final' %p %p\n",
      source, the_block, end);
    error = 1;
  }

  if (_Heap_Block_size(the_block) != the_heap->page_size) {
    printk("PASS: %d !last block's size isn't page_size (%d != %d)\n",
      source, _Heap_Block_size(the_block), the_heap->page_size);
    error = 1;
  }

  if(do_dump && error)
    _Internal_error_Occurred( INTERNAL_ERROR_CORE, TRUE, 0xffff0000 );

  return error;
}
void _Thread_Handler( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  #if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
    bool doCons;
  #endif

  executing = _Thread_Executing;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();

  #if !defined(RTEMS_SMP)
    /*
     * Have to put level into a register for those CPUs that use
     * inline asm here.
     */
    level = executing->Start.isr_level;
    _ISR_Set_level( level );
  #endif

  #if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
    doCons = _Thread_Handler_is_constructor_execution_required( executing );
  #endif

  /*
   * Initialize the floating point context because we do not come
   * through _Thread_Dispatch on our first invocation.  So the normal
   * code path for performing the FP context switch is not hit.
   */
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    #if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
      if ( (executing->fp_context != NULL) &&
           !_Thread_Is_allocated_fp( executing ) ) {
        if ( _Thread_Allocated_fp != NULL )
          _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
        _Thread_Allocated_fp = executing;
      }
    #endif
  #endif

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatch
   * disabled until all 'begin' extensions complete.
   */
  _User_extensions_Thread_begin( executing );

  /*
   * At this point, the dispatch disable level BETTER be 1.
   */
  #if defined(RTEMS_SMP)
    {
      /*
       * On SMP we enter _Thread_Handler() with interrupts disabled and
       * _Thread_Dispatch() obtained the per-CPU lock for us.  We have to
       * release it here and set the desired interrupt level of the thread.
       */
      Per_CPU_Control *per_cpu = _Per_CPU_Get();

      _Assert( per_cpu->thread_dispatch_disable_level == 1 );
      _Assert( _ISR_Get_level() != 0 );

      per_cpu->thread_dispatch_disable_level = 0;

      _Per_CPU_Release( per_cpu );

      level = executing->Start.isr_level;
      _ISR_Set_level( level );

      /*
       * The thread dispatch level changed from one to zero.  Make sure we
       * lose no thread dispatch necessary update.
       */
      _Thread_Dispatch();
    }
  #else
    _Thread_Enable_dispatch();
  #endif

  #if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
    /*
     * _init could be a weak symbol and we SHOULD test it but it isn't
     * in any configuration I know of and it generates a warning on every
     * RTEMS target configuration. --joel (12 May 2007)
     */
    if (doCons) /* && (volatile void *)_init) */ {
      INIT_NAME ();
    }
  #endif

  /*
   * RTEMS supports multiple APIs and each API can define a different
   * thread/task prototype.  The following code supports invoking the
   * user thread entry point using the prototype expected.
   */
  if ( executing->Start.prototype == THREAD_START_NUMERIC ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
  }
  #if defined(RTEMS_POSIX_API)
    else if ( executing->Start.prototype == THREAD_START_POINTER ) {
      executing->Wait.return_argument =
        (*(Thread_Entry_pointer) executing->Start.entry_point)(
          executing->Start.pointer_argument
        );
    }
  #endif
  #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
    else if ( executing->Start.prototype == THREAD_START_BOTH_POINTER_FIRST ) {
      executing->Wait.return_argument =
        (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
          executing->Start.pointer_argument,
          executing->Start.numeric_argument
        );
    }
    else if ( executing->Start.prototype == THREAD_START_BOTH_NUMERIC_FIRST ) {
      executing->Wait.return_argument =
        (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
          executing->Start.numeric_argument,
          executing->Start.pointer_argument
        );
    }
  #endif

  /*
   * In the switch above, the return code from the user thread body was
   * placed in return_argument.  This assumed that if it returned anything
   * (which is not supported in all APIs), then it would be able to fit in
   * a (void *).
   */

  _User_extensions_Thread_exitted( executing );

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
void _POSIX_Fatal_error( POSIX_Fatal_domain domain, int eno )
{
  uint32_t code = ( domain << 8 ) | ( ( uint32_t ) eno & 0xffU );

  _Internal_error_Occurred( INTERNAL_ERROR_POSIX_API, false, code );
}
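/*
 * Illustrative sketch: the packed code above keeps the domain in the
 * upper bits and the errno in the low byte, so domain 2 with eno == 12
 * yields (2 << 8) | 12 == 0x020C.  The two decode helpers below are
 * assumptions for illustration, not part of the RTEMS API.
 */
static inline POSIX_Fatal_domain example_fatal_domain_of( uint32_t code )
{
  return (POSIX_Fatal_domain) ( code >> 8 );   /* upper bits: domain */
}

static inline int example_fatal_errno_of( uint32_t code )
{
  return (int) ( code & 0xffU );               /* low byte: errno */
}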
void _Objects_Initialize_information(
  Objects_Information *information,
  Objects_APIs         the_api,
  uint32_t             the_class,
  uint32_t             maximum,
  uint16_t             size,
  bool                 is_string,
  uint32_t             maximum_name_length
#if defined(RTEMS_MULTIPROCESSING)
  ,
  bool                 supports_global,
  Objects_Thread_queue_Extract_callout extract
#endif
)
{
  static Objects_Control *null_local_table = NULL;
  uint32_t                minimum_index;
  uint32_t                name_length;
  uint32_t                maximum_per_allocation;
  #if defined(RTEMS_MULTIPROCESSING)
    uint32_t              index;
  #endif

  information->the_api            = the_api;
  information->the_class          = the_class;
  information->size               = size;
  information->local_table        = 0;
  information->inactive_per_block = 0;
  information->object_blocks      = 0;
  information->inactive           = 0;
  #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES)
    information->is_string        = is_string;
  #endif

  /*
   *  Set the maximum value to 0.  It will be updated when objects are
   *  added to the inactive set from _Objects_Extend_information().
   */
  information->maximum = 0;

  /*
   *  Register this Object Class in the Object Information Table.
   */
  _Objects_Information_table[ the_api ][ the_class ] = information;

  /*
   *  Are we operating in limited or unlimited (e.g. auto-extend) mode?
   */
  information->auto_extend =
    (maximum & OBJECTS_UNLIMITED_OBJECTS) ? true : false;
  maximum_per_allocation = maximum & ~OBJECTS_UNLIMITED_OBJECTS;

  /*
   *  Unlimited with a maximum of zero is illogical.
   */
  if ( information->auto_extend && maximum_per_allocation == 0 ) {
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0
    );
  }

  /*
   *  The allocation unit is the maximum value.
   */
  information->allocation_size = maximum_per_allocation;

  /*
   *  Provide a null local table entry for the case of an empty table.
   */
  information->local_table = &null_local_table;

  /*
   *  Calculate minimum and maximum Ids.
   */
  minimum_index = (maximum_per_allocation == 0) ? 0 : 1;
  information->minimum_id =
    _Objects_Build_id( the_api, the_class, _Objects_Local_node, minimum_index );

  /*
   *  Calculate the maximum name length.
   */
  name_length = maximum_name_length;

  if ( name_length & (OBJECTS_NAME_ALIGNMENT-1) )
    name_length = (name_length + OBJECTS_NAME_ALIGNMENT) &
                  ~(OBJECTS_NAME_ALIGNMENT-1);

  information->name_length = name_length;

  _Chain_Initialize_empty( &information->Inactive );

  /*
   *  Initialize objects .. if there are any.
   */
  if ( maximum_per_allocation ) {
    /*
     *  Always have the maximum size available so that the current
     *  performance figures are met.  If the user moves past the maximum
     *  number then a performance hit is taken.
     */
    _Objects_Extend_information( information );
  }

  /*
   *  Take care of multiprocessing.
   */
  #if defined(RTEMS_MULTIPROCESSING)
    information->extract = extract;

    if ( (supports_global == true) && _System_state_Is_multiprocessing ) {
      information->global_table =
        (Chain_Control *) _Workspace_Allocate_or_fatal_error(
          (_Objects_Maximum_nodes + 1) * sizeof(Chain_Control)
        );

      for ( index = 1 ; index <= _Objects_Maximum_nodes ; index++ )
        _Chain_Initialize_empty( &information->global_table[ index ] );
    } else
      information->global_table = NULL;
  #endif
}