/*
 *  _Chain_Initialize
 *
 *  Initialize the_chain as a doubly linked list containing number_nodes
 *  nodes carved out of the contiguous memory at starting_address, each
 *  node_size bytes apart.  The chain's permanent NULL sentinel is cleared
 *  and its last pointer ends up on the final node (or the head itself when
 *  number_nodes is zero).
 */
void _Chain_Initialize(
  Chain_Control *the_chain,
  void          *starting_address,
  size_t         number_nodes,
  size_t         node_size
)
{
  size_t      remaining = number_nodes;
  Chain_Node *previous  = _Chain_Head( the_chain );
  Chain_Node *node      = starting_address;

  the_chain->permanent_null = NULL;

  /* Thread each node onto the tail of the list built so far. */
  while ( remaining-- ) {
    previous->next = node;
    node->previous = previous;
    previous       = node;
    node           = (Chain_Node *)
      _Addresses_Add_offset( (void *) node, node_size );
  }

  /* Terminate the list and remember the last real node. */
  previous->next  = _Chain_Tail( the_chain );
  the_chain->last = previous;
}
/*
 *  rtems_port_internal_to_external
 *
 *  Convert an address relative to the port's internal base into the
 *  corresponding external address.  If the offset of internal past
 *  internal_base exceeds the port's length, the address is returned
 *  unconverted (RTEMS documented behavior, not an error).
 *
 *  id       - dual-ported memory port object id
 *  internal - internal address to convert
 *  external - out parameter receiving the converted address
 *
 *  Returns RTEMS_INVALID_ADDRESS when external is NULL,
 *  RTEMS_INVALID_ID when the id does not resolve to a local port,
 *  RTEMS_SUCCESSFUL otherwise.
 */
rtems_status_code rtems_port_internal_to_external(
  rtems_id   id,
  void      *internal,
  void     **external
)
{
  register Dual_ported_memory_Control *the_port;
  Objects_Locations                    location;
  uint32_t                             ending;

  if ( !external )
    return RTEMS_INVALID_ADDRESS;

  /* _Dual_ported_memory_Get disables thread dispatching on success;
   * the OBJECTS_LOCAL arm must re-enable it before returning.
   */
  the_port = _Dual_ported_memory_Get( id, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:
      /* NOTE(review): if internal is below internal_base the unsigned
       * subtraction wraps, making ending > length, so the address is
       * passed through unchanged — presumably intentional; confirm.
       */
      ending = _Addresses_Subtract( internal, the_port->internal_base );
      if ( ending > the_port->length )
        *external = internal;
      else
        *external = _Addresses_Add_offset(
          the_port->external_base,
          ending
        );
      _Thread_Enable_dispatch();
      return RTEMS_SUCCESSFUL;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:        /* this error cannot be returned */
#endif
    case OBJECTS_ERROR:
      break;
  }

  return RTEMS_INVALID_ID;
}
/*
 *  _SMP_Handler_initialize
 *
 *  Zero the per-CPU control structures and build the per-CPU pointer
 *  table, allocating and aligning an interrupt stack for every secondary
 *  processor (CPU 0's interrupt stack is set up elsewhere).
 */
void _SMP_Handler_initialize(void)
{
  int           cpu;
  size_t        size;
  uintptr_t     ptr;

  /*
   * Initialize per CPU structures.
   */
  /* NOTE(review): the memset bound uses _SMP_Processor_count while the
   * loop below uses rtems_configuration_smp_maximum_processors — verify
   * these are equal at this point in system initialization.
   */
  size = (_SMP_Processor_count) * sizeof(Per_CPU_Control);
  memset( _Per_CPU_Information, '\0', size );

  /*
   * Initialize per cpu pointer table.  "size" is reused here for the
   * configured interrupt stack size.
   */
  size = Configuration.interrupt_stack_size;
  _Per_CPU_Information_p[0] = &_Per_CPU_Information[0];
  for (cpu=1 ; cpu < rtems_configuration_smp_maximum_processors; cpu++ ) {

    Per_CPU_Control *p = &_Per_CPU_Information[cpu];

    _Per_CPU_Information_p[cpu] = p;

    p->interrupt_stack_low = _Workspace_Allocate_or_fatal_error( size );

    /* Align the stack top DOWN so the full aligned region lies within
     * the allocation.
     */
    ptr = (uintptr_t) _Addresses_Add_offset( p->interrupt_stack_low, size );
    ptr &= ~(CPU_STACK_ALIGNMENT - 1);
    p->interrupt_stack_high = (void *)ptr;
    p->state = RTEMS_BSP_SMP_CPU_INITIAL_STATE;
    /* Ensure the per-CPU state is visible before any secondary core
     * starts polling it.
     */
    RTEMS_COMPILER_MEMORY_BARRIER();
  }
}
/*
 *  _RBTree_Initialize
 *
 *  Initialize the_rbtree as an empty red-black tree and then insert
 *  number_nodes nodes carved out of the contiguous memory at
 *  starting_address, each node_size bytes apart.
 */
void _RBTree_Initialize(
  RBTree_Control          *the_rbtree,
  RBTree_Compare_function  compare_function,
  void                    *starting_address,
  size_t                   number_nodes,
  size_t                   node_size,
  bool                     is_unique
)
{
  size_t       remaining;
  RBTree_Node *node;

  /* TODO: Error message? */
  if ( !the_rbtree )
    return;

  /* could do sanity checks here */
  _RBTree_Initialize_empty( the_rbtree, compare_function, is_unique );

  node = starting_address;
  for ( remaining = number_nodes; remaining > 0; --remaining ) {
    _RBTree_Insert( the_rbtree, node );
    node = (RBTree_Node *)
      _Addresses_Add_offset( (void *) node, node_size );
  }
}
/*
 *  _Chain_Initialize
 *
 *  Initialize the_chain as a doubly linked list containing number_nodes
 *  nodes carved out of the contiguous memory at starting_address, each
 *  node_size bytes apart.  The head's previous pointer is the permanent
 *  NULL terminator; the tail's previous pointer ends on the last node
 *  (or the head when number_nodes is zero).
 */
void _Chain_Initialize(
  Chain_Control *the_chain,
  void          *starting_address,
  size_t         number_nodes,
  size_t         node_size
)
{
  Chain_Node *head     = _Chain_Head( the_chain );
  Chain_Node *tail     = _Chain_Tail( the_chain );
  Chain_Node *previous = head;
  Chain_Node *node     = starting_address;
  size_t      i;

  head->previous = NULL;

  /* Thread each node onto the tail of the list built so far. */
  for ( i = 0; i < number_nodes; ++i ) {
    previous->next = node;
    node->previous = previous;
    previous       = node;
    node           = (Chain_Node *)
      _Addresses_Add_offset( (void *) node, node_size );
  }

  previous->next = tail;
  tail->previous = previous;
}
/*
 *  rtems_port_internal_to_external
 *
 *  Convert an address relative to the port's internal base into the
 *  corresponding external address.  If the offset of internal past
 *  internal_base exceeds the port's length, the address is returned
 *  unconverted (documented behavior, not an error).
 *
 *  Returns RTEMS_INVALID_ADDRESS when external is NULL,
 *  RTEMS_INVALID_ID when the id does not resolve to a port,
 *  RTEMS_SUCCESSFUL otherwise.
 */
rtems_status_code rtems_port_internal_to_external(
  rtems_id   id,
  void      *internal,
  void     **external
)
{
  Dual_ported_memory_Control *the_port;
  ISR_lock_Context            lock_context;
  uint32_t                    offset;

  if ( external == NULL ) {
    return RTEMS_INVALID_ADDRESS;
  }

  /* On success the port is returned with interrupts disabled; they are
   * re-enabled below once the port fields have been read.
   */
  the_port = _Dual_ported_memory_Get( id, &lock_context );
  if ( the_port == NULL ) {
    return RTEMS_INVALID_ID;
  }

  offset = _Addresses_Subtract( internal, the_port->internal_base );
  *external = ( offset > the_port->length ) ?
    internal :
    _Addresses_Add_offset( the_port->external_base, offset );

  _ISR_lock_ISR_enable( &lock_context );
  return RTEMS_SUCCESSFUL;
}
/*
 *  _ISR_Handler_initialization
 *
 *  Initialize the ISR subsystem: reset the nest level, allocate the
 *  software vector table (simple vectored architectures), and allocate
 *  plus align one interrupt stack per configured processor.
 */
void _ISR_Handler_initialization( void )
{
  _ISR_Nest_level = 0;

#if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
  _ISR_Vector_table = _Workspace_Allocate_or_fatal_error(
     sizeof(ISR_Handler_entry) * ISR_NUMBER_OF_VECTORS
  );

  _CPU_Initialize_vectors();
#endif

#if ( CPU_ALLOCATE_INTERRUPT_STACK == TRUE )
  {
    size_t stack_size = rtems_configuration_get_interrupt_stack_size();
    uint32_t max_cpus = rtems_configuration_get_maximum_processors();
    uint32_t cpu;

    /* Fatal error: an undersized interrupt stack would be overrun. */
    if ( !_Stack_Is_enough( stack_size ) )
      _Terminate(
        INTERNAL_ERROR_CORE,
        true,
        INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL
      );

    for ( cpu = 0 ; cpu < max_cpus; ++cpu ) {
      Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
      void *low = _Workspace_Allocate_or_fatal_error( stack_size );
      void *high = _Addresses_Add_offset( low, stack_size );

#if (CPU_STACK_ALIGNMENT != 0)
      /* Align the stack top DOWN so the aligned region stays inside
       * the allocation.
       */
      high = _Addresses_Align_down( high, CPU_STACK_ALIGNMENT );
#endif

      per_cpu->interrupt_stack_low = low;
      per_cpu->interrupt_stack_high = high;

      /*
       * Interrupt stack might have to be aligned and/or setup in a specific
       * way.  Do not use the local low or high variables here since
       * _CPU_Interrupt_stack_setup() is a nasty macro that might want to play
       * with the real memory locations.
       */
#if defined(_CPU_Interrupt_stack_setup)
      _CPU_Interrupt_stack_setup(
        per_cpu->interrupt_stack_low,
        per_cpu->interrupt_stack_high
      );
#endif
    }
  }
#endif

#if ( CPU_HAS_HARDWARE_INTERRUPT_STACK == TRUE )
  _CPU_Install_interrupt_stack();
#endif
}
/*
 *  _ISR_Handler_initialization
 *
 *  Initialize the ISR subsystem: reset the nest level, allocate the
 *  software vector table (simple vectored architectures), and allocate
 *  plus align the single global interrupt stack.
 */
void _ISR_Handler_initialization( void )
{
  _ISR_Nest_level = 0;

#if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
  _ISR_Vector_table = _Workspace_Allocate_or_fatal_error(
     sizeof(ISR_Handler_entry) * ISR_NUMBER_OF_VECTORS
  );

  _CPU_Initialize_vectors();
#endif

#if ( CPU_ALLOCATE_INTERRUPT_STACK == TRUE )
  {
    size_t stack_size = rtems_configuration_get_interrupt_stack_size();

    /* Fatal error: an undersized interrupt stack would be overrun. */
    if ( !_Stack_Is_enough( stack_size ) )
      _Internal_error_Occurred(
        INTERNAL_ERROR_CORE,
        true,
        INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL
      );

    _CPU_Interrupt_stack_low =
      _Workspace_Allocate_or_fatal_error( stack_size );

    _CPU_Interrupt_stack_high = _Addresses_Add_offset(
      _CPU_Interrupt_stack_low,
      stack_size
    );
  }

#if (CPU_STACK_ALIGNMENT != 0)
  /* Align the stack top DOWN so the aligned region stays inside the
   * allocation.
   */
  _CPU_Interrupt_stack_high = (void *)
    ((uintptr_t) _CPU_Interrupt_stack_high & ~(CPU_STACK_ALIGNMENT - 1));
#endif

  /* Interrupt stack might have to be aligned and/or setup
   * in a specific way.
   */
#if defined(_CPU_Interrupt_stack_setup)
  _CPU_Interrupt_stack_setup(
    _CPU_Interrupt_stack_low,
    _CPU_Interrupt_stack_high
  );
#endif

#endif

#if ( CPU_HAS_HARDWARE_INTERRUPT_STACK == TRUE )
  _CPU_Install_interrupt_stack();
#endif
}
/*
 *  _SMP_Handler_initialize
 *
 *  Build the per-CPU pointer table and (when configured) allocate an
 *  interrupt stack for each secondary processor, then let the BSP
 *  discover the actual processor count and wait for every secondary
 *  core to reach the ready-to-begin-multitasking state.
 */
void _SMP_Handler_initialize(void)
{
  uint32_t max_cpus = rtems_configuration_get_maximum_processors();
  uint32_t cpu;

  /*
   * Initialize per cpu pointer table.  CPU 0 needs no interrupt stack
   * here; it is set up elsewhere.
   */
  _Per_CPU_Information_p[0] = _Per_CPU_Get_by_index( 0 );

  for ( cpu = 1 ; cpu < max_cpus; ++cpu ) {
    Per_CPU_Control *p = _Per_CPU_Get_by_index( cpu );

    _Per_CPU_Information_p[cpu] = p;

#if CPU_ALLOCATE_INTERRUPT_STACK == TRUE
    {
      size_t size = rtems_configuration_get_interrupt_stack_size();
      uintptr_t ptr;

      p->interrupt_stack_low = _Workspace_Allocate_or_fatal_error( size );

      /* Align the stack top DOWN so the aligned region stays inside
       * the allocation.
       */
      ptr = (uintptr_t) _Addresses_Add_offset( p->interrupt_stack_low, size );
      ptr &= ~(CPU_STACK_ALIGNMENT - 1);
      p->interrupt_stack_high = (void *)ptr;
    }
#endif
  }

  /*
   * Discover and initialize the secondary cores in an SMP system.
   * The BSP may report fewer processors than configured.
   */
  max_cpus = bsp_smp_initialize( max_cpus );

  _SMP_Processor_count = max_cpus;

  /* Block until every discovered secondary core signals readiness. */
  for ( cpu = 1 ; cpu < max_cpus; ++cpu ) {
    const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );

    _Per_CPU_Wait_for_state(
      per_cpu,
      PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING
    );
  }
}
/*
 *  _RBTree_Initialize
 *
 *  Initialize the_rbtree as an empty red-black tree and then insert
 *  number_nodes nodes carved out of the contiguous memory at
 *  starting_address, each node_size bytes apart, using the provided
 *  compare function and uniqueness policy.
 */
void _RBTree_Initialize(
  RBTree_Control *the_rbtree,
  RBTree_Compare  compare,
  void           *starting_address,
  size_t          number_nodes,
  size_t          node_size,
  bool            is_unique
)
{
  size_t       remaining;
  RBTree_Node *node;

  /* could do sanity checks here */
  _RBTree_Initialize_empty( the_rbtree );

  node = starting_address;
  for ( remaining = number_nodes; remaining > 0; --remaining ) {
    _RBTree_Insert( the_rbtree, node, compare, is_unique );
    node = (RBTree_Node *) _Addresses_Add_offset( node, node_size );
  }
}
/*
 *  _Objects_Extend_information
 *
 *  Grow the given object information table by one allocation block:
 *  allocate a new block of objects, grow the block/inactive/local
 *  tables if no free block slot exists, then initialize the new objects
 *  and append them to the inactive chain.  Returns silently on
 *  allocation failure when auto-extend is enabled, or when the new
 *  maximum would exceed the index space of an object Id.
 */
void _Objects_Extend_information(
  Objects_Information *information
)
{
  Objects_Control *the_object;
  Chain_Control    Inactive;
  uint32_t         block_count;
  uint32_t         block;
  uint32_t         index_base;
  uint32_t         minimum_index;
  uint32_t         index;
  uint32_t         maximum;
  size_t           block_size;
  void            *new_object_block;
  bool             do_extend;

  /*
   * Search for a free block of indexes.  If we do NOT need to allocate or
   * extend the block table, then we will change do_extend.
   */
  do_extend     = true;
  minimum_index = _Objects_Get_index( information->minimum_id );
  index_base    = minimum_index;
  block         = 0;

  /* if ( information->maximum < minimum_index ) */
  if ( information->object_blocks == NULL )
    block_count = 0;
  else {
    block_count = information->maximum / information->allocation_size;

    /* Scan for an unused block slot; index_base tracks the first object
     * index covered by the found slot.
     */
    for ( ; block < block_count; block++ ) {
      if ( information->object_blocks[ block ] == NULL ) {
        do_extend = false;
        break;
      } else
        index_base += information->allocation_size;
    }
  }
  maximum = (uint32_t) information->maximum + information->allocation_size;

  /*
   * We need to limit the number of objects to the maximum number
   * representable in the index portion of the object Id.  In the
   * case of 16-bit Ids, this is only 256 object instances.
   */
  if ( maximum > OBJECTS_ID_FINAL_INDEX ) {
    return;
  }

  /*
   * Allocate the name table, and the objects and if it fails either return or
   * generate a fatal error depending on auto-extending being active.
   */
  block_size = information->allocation_size * information->size;
  if ( information->auto_extend ) {
    new_object_block = _Workspace_Allocate( block_size );
    if ( !new_object_block )
      return;
  } else {
    new_object_block = _Workspace_Allocate_or_fatal_error( block_size );
  }

  /*
   * Do we need to grow the tables?
   */
  if ( do_extend ) {
    ISR_Level          level;
    void             **object_blocks;
    uint32_t          *inactive_per_block;
    Objects_Control  **local_table;
    void              *old_tables;
    /* NOTE(review): this inner block_size intentionally(?) shadows the
     * outer one holding the new object block's size — confirm.
     */
    size_t             block_size;

    /*
     * Growing the tables means allocating a new area, doing a copy and
     * updating the information table.
     *
     * If the maximum is minimum we do not have a table to copy.  First
     * time through.
     *
     * The allocation has :
     *
     *      void             *objects[block_count];
     *      uint32_t          inactive_count[block_count];
     *      Objects_Control  *local_table[maximum];
     *
     * This is the order in memory.  Watch changing the order.  See the
     * memcpy below.
     */

    /*
     * Up the block count and maximum
     */
    block_count++;

    /*
     * Allocate the tables and break it up.
     */
    block_size = block_count *
           (sizeof(void *) + sizeof(uint32_t) + sizeof(Objects_Name *)) +
          ((maximum + minimum_index) * sizeof(Objects_Control *));
    object_blocks = (void**) _Workspace_Allocate( block_size );

    if ( !object_blocks ) {
      /* Table growth failed: release the object block allocated above. */
      _Workspace_Free( new_object_block );
      return;
    }

    /*
     * Break the block into the various sections.
     */
    inactive_per_block = (uint32_t *) _Addresses_Add_offset(
        object_blocks, block_count * sizeof(void*) );
    local_table = (Objects_Control **) _Addresses_Add_offset(
        inactive_per_block, block_count * sizeof(uint32_t) );

    /*
     * Take the block count down.  Saves all the (block_count - 1)
     * in the copies.
     */
    block_count--;

    if ( information->maximum > minimum_index ) {

      /*
       * Copy each section of the table over.  This has to be performed as
       * separate parts as size of each block has changed.
       */

      memcpy( object_blocks,
              information->object_blocks,
              block_count * sizeof(void*) );
      memcpy( inactive_per_block,
              information->inactive_per_block,
              block_count * sizeof(uint32_t) );
      memcpy( local_table,
              information->local_table,
              (information->maximum + minimum_index) *
                sizeof(Objects_Control *) );
    } else {

      /*
       * Deal with the special case of the 0 to minimum_index
       */
      for ( index = 0; index < minimum_index; index++ ) {
        local_table[ index ] = NULL;
      }
    }

    /*
     * Initialise the new entries in the table.
     */
    object_blocks[block_count] = NULL;
    inactive_per_block[block_count] = 0;

    for ( index=index_base ;
          index < ( information->allocation_size + index_base );
          index++ ) {
      local_table[ index ] = NULL;
    }

    /* Swap the live tables atomically with respect to interrupts so no
     * ISR observes a half-updated information structure.
     */
    _ISR_Disable( level );

    old_tables = information->object_blocks;

    information->object_blocks = object_blocks;
    information->inactive_per_block = inactive_per_block;
    information->local_table = local_table;
    information->maximum = (Objects_Maximum) maximum;
    information->maximum_id = _Objects_Build_id(
        information->the_api,
        information->the_class,
        _Objects_Local_node,
        information->maximum
      );

    _ISR_Enable( level );

    _Workspace_Free( old_tables );

    block_count++;
  }

  /*
   * Assign the new object block to the object block table.
   */
  information->object_blocks[ block ] = new_object_block;

  /*
   * Initialize objects .. add to a local chain first.
   */
  _Chain_Initialize(
    &Inactive,
    information->object_blocks[ block ],
    information->allocation_size,
    information->size
  );

  /*
   * Move from the local chain, initialise, then append to the inactive chain
   */
  index = index_base;

  while ((the_object = (Objects_Control *) _Chain_Get( &Inactive )) != NULL ) {

    the_object->id = _Objects_Build_id(
        information->the_api,
        information->the_class,
        _Objects_Local_node,
        index
      );

    _Chain_Append( &information->Inactive, &the_object->Node );

    index++;
  }

  information->inactive_per_block[ block ] = information->allocation_size;
  information->inactive =
    (Objects_Maximum)(information->inactive + information->allocation_size);
}