/*
 * Report whether GDB thread support may be enabled.
 *
 * Returns non-zero once the system has reached SYSTEM_STATE_UP
 * (multitasking running), zero otherwise.
 */
int rtems_gdb_stub_thread_support_ok(void)
{
  return _System_state_Get() == SYSTEM_STATE_UP ? 1 : 0;
}
/*
 * Map an RTEMS thread object id to a flat GDB thread index.
 *
 * GDB indices start at 1; index 1 is reserved for the idle thread and is
 * used for everything before the system is up.  Once up, the object API
 * tables are scanned in order and each API's thread id range is laid out
 * consecutively, so the index is 'first' plus the offset of the id within
 * its API's [minimum_id, maximum_id] range.
 *
 * Returns the GDB index, or 0 if the id falls in no API's range.
 */
static int rtems_gdb_stub_id_to_index(
  Objects_Id thread_obj_id
)
{
  int gdb_index = 0;
  int first = 1;
  size_t api_index;

  if (_System_state_Get() != SYSTEM_STATE_UP) {
    /* We have one thread let us use value reserved for idle thread */
    gdb_index = 1;
  }

  for (
    api_index = 1;
    gdb_index == 0 && api_index <= OBJECTS_APIS_LAST;
    ++api_index
  ) {
    if (_Objects_Information_table[api_index] != NULL) {
      const Objects_Information *info =
        _Objects_Information_table[api_index][1];
      Objects_Id min_id = info->minimum_id;
      Objects_Id max_id = info->maximum_id;
      int last = first + (int) (max_id - min_id);

      /* NOTE(review): this range test excludes max_id itself, while the
       * companion lookup routines in this file treat [first, last] as
       * inclusive — confirm whether `thread_obj_id <= max_id` was
       * intended. */
      if (thread_obj_id >= min_id && thread_obj_id < max_id) {
        gdb_index = first + (int) (thread_obj_id - min_id);
      }

      first = last + 1;
    }
  }

  return gdb_index;
}
/*
 * Disable thread dispatching by incrementing the dispatch disable level.
 * The memory barrier after the increment keeps subsequent accesses from
 * being reordered ahead of it.  Two optional heavy-debug hooks may run on
 * every entry: a stack-blown check and a malloc heap walk.
 */
void _Thread_Disable_dispatch( void )
{
  /*
   * This check is very brutal to system performance but is very helpful
   * at finding blown stack problems.  If you have a stack problem and
   * need help finding it, then uncomment this code.  Every system
   * call will check the stack and since mutexes are used frequently
   * in most systems, you might get lucky.
   */
  #if defined(RTEMS_HEAVY_STACK_DEBUG)
    /* Only check outside interrupt context, once the system is up. */
    if (_System_state_Is_up(_System_state_Get()) && (_ISR_Nest_level == 0)) {
      if ( rtems_stack_checker_is_blown() ) {
        printk( "Stack blown!!\n" );
        rtems_fatal_error_occurred( 99 );
      }
    }
  #endif

  _Thread_Dispatch_increment_disable_level();
  RTEMS_COMPILER_MEMORY_BARRIER();

  /*
   * This check is even more brutal than the other one.  This enables
   * malloc heap integrity checking upon entry to every system call.
   */
  #if defined(RTEMS_HEAVY_MALLOC_DEBUG)
    /* Walk only on the outermost disable (level just became 1). */
    if ( _Thread_Dispatch_get_disable_level() == 1 ) {
      _Heap_Walk( RTEMS_Malloc_Heap,99, false );
    }
  #endif
}
/*
 * Exercise rtems_cache_coherent_allocate()/rtems_cache_coherent_free() and
 * cache coherent area registration, including registering an area while the
 * global system state is temporarily bumped to a different value.
 */
static void test_cache_coherent_alloc(void)
{
  void *p0;
  void *p1;
  System_state_Codes previous_state;

  printf("test cache coherent allocation\n");

  p0 = rtems_cache_coherent_allocate(1, 0, 0);
  rtems_test_assert(p0 != NULL);

  rtems_cache_coherent_free(p0);

  p0 = rtems_cache_coherent_allocate(1, 0, 0);
  rtems_test_assert(p0 != NULL);

  add_area(&cache_coherent_area_0[0]);
  add_area(&cache_coherent_area_1[0]);

  /* Add an area under a modified system state, then restore the state. */
  previous_state = _System_state_Get();
  _System_state_Set(previous_state + 1);
  add_area(&cache_coherent_area_2[0]);
  _System_state_Set(previous_state);

  p1 = rtems_cache_coherent_allocate(1, 0, 0);
  rtems_test_assert(p1 != NULL);

  rtems_cache_coherent_free(p0);
  rtems_cache_coherent_free(p1);
}
/* i2c_transfer_wait --
 *     Start an I2C bus transfer and block until it finishes.  The
 *     completion semaphore is used while the system is in the
 *     SYSTEM_STATE_UP state; in any other state the done flag is
 *     polled instead.
 *
 * PARAMETERS:
 *     bus - I2C bus number
 *     msg - pointer to transfer messages array
 *     nmsg - number of messages in transfer
 *
 * RETURNS:
 *     I2C_SUCCESSFUL, if transfer finished successfully,
 *     I2C_RESOURCE_NOT_AVAILABLE, if semaphore operations have failed,
 *     otherwise the status field of the first error-finished message
 *     in the transfer.
 */
i2c_message_status
i2c_transfer_wait(i2c_bus_number bus, i2c_message *msg, int nmsg)
{
  rtems_status_code sc;
  int idx;

  sc = _System_state_Is_up(_System_state_Get())
     ? i2c_transfer_wait_sema(bus, msg, nmsg)
     : i2c_transfer_wait_poll(bus, msg, nmsg);

  if (sc != RTEMS_SUCCESSFUL)
    return I2C_RESOURCE_NOT_AVAILABLE;

  /* Report the first message that ended with an error status. */
  for (idx = 0; idx < nmsg; idx++) {
    if (msg[idx].status != I2C_SUCCESSFUL)
      return msg[idx].status;
  }

  return I2C_SUCCESSFUL;
}
/*
 * Shut down the executive: on SMP request the other cores to shut down,
 * record the result code in processor 0's idle thread, enter the shutdown
 * state, and stop multitasking — control then returns to
 * rtems_initialize_start_multitasking() and on to boot_card().
 * Reaching the code after the if-body means the system was not up, which
 * is reported as a fatal internal error.
 */
void rtems_shutdown_executive(
  uint32_t result
)
{
  if ( _System_state_Is_up( _System_state_Get() ) ) {
    #if defined(RTEMS_SMP)
      _SMP_Request_other_cores_to_shutdown();
    #endif

    _Per_CPU_Information[0].idle->Wait.return_code = result;
    _System_state_Set( SYSTEM_STATE_SHUTDOWN );
    _Thread_Stop_multitasking();

    /*******************************************************************
     *******************************************************************
     ******     RETURN TO RTEMS_INITIALIZE_START_MULTITASKING()   ******
     ******                 AND THEN TO BOOT_CARD()               ******
     *******************************************************************
     *******************************************************************/
  }
  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP
  );
}
/*
 * realloc() on top of the RTEMS protected heap, with the standard
 * semantics:
 *   - realloc(NULL, size) behaves like malloc(size);
 *   - realloc(ptr, 0) frees ptr and returns NULL;
 *   - on allocation failure the original block is left intact.
 * Returns NULL (without setting errno) when called with dispatching
 * disabled while the system is up, i.e. from an ISR or critical section.
 */
void *realloc( void *ptr, size_t size )
{
  uintptr_t old_size;
  char *new_area;

  /*
   * Do not attempt to allocate memory if in a critical section or ISR.
   */
  if (_System_state_Is_up(_System_state_Get())) {
    if (!_Thread_Dispatch_is_enabled())
      return (void *) 0;
  }

  /*
   * Continue with realloc().
   */
  if ( !ptr )
    return malloc( size );

  if ( !size ) {
    free( ptr );
    return (void *) 0;
  }

  /* Reject pointers that are not valid heap blocks. */
  if ( !_Protected_heap_Get_block_size(RTEMS_Malloc_Heap, ptr, &old_size) ) {
    errno = EINVAL;
    return (void *) 0;
  }

  /*
   * Now resize it.  If the block can be resized in place we are done.
   */
  if ( _Protected_heap_Resize_block( RTEMS_Malloc_Heap, ptr, size ) ) {
    return ptr;
  }

  /*
   * There used to be a free on this error case but it is wrong to
   * free the memory per OpenGroup Single UNIX Specification V2
   * and the C Standard.
   */
  new_area = malloc( size );
  if ( !new_area ) {
    return (void *) 0;
  }

  /* Copy the smaller of the old and new sizes, then release the old block. */
  memcpy( new_area, ptr, (size < old_size) ? size : old_size );
  free( ptr );
  return new_area;
}
/*
 * Assert that the executing processor owns the Giant lock.  Before the
 * system is up no ownership is required, so the assertion also passes in
 * that case.
 */
void _Assert_Owner_of_giant( void )
{
  Giant_Control *giant = &_Giant;

  _Assert(
    !_System_state_Is_up( _System_state_Get() )
      || giant->owner_cpu == _SMP_Get_current_processor()
  );
}
/*
 * Shut down the executive: enter the shutdown state and stop
 * multitasking.  A second call while already shut down is a no-op.
 * NOTE(review): the result code is currently unused in this variant.
 */
void rtems_shutdown_executive( uint32_t result )
{
  if ( _System_state_Is_shutdown( _System_state_Get() ) )
    return;

  _System_state_Set( SYSTEM_STATE_SHUTDOWN );
  _Thread_Stop_multitasking();
}
/*
 * malloc() on top of the RTEMS protected heap.  Processes any deferred
 * frees first, rejects zero-size and bad-system-state requests, tries the
 * heap and then the configured extend handler, dirties the memory and
 * updates statistics when the respective helpers are configured.
 * Returns NULL (with errno = ENOMEM on exhaustion) on failure.
 */
void *malloc(
  size_t  size
)
{
  void        *return_this;

  MSBUMP(malloc_calls, 1);

  /*
   *  If some free's have been deferred, then do them now.
   */
  malloc_deferred_frees_process();

  /*
   * Validate the parameters
   */
  if ( !size )
    return (void *) 0;

  /*
   *  Do not attempt to allocate memory if not in correct system state.
   */
  if ( _System_state_Is_up(_System_state_Get()) &&
       !malloc_is_system_state_OK() )
    return NULL;

  /*
   *  Try to give a segment in the current heap if there is not
   *  enough space then try to grow the heap.
   *  If this fails then return a NULL pointer.
   */
  return_this = _Protected_heap_Allocate( RTEMS_Malloc_Heap, size );

  if ( !return_this ) {
    /* Last chance: let the extend handler grow the heap. */
    return_this = (*rtems_malloc_extend_handler)( RTEMS_Malloc_Heap, size );
    if ( !return_this ) {
      errno = ENOMEM;
      return (void *) 0;
    }
  }

  /*
   *  If the user wants us to dirty the allocated memory, then do it.
   */
  if ( rtems_malloc_dirty_helper )
    (*rtems_malloc_dirty_helper)( return_this, size );

  /*
   *  If configured, update the statistics
   */
  if ( rtems_malloc_statistics_helpers )
    (*rtems_malloc_statistics_helpers->at_malloc)(return_this);

  return return_this;
}
/*
 * Print a formatted message.  While the system is up the message goes to
 * stderr; otherwise the polled kernel printer is used, which is safe
 * during early boot and in fatal paths.
 */
static void safe_printf (const char *fmt, ...)
{
  va_list args;

  va_start(args, fmt);
  if ( !_System_state_Is_up( _System_state_Get() ) ) {
    vprintk( fmt, args );
  } else {
    vfprintf( stderr, fmt, args );
  }
  va_end(args);
}
/*
 * Check that the heap walk succeeds when the system state is not
 * SYSTEM_STATE_UP: the state is forced to SYSTEM_STATE_FAILED, the walk
 * is run, and the previous state is restored.  Interrupts are disabled
 * around the temporary global state change.
 */
static void test_system_not_up(void)
{
  rtems_interrupt_level level;

  puts( "start with a system state != SYSTEM_STATE_UP" );

  rtems_interrupt_disable( level );
  System_state_Codes state = _System_state_Get();
  _System_state_Set( SYSTEM_STATE_FAILED );
  test_call_heap_walk( true );
  _System_state_Set( state );
  rtems_interrupt_enable( level );
}
/*
 * Register a memory area for cache coherent allocations.  Once the system
 * is up the allocator lock must protect the registration; before that no
 * locking is needed (or possible).
 */
void rtems_cache_coherent_add_area(
  void *area_begin,
  uintptr_t area_size
)
{
  bool lock_needed = _System_state_Is_up( _System_state_Get() );

  if ( lock_needed ) {
    _RTEMS_Lock_allocator();
  }

  add_area( area_begin, area_size );

  if ( lock_needed ) {
    _RTEMS_Unlock_allocator();
  }
}
void Fatal_extension( uint32_t source, bool is_internal, uint32_t error ) { if ( source != INTERNAL_ERROR_RTEMS_API ) { printk( "unexpected fatal source\n" ); } else if ( is_internal ) { printk( "unexpected fatal is internal\n" ); } else if ( error != 0x81 ) { printk( "unexpected fatal error\n" ); } else { printk( "*** END OF TEST STACK CHECKER ***\n" ); } if ( _System_state_Is_up( _System_state_Get() ) ) _Thread_Stop_multitasking(); }
/*
 * Fatal error extension for the fatal-error test harness.  Compares the
 * received (source, is_internal, error) triple against the expected
 * FATAL_ERROR_* values, printing a diagnostic for each mismatch, and
 * prints the end-of-test banner only when all three match.  Stops
 * multitasking if the system is up.
 */
void Fatal_extension(
  uint32_t   source,
  bool       is_internal,
  uint32_t   error
)
{
  print_test_begin_message();
  printk( "Fatal error (%s) hit\n", FATAL_ERROR_DESCRIPTION );

  if ( source != FATAL_ERROR_EXPECTED_SOURCE ){
    printk( "ERROR==> Fatal Extension source Expected (");
    Put_Source( FATAL_ERROR_EXPECTED_SOURCE );
    printk( ") received (");
    Put_Source( source );
    printk( ")\n" );
  }

  if ( is_internal != FATAL_ERROR_EXPECTED_IS_INTERNAL ) {
    if ( is_internal == TRUE )
      printk(
        "ERROR==> Fatal Extension is internal set to TRUE expected FALSE\n"
      );
    else
      printk(
        "ERROR==> Fatal Extension is internal set to FALSE expected TRUE\n"
      );
  }

  if ( error !=  FATAL_ERROR_EXPECTED_ERROR ) {
    printk( "ERROR==> Fatal Error Expected (");
    Put_Error( source, FATAL_ERROR_EXPECTED_ERROR );
    printk( ") received (");
    Put_Error( source, error );
    printk( ")\n" );
  }

  /* Only a fully matching fatal error counts as a passing test. */
  if ( source == FATAL_ERROR_EXPECTED_SOURCE
       && is_internal == FATAL_ERROR_EXPECTED_IS_INTERNAL
       && error == FATAL_ERROR_EXPECTED_ERROR ) {
    printk( "*** END OF TEST FATAL " FATAL_ERROR_TEST_NAME " ***\n" );
  }

  if ( _System_state_Is_up( _System_state_Get() ) )
    _Thread_Stop_multitasking();
}
/*
 * POSIX-style aligned allocation on the RTEMS protected heap.  Returns 0
 * on success with *pointer set, EINVAL for a NULL pointer argument or a
 * bad system state, and ENOMEM on heap exhaustion.
 */
int rtems_memalign(
  void   **pointer,
  size_t   alignment,
  size_t   size
)
{
  void *return_this;

  /*
   *  Parameter error checks
   */
  if ( !pointer )
    return EINVAL;

  *pointer = NULL;

  /*
   *  Do not attempt to allocate memory if not in correct system state.
   */
  if ( _System_state_Is_up(_System_state_Get()) &&
       !malloc_is_system_state_OK() )
    return EINVAL;

  /*
   *  If some free's have been deferred, then do them now.
   *  NOTE(review): malloc() in this family processes deferred frees
   *  before the system state check — confirm whether the ordering
   *  difference here is intentional.
   */
  malloc_deferred_frees_process();

  /*
   *  Perform the aligned allocation requested
   */
  return_this = _Protected_heap_Allocate_aligned(
    RTEMS_Malloc_Heap,
    size,
    alignment
  );
  if ( !return_this )
    return ENOMEM;

  /*
   *  If configured, update the more involved statistics
   */
  if ( rtems_malloc_statistics_helpers )
    (*rtems_malloc_statistics_helpers->at_malloc)(pointer);

  *pointer = return_this;
  return 0;
}
/*
 * Classify the current system state for the allocator:
 *   - up with dispatching enabled, or before multitasking: normal
 *     allocation is possible;
 *   - up with dispatching disabled: no allocation allowed;
 *   - any other state: allocate without heap protection.
 */
Malloc_System_state _Malloc_System_state( void )
{
  System_state_Codes state = _System_state_Get();

  if ( _System_state_Is_up( state ) ) {
    return _Thread_Dispatch_is_enabled()
      ? MALLOC_SYSTEM_STATE_NORMAL
      : MALLOC_SYSTEM_STATE_NO_ALLOCATION;
  }

  if ( _System_state_Is_before_multitasking( state ) ) {
    return MALLOC_SYSTEM_STATE_NORMAL;
  }

  return MALLOC_SYSTEM_STATE_NO_PROTECTION;
}
/*
 * Polled character output for the MC68681 DUART.  Busy-waits (with a
 * bounded timeout) for the transmitter to become ready, clearing any
 * reported error status along the way, then writes the character to the
 * transmit buffer register.
 */
MC68681_STATIC void mc68681_write_polled(
  int   minor,
  char  cChar
)
{
  uint32_t                pMC68681_port;
  unsigned char           ucLineStatus;
  int                     iTimeout;
  getRegister_f           getReg;
  setRegister_f           setReg;

  pMC68681_port = Console_Port_Tbl[minor]->ulCtrlPort2;
  getReg = Console_Port_Tbl[minor]->getRegister;
  setReg = Console_Port_Tbl[minor]->setRegister;

  /*
   * wait for transmitter holding register to be empty
   */
  iTimeout = 1000;
  ucLineStatus = (*getReg)(pMC68681_port, MC68681_STATUS);
  while ((ucLineStatus & (MC68681_TX_READY|MC68681_TX_EMPTY)) == 0) {

    /* Reset any error condition reported in the upper status bits. */
    if ((ucLineStatus & 0xF0))
      (*setReg)( pMC68681_port, MC68681_COMMAND, MC68681_MODE_REG_RESET_ERROR );

    /*
     * Yield while we wait
     */
#if 0
     if(_System_state_Is_up(_System_state_Get())) {
       rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
     }
#endif
     ucLineStatus = (*getReg)(pMC68681_port, MC68681_STATUS);
     /* Give up after the timeout; the character is written regardless. */
     if(!--iTimeout) {
       break;
     }
  }

  /*
   * transmit character
   */
  (*setReg)(pMC68681_port, MC68681_TX_BUFFER, cChar);
}
/*
 * Run a handler on a set of processors.  Before the system is up the
 * handler is simply invoked locally.  Otherwise the target set is built
 * (all online processors when cpus is NULL), an action node is queued
 * under the multicast lock, the multicast message is sent, pending
 * multicasts are processed locally, and the caller spins until all
 * targets have marked the action done.
 */
void
_SMP_Multicast_action(
  const size_t setsize,
  const cpu_set_t *cpus,
  SMP_Action_handler handler,
  void *arg
)
{
  SMP_Multicast_action node;
  Processor_mask targets;
  SMP_lock_Context lock_context;
  uint32_t i;

  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
    /* Single-processor early boot: just run the handler here. */
    ( *handler )( arg );
    return;
  }

  if( cpus == NULL ) {
    _Processor_mask_Assign( &targets, _SMP_Get_online_processors() );
  } else {
    _Processor_mask_Zero( &targets );

    for ( i = 0; i < _SMP_Get_processor_count(); ++i ) {
      if ( CPU_ISSET_S( i, setsize, cpus ) ) {
        _Processor_mask_Set( &targets, i );
      }
    }
  }

  /* The node lives on this stack; it must not be popped until done. */
  _Chain_Initialize_node( &node.Node );
  node.handler = handler;
  node.arg = arg;
  _Processor_mask_Assign( &node.targets, &targets );
  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );

  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
  _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );

  _SMP_Send_message_multicast( &targets, SMP_MESSAGE_MULTICAST_ACTION );

  /* Process locally too, in case this processor is in the target set. */
  _SMP_Multicasts_try_process();

  /* Acquire-load pairs with the release done by the last target. */
  while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
    /* Wait */
  };
}
/*
 * Obtain the BSP interrupt framework mutex.  Before the system is up no
 * locking is required and RTEMS_SUCCESSFUL is returned immediately.
 * The mutex is created lazily; a race between two creators is resolved
 * inside an interrupt-disabled section — the loser deletes its extra
 * semaphore.  On success the caller holds the mutex.
 */
static rtems_status_code bsp_interrupt_lock(void)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  if (_System_state_Is_up(_System_state_Get())) {
    if (bsp_interrupt_mutex == RTEMS_ID_NONE) {
      rtems_id mutex = RTEMS_ID_NONE;
      rtems_interrupt_level level;

      /* Create a mutex */
      sc = rtems_semaphore_create (
        rtems_build_name('I', 'N', 'T', 'R'),
        1,
        RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY | RTEMS_PRIORITY,
        0,
        &mutex
      );
      if (sc != RTEMS_SUCCESSFUL) {
        return sc;
      }

      /* Assign the mutex */
      rtems_interrupt_disable(level);
      if (bsp_interrupt_mutex == RTEMS_ID_NONE) {
        /* Nobody else assigned the mutex in the meantime */
        bsp_interrupt_mutex = mutex;
        rtems_interrupt_enable(level);
      } else {
        /* Somebody else won */
        rtems_interrupt_enable(level);
        sc = rtems_semaphore_delete(mutex);
        if (sc != RTEMS_SUCCESSFUL) {
          return sc;
        }
      }
    }
    return rtems_semaphore_obtain(
      bsp_interrupt_mutex,
      RTEMS_WAIT,
      RTEMS_NO_TIMEOUT
    );
  } else {
    /* Not up yet: no concurrency, nothing to lock. */
    return RTEMS_SUCCESSFUL;
  }
}
/* Get id of the next thread after athread, if argument <= 0 find the
   first available thread, return thread if found or 0 if not.

   Before the system is up only the pseudo idle thread (index 1) exists:
   it is returned when asked for the first thread, otherwise 0.  Once up,
   the object API tables are scanned in the same flat layout used by the
   other index mapping routines, and the first occupied slot at or after
   the requested position is returned. */
int rtems_gdb_stub_get_next_thread( int gdb_index )
{
  int next_gdb_index = 0;
  int first = 1;
  size_t api_index;

  if (_System_state_Get() != SYSTEM_STATE_UP) {
    /* We have one thread let us use value of idle thread */
    return (gdb_index < 1) ? 1 : 0;
  }

  for (
    api_index = 1;
    next_gdb_index == 0 && api_index <= OBJECTS_APIS_LAST;
    ++api_index
  ) {
    if (_Objects_Information_table[api_index] != NULL) {
      const Objects_Information *info =
        _Objects_Information_table[api_index][1];
      Objects_Id min_id = info->minimum_id;
      Objects_Id max_id = info->maximum_id;
      int last = first + (int) (max_id - min_id);

      if (gdb_index <= last) {
        /* Start at 'first' when the previous index precedes this API's
           range, otherwise at the slot after the previous index. */
        int start = gdb_index < first ? first : gdb_index + 1;
        int potential_next;

        for (
          potential_next = start;
          next_gdb_index == 0 && potential_next <= last;
          ++potential_next
        ) {
          /* local_table slot 0 is unused; slots are 1-based. */
          if (info->local_table[potential_next - first + 1] != NULL) {
            next_gdb_index = potential_next;
          }
        }
      }

      first = last + 1;
    }
  }

  return next_gdb_index;
}
/*
 * Initialize the BSP interrupt support: under the framework lock, reserve
 * workspace for the framework semaphore when still before initialization,
 * fail with RTEMS_INTERNAL_ERROR on double initialization, fill the
 * handler table with the empty handler, run the facility-specific
 * initialization, and finally mark the framework initialized.
 */
rtems_status_code bsp_interrupt_initialize(void)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  size_t i = 0;

  sc = bsp_interrupt_lock();
  if (sc != RTEMS_SUCCESSFUL) {
    return sc;
  }

  /* We need one semaphore */
  if (_System_state_Is_before_initialization(_System_state_Get())) {
    Configuration.work_space_size += sizeof(Semaphore_Control);
    ++Configuration_RTEMS_API.maximum_semaphores;
  }

  if (bsp_interrupt_is_initialized()) {
    /* Already initialized: release the lock and report the error. */
    bsp_interrupt_unlock();
    return RTEMS_INTERNAL_ERROR;
  }

  /* Initialize handler table */
  for (i = 0; i < BSP_INTERRUPT_HANDLER_TABLE_SIZE; ++i) {
    bsp_interrupt_handler_table [i].handler = bsp_interrupt_handler_empty;
    bsp_interrupt_handler_table [i].arg = (void *) i;
  }

  sc = bsp_interrupt_facility_initialize();
  if (sc != RTEMS_SUCCESSFUL) {
    bsp_interrupt_unlock();
    return sc;
  }

  bsp_interrupt_set_initialized();

  sc = bsp_interrupt_unlock();
  if (sc != RTEMS_SUCCESSFUL) {
    return sc;
  }

  return RTEMS_SUCCESSFUL;
}
/* Return the RTEMS thread id from a gdb thread id */ Thread_Control *rtems_gdb_index_to_stub_id( int gdb_index ) { Thread_Control *th = NULL; int first = 1; size_t api_index; ASSERT(registers != NULL); if (_System_state_Get() != SYSTEM_STATE_UP || gdb_index <= 0) { /* Should not happen */ return NULL; } for ( api_index = 1; th == NULL && api_index <= OBJECTS_APIS_LAST; ++api_index ) { if (_Objects_Information_table[api_index] != NULL) { const Objects_Information *info = _Objects_Information_table[api_index][1]; Objects_Id min_id = info->minimum_id; Objects_Id max_id = info->maximum_id; int last = first + (int) (max_id - min_id); if (gdb_index <= first + (int) (max_id - min_id)) { th = (Thread_Control *) info->local_table[gdb_index - first + 1]; } first = last + 1; } } return th; }
/*
 * Tear down the C library on shutdown: restore the global reentrancy
 * structure and drain the standard streams.  Does nothing if the system
 * is no longer up.
 */
void libc_wrapup(void)
{
  /*
   *  In case RTEMS is already down, don't do this.  It could be
   *  dangerous.
   */

  if (!_System_state_Is_up(_System_state_Get()))
     return;

  /*
   *  This was already done if the user called exit() directly .
  _wrapup_reent(0);
   */

  if (_REENT != _global_impure_ptr) {
      _wrapup_reent(_global_impure_ptr);
#if 0
      /*  Don't reclaim this one, just in case we do printfs
       *  on the way out to ROM.
       */
      _reclaim_reent(&libc_global_reent);
#endif
      _REENT = _global_impure_ptr;
  }

  /*
   * Try to drain output buffers.
   *
   * Should this be changed to do *all* file streams?
   *    _fwalk (_REENT, fclose);
   */

  fclose (stdin);
  fclose (stdout);
  fclose (stderr);
}
/*
 * malloc() on top of the RTEMS protected heap (sbrk/boundary-helper
 * variant).  Processes deferred frees, rejects zero-size and bad-state
 * requests, optionally walks the heap and pads for a boundary area, tries
 * the heap and then the sbrk helper, dirties memory and updates
 * statistics/boundary data when the respective helpers are configured.
 * Returns NULL (errno = ENOMEM on exhaustion) on failure.
 */
void *malloc(
  size_t  size
)
{
  void        *return_this;

  MSBUMP(malloc_calls, 1);

  /*
   *  If some free's have been deferred, then do them now.
   */
  malloc_deferred_frees_process();

  /*
   * Validate the parameters
   */
  if ( !size )
    return (void *) 0;

  /*
   *  Do not attempt to allocate memory if not in correct system state.
   */
  if ( _System_state_Is_up(_System_state_Get()) &&
       !malloc_is_system_state_OK() )
    return NULL;

  /*
   *  Walk the heap and verify its integrity
   */
  #if defined(RTEMS_HEAP_DEBUG)
    _Protected_heap_Walk( RTEMS_Malloc_Heap, 0, false );
  #endif

  #if defined(RTEMS_MALLOC_BOUNDARY_HELPERS)
    /*
     *  If the support for a boundary area at the end of the heap
     *  block allocated is turned on, then adjust the size.
     */
    if (rtems_malloc_boundary_helpers)
      size += (*rtems_malloc_boundary_helpers->overhead)();
  #endif

  /*
   *  Try to give a segment in the current heap if there is not
   *  enough space then try to grow the heap.
   *  If this fails then return a NULL pointer.
   */
  return_this = _Protected_heap_Allocate( RTEMS_Malloc_Heap, size );

  if ( !return_this ) {
    /* Last chance: ask the sbrk helper to extend the heap. */
    if (rtems_malloc_sbrk_helpers)
      return_this = (*rtems_malloc_sbrk_helpers->extend)( size );
    if ( !return_this ) {
      errno = ENOMEM;
      return (void *) 0;
    }
  }

  /*
   *  If the user wants us to dirty the allocated memory, then do it.
   */
  if ( rtems_malloc_dirty_helper )
    (*rtems_malloc_dirty_helper)( return_this, size );

  /*
   *  If configured, update the statistics
   */
  if ( rtems_malloc_statistics_helpers )
    (*rtems_malloc_statistics_helpers->at_malloc)(return_this);

  #if defined(RTEMS_MALLOC_BOUNDARY_HELPERS)
    /*
     * If configured, set the boundary area
     */
    if (rtems_malloc_boundary_helpers)
      (*rtems_malloc_boundary_helpers->at_malloc)(return_this, size);
  #endif

  return return_this;
}
/*
 * Verify the integrity of a heap.  Trivially true before the system is
 * up.  After checking the control structure, every block is visited in
 * address order and checked for: next block inside the heap, page-aligned
 * size, minimum size, and strictly increasing addresses (the last block
 * is exempt from the size/ordering checks).  Free blocks get an
 * additional free-list consistency check.  When 'dump' is true each block
 * is printed via the selected printer.  Returns false on the first
 * inconsistency found.
 */
bool _Heap_Walk(
  Heap_Control *heap,
  int source,
  bool dump
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  Heap_Block *const first_block = heap->first_block;
  Heap_Block *const last_block = heap->last_block;
  Heap_Block *block = first_block;
  Heap_Walk_printer printer = dump ?
    _Heap_Walk_print : _Heap_Walk_print_nothing;

  if ( !_System_state_Is_up( _System_state_Get() ) ) {
    return true;
  }

  if ( !_Heap_Walk_check_control( source, printer, heap ) ) {
    return false;
  }

  do {
    uintptr_t const block_begin = (uintptr_t) block;
    uintptr_t const block_size = _Heap_Block_size( block );
    bool const prev_used = _Heap_Is_prev_used( block );
    Heap_Block *const next_block = _Heap_Block_at( block, block_size );
    uintptr_t const next_block_begin = (uintptr_t) next_block;
    bool const is_not_last_block = block != last_block;

    if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: next block 0x%08x not in heap\n",
        block,
        next_block
      );

      return false;
    }

    if ( !_Heap_Is_aligned( block_size, page_size ) && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: block size %u not page aligned\n",
        block,
        block_size
      );

      return false;
    }

    if ( block_size < min_block_size && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: size %u < min block size %u\n",
        block,
        block_size,
        min_block_size
      );

      return false;
    }

    if ( next_block_begin <= block_begin && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: next block 0x%08x is not a successor\n",
        block,
        next_block
      );

      return false;
    }

    /* The next block's prev_used flag tells whether THIS block is free. */
    if ( !_Heap_Is_prev_used( next_block ) ) {
      if ( !_Heap_Walk_check_free_block( source, printer, heap, block ) ) {
        return false;
      }
    } else if (prev_used) {
      (*printer)(
        source,
        false,
        "block 0x%08x: size %u\n",
        block,
        block_size
      );
    } else {
      (*printer)(
        source,
        false,
        "block 0x%08x: size %u, prev_size %u\n",
        block,
        block_size,
        block->prev_size
      );
    }

    block = next_block;
  } while ( block != first_block );

  return true;
}
/*
 * Get and set the execution mode of the calling task.  The previous mode
 * (preemption, timeslicing, ASR enable, interrupt level) is always
 * returned through previous_mode_set.  Only the mode components selected
 * by 'mask' are changed.  Enabling ASRs with signals pending, or a mode
 * change that makes a higher priority task runnable, triggers a dispatch
 * when the system is up.
 */
rtems_status_code rtems_task_mode(
  rtems_mode  mode_set,
  rtems_mode  mask,
  rtems_mode *previous_mode_set
)
{
  Thread_Control     *executing;
  RTEMS_API_Control  *api;
  ASR_Information    *asr;
  bool                is_asr_enabled = false;
  bool                needs_asr_dispatching = false;
  rtems_mode          old_mode;

  if ( !previous_mode_set )
    return RTEMS_INVALID_ADDRESS;

  executing     = _Thread_Executing;
  api = executing->API_Extensions[ THREAD_API_RTEMS ];
  asr = &api->Signal;

  /* Assemble the current mode word from the thread's settings. */
  old_mode  = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;

  if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
    old_mode |= RTEMS_NO_TIMESLICE;
  else
    old_mode |= RTEMS_TIMESLICE;

  old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
  old_mode |= _ISR_Get_level();

  *previous_mode_set = old_mode;

  /*
   *  These are generic thread scheduling characteristics.
   */
  if ( mask & RTEMS_PREEMPT_MASK )
    executing->is_preemptible = _Modes_Is_preempt(mode_set) ? true : false;

  if ( mask & RTEMS_TIMESLICE_MASK ) {
    if ( _Modes_Is_timeslice(mode_set) ) {
      executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
      executing->cpu_time_budget  = _Thread_Ticks_per_timeslice;
    } else
      executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
  }

  /*
   *  Set the new interrupt level
   */
  if ( mask & RTEMS_INTERRUPT_MASK )
    _Modes_Set_interrupt_level( mode_set );

  /*
   *  This is specific to the RTEMS API
   */
  is_asr_enabled = false;
  needs_asr_dispatching = false;

  if ( mask & RTEMS_ASR_MASK ) {
    is_asr_enabled = _Modes_Is_asr_disabled( mode_set ) ? false : true;
    if ( is_asr_enabled != asr->is_enabled ) {
      asr->is_enabled = is_asr_enabled;
      /* Swap posted/pending signal sets; newly enabled pending signals
         must be delivered via a dispatch. */
      _ASR_Swap_signals( asr );
      if ( _ASR_Are_signals_pending( asr ) ) {
        needs_asr_dispatching = true;
        executing->do_post_task_switch_extension = true;
      }
    }
  }

  if ( _System_state_Is_up( _System_state_Get() ) )
    if ( _Thread_Evaluate_mode() || needs_asr_dispatching )
      _Thread_Dispatch();

  return RTEMS_SUCCESSFUL;
}
static bool before_multitasking(void) { return _System_state_Is_before_multitasking(_System_state_Get()); }
/* Not Efficient but simple */
/*
 * Read Motorola VPD (vital product data) from the I2C EEPROM and fill in
 * the requested fields in 'data' (an array terminated by key == End).
 * The EEPROM must start with the literal "MOTOROLA"; the rest is a
 * sequence of (key, length) headers followed by payload.  For each
 * matching key the requested instance's payload is copied (truncated to
 * the buffer length) and 'found' is set to the field length.  Returns 0
 * on success, -1 on any read/format error.
 */
int
BSP_vpdRetrieveFields(VpdBuf data)
{
  VpdBuf b, b1;
  VpdKey k;
  int l,fd = -1, put, got;
  int rval = -1;
  unsigned char mot[9];

  static int  (*stop)(int fd);

  memset(mot,0,sizeof(mot));

  /* NOTE(review): the `0 &&` makes this branch dead code, so the early
   * (pre-RTEMS) access path is always used — presumably a deliberate
   * workaround; confirm before re-enabling the filesystem path. */
  if ( 0 && _System_state_Is_up(_System_state_Get()) ) {
    read_bytes = read;
    stop       = close;

    fd = open(BSP_I2C_VPD_EEPROM_DEV_NAME, 0);
    if ( fd < 0 )
      return -1;
  } else {
    fd = (int)dev;
    /* init(dev);
     *
     * Hangs - probably would need a delay here - just leave motload settings
     */
    read_bytes = early_read;
    stop       = early_close;
  }

  /* The image must begin with the 8-byte "MOTOROLA" signature. */
  if ( read_bytes(fd, mot, 8) < 8 ) {
    goto bail;
  }

  if ( strcmp((char*)mot,"MOTOROLA") )
    goto bail;

  l = 0;

  do {
    /* skip field -- this is not done the first time since l=0 */
    /* NOTE(review): 'l' (int) is compared against sizeof(mot) (size_t);
     * with l kept non-negative by the reads this is benign, but the
     * signed/unsigned comparison is worth confirming. */
    while ( l > sizeof(mot) ) {
      got = read_buf(fd, mot, sizeof(mot));
      if ( got < 1 )
        goto bail;
      l -= got;
    }
    if ( read_buf(fd, mot, l) < 0 )
      goto bail;

    /* now get a new header */
    if ( read_buf(fd, mot, 2) < 2 )
      goto bail;

    k = mot[0];
    l = mot[1];

    for ( b = data; b->key != End; b++ ) {
      /* Negative instance marks an already-satisfied request. */
      if ( b->key == k && (signed char)b->instance >= 0 ) {
        if ( 0 == b->instance-- ) {
          /* found 'instance' of field 'type' */

          /* limit to buffer size */
          put = b->buflen > l ? l : b->buflen;
          if ( read_buf(fd, b->buf, put) < put )
            goto bail;

          /* if this instance is multiply requested, copy the data */
          for ( b1 = b + 1; b1->key != End; b1++ ) {
            if ( b1->key == k && 0 == b1->instance ) {
              b1->instance--;
              /* we dont' handle the case where
               * the first buffer couldn't hold the entire
               * item but this one could...
               */
              memcpy(b1->buf, b->buf, put);
              b1->found = mot[1];
            }
          }

          l -= put;
          b->found = mot[1];
        }
      }
    }
  } while ( k != End );

  rval = 0;

bail:
  /* Close via whichever close routine matches the open path used. */
  stop(fd);
  return rval;
}
/* Get thread information, return 0 if thread does not exist and 1 otherwise */ static int rtems_gdb_stub_get_thread_info( int gdb_index, struct rtems_gdb_stub_thread_info *info ) { int first = 1; size_t api_index; ASSERT(info != NULL); if (gdb_index <= 0) { return 0; } if (_System_state_Get() != SYSTEM_STATE_UP || gdb_index == 1) { /* We have one thread let us use value which will never happen for real thread */ strcpy(info->display, "idle thread"); strcpy(info->name, "IDLE"); info->more_display[0] = 0; /* Nothing */ return 1; } for ( api_index = 1; api_index <= OBJECTS_APIS_LAST; ++api_index ) { if (_Objects_Information_table[api_index] != NULL) { const Objects_Information *obj_info = _Objects_Information_table[api_index][1]; Objects_Id min_id = obj_info->minimum_id; Objects_Id max_id = obj_info->maximum_id; int last = first + (int) (max_id - min_id); if (gdb_index <= last) { Thread_Control *th = (Thread_Control *) obj_info->local_table[gdb_index - first + 1]; if (th != NULL) { char tmp_buf[9]; strcpy(info->display, "task: control at 0x"); tmp_buf[0] = gdb_hexchars[(((int)th) >> 28) & 0xf]; tmp_buf[1] = gdb_hexchars[(((int)th) >> 24) & 0xf]; tmp_buf[2] = gdb_hexchars[(((int)th) >> 20) & 0xf]; tmp_buf[3] = gdb_hexchars[(((int)th) >> 16) & 0xf]; tmp_buf[4] = gdb_hexchars[(((int)th) >> 12) & 0xf]; tmp_buf[5] = gdb_hexchars[(((int)th) >> 8) & 0xf]; tmp_buf[6] = gdb_hexchars[(((int)th) >> 4) & 0xf]; tmp_buf[7] = gdb_hexchars[((int)th) & 0xf]; tmp_buf[8] = 0; strcat(info->display, tmp_buf); rtems_object_get_name( th->Object.id, 5, info->name ); info->more_display[0] = 0; /* Nothing */ return 1; } else { /* Thread does not exist */ return 0; } }