/*
 * Report whether the system is currently in a state in which malloc()
 * may be called: thread dispatching must be enabled.
 */
bool malloc_is_system_state_OK(void)
{
  return _Thread_Dispatch_is_enabled();
}
/*
 * Perform a thread dispatch on behalf of the executing thread if one is
 * needed and dispatching is currently enabled.
 *
 * executing             - the currently executing thread
 * needs_asr_dispatching - true if ASR processing already made a dispatch
 *                         necessary
 */
static void _RTEMS_Tasks_Dispatch_if_necessary(
  Thread_Control *executing,
  bool            needs_asr_dispatching
)
{
  if ( _Thread_Dispatch_is_enabled() ) {
    bool dispatch_necessary = needs_asr_dispatching;

    /*
     * FIXME: This locking approach is brittle. It only works since the
     * current simple SMP scheduler has no support for the non-preempt mode.
     */
#if defined( RTEMS_SMP )
    ISR_Level level;

    _ISR_Disable( level );
#endif

    /* A preemptible thread that is no longer the heir must also dispatch. */
    if ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) {
      dispatch_necessary = true;
      _Thread_Dispatch_necessary = dispatch_necessary;
    }

#if defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

    if ( dispatch_necessary ) {
      _Thread_Dispatch();
    }
  }
}
/*
 * Decide whether a block handed to free() may actually be returned to the
 * heap now, or whether its release must be delayed.
 *
 * Returns true if the caller should free the block immediately.
 */
static bool _Heap_Protection_determine_block_free(
  Heap_Control *heap,
  Heap_Block   *block
)
{
  bool do_free = true;
  Heap_Block *const next = block->Protection_begin.next_delayed_free_block;

  /*
   * Sometimes after a free the allocated area is still in use. An example
   * is the task stack of a thread that deletes itself. The thread dispatch
   * disable level is a way to detect this use case.
   */
  if ( _Thread_Dispatch_is_enabled() ) {
    if ( next == NULL ) {
      /* Not yet queued: put it on the delayed-free list instead of freeing. */
      _Heap_Protection_delay_block_free( heap, block );
      do_free = false;
    } else if ( next == HEAP_PROTECTION_OBOLUS ) {
      /* Marked ready for release: run the free-time consistency checks. */
      _Heap_Protection_check_free_block( heap, block );
    } else {
      /* Still queued for delayed free: freeing it again now is an error. */
      _Heap_Protection_block_error( heap, block );
    }
  } else if ( next == NULL ) {
    /*
     * This is a hack to prevent heavy workspace fragmentation which would
     * lead to test suite failures.
     */
    _Heap_Protection_free_all_delayed_blocks( heap );
  }

  return do_free;
}
/*
 * Resize an allocation from the RTEMS malloc heap.
 *
 * Returns the (possibly moved) area on success; NULL if the system cannot
 * allocate right now, if size is zero (the block is freed), if ptr is not
 * a valid heap block (errno = EINVAL), or if the new allocation fails (the
 * original block is left intact in that case).
 */
void *realloc( void *ptr, size_t size )
{
  uintptr_t  current_size;
  char      *replacement;

  /*
   * Do not attempt to allocate memory if in a critical section or ISR.
   */
  if ( _System_state_Is_up( _System_state_Get() )
       && !_Thread_Dispatch_is_enabled() ) {
    return (void *) 0;
  }

  /* realloc( NULL, size ) behaves like malloc( size ). */
  if ( ptr == (void *) 0 ) {
    return malloc( size );
  }

  /* realloc( ptr, 0 ) frees the block and yields no allocation. */
  if ( size == 0 ) {
    free( ptr );
    return (void *) 0;
  }

  if ( !_Protected_heap_Get_block_size( RTEMS_Malloc_Heap, ptr, &current_size ) ) {
    errno = EINVAL;
    return (void *) 0;
  }

  /* Try to grow or shrink the block in place first. */
  if ( _Protected_heap_Resize_block( RTEMS_Malloc_Heap, ptr, size ) ) {
    return ptr;
  }

  /*
   * In-place resize failed: allocate a fresh area and copy the data over.
   * Per the OpenGroup Single UNIX Specification V2 and the C Standard the
   * original block must stay valid when the allocation fails, so it is
   * only freed on success.
   */
  replacement = malloc( size );
  if ( replacement == (void *) 0 ) {
    return (void *) 0;
  }

  memcpy( replacement, ptr, size < current_size ? size : current_size );
  free( ptr );

  return replacement;
}
/*
 * Print a fatal-error report on the console (RTEMS version, error source,
 * error code, dispatching/ISR context, stack trace) and then ask the user
 * whether to reboot.
 *
 * v - fatal error code; THESRC/THEERR extract the source and error number.
 */
void _BSP_Fatal_error(unsigned int v)
{
  unsigned long flags;
  const char *err = 0;

  /* Shut out interrupts while producing the report. */
  rtems_interrupt_disable(flags);
  (void) flags; /* avoid set but not used warning */

  printk("%s\n",_RTEMS_version);
  printk("FATAL ERROR:\n");
  printk("Environment:");
  switch (THESRC) {
    case INTERNAL_ERROR_CORE:
      printk(" RTEMS Core\n");
      err = rtems_internal_error_text(THEERR);
      break;

    case INTERNAL_ERROR_RTEMS_API:
      printk(" RTEMS API\n");
      err = rtems_status_text(THEERR);
      break;

    case INTERNAL_ERROR_POSIX_API:
      printk(" POSIX API (errno)\n");
      /* could use strerror but I'd rather avoid using this here */
      break;

    default:
      printk(" UNKNOWN (0x%x)\n",THESRC);
      break;
  }

  /* Report whether the error happened with thread dispatching disabled. */
  if ( _Thread_Dispatch_is_enabled() )
    printk("enabled\n");
  else
    printk(
      " Error occurred in a Thread Dispatching DISABLED context (level %i)\n",
      _Thread_Dispatch_get_disable_level());

  if ( _ISR_Nest_level ) {
    printk(
      " Error occurred from ISR context (ISR nest level %i)\n",
      _ISR_Nest_level
    );
  }

  printk("Error %d",THEERR);
  if (err) {
    printk(": %s",err);
  }
  printk("\n");

  printk("Stack Trace:\n");
  CPU_print_stack();

  rebootQuestion();
}
/*
 * This routine provides the RTEMS interrupt management.
 *
 * vector - interrupt vector number used to index _ISR_Vector_table.
 *
 * Dispatching is deferred while inside the handler; on exit from the
 * outermost interrupt a thread dispatch is performed if one became
 * necessary and dispatching is enabled.
 */
void __ISR_Handler( uint32_t vector)
{
  ISR_Level level;

  _ISR_Disable( level );

  /* Entering interrupt context: keep the dispatcher out until we leave. */
  _Thread_Dispatch_increment_disable_level();

#if (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  if ( _ISR_Nest_level == 0 ) {
    /* Install irq stack */
    _old_stack_ptr = stack_ptr;
    stack_ptr = _CPU_Interrupt_stack_high;
  }
#endif

  _ISR_Nest_level++;

  _ISR_Enable( level );

  /* call isp */
  if ( _ISR_Vector_table[ vector])
    (*_ISR_Vector_table[ vector ])( vector );

  _ISR_Disable( level );

  _Thread_Dispatch_decrement_disable_level();
  _ISR_Nest_level--;

#if (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  if ( _ISR_Nest_level == 0 )
    /* restore old stack pointer */
    stack_ptr = _old_stack_ptr;
#endif

  _ISR_Enable( level );

  /* Still nested inside another ISR: let the outermost instance dispatch. */
  if ( _ISR_Nest_level )
    return;

  /* Interrupted code had dispatching disabled: do not preempt it. */
  if ( !_Thread_Dispatch_is_enabled() ) {
    return;
  }

  if ( _Thread_Dispatch_necessary ) {
    _Thread_Dispatch();
  }
}
/*
 * Announce a clock tick: advance the time of day, run the watchdog
 * timers, and give the scheduler its per-tick processing.  A thread
 * dispatch is performed when one is needed and dispatching is enabled.
 *
 * Always returns RTEMS_SUCCESSFUL.
 */
rtems_status_code rtems_clock_tick( void )
{
  bool switch_needed;

  _TOD_Tickle_ticks();
  _Watchdog_Tickle_ticks();
  _Scheduler_Tick();

  switch_needed = _Thread_Is_context_switch_necessary();
  if ( switch_needed && _Thread_Dispatch_is_enabled() ) {
    _Thread_Dispatch();
  }

  return RTEMS_SUCCESSFUL;
}
/*
 * Classify the current system state for the malloc family:
 *  - system up, dispatching enabled  -> NORMAL
 *  - system up, dispatching disabled -> NO_ALLOCATION
 *  - before multitasking             -> NORMAL
 *  - anything else                   -> NO_PROTECTION
 */
Malloc_System_state _Malloc_System_state( void )
{
  System_state_Codes current = _System_state_Get();

  if ( _System_state_Is_up( current ) ) {
    return _Thread_Dispatch_is_enabled() ?
      MALLOC_SYSTEM_STATE_NORMAL : MALLOC_SYSTEM_STATE_NO_ALLOCATION;
  }

  if ( _System_state_Is_before_multitasking( current ) ) {
    return MALLOC_SYSTEM_STATE_NORMAL;
  }

  return MALLOC_SYSTEM_STATE_NO_PROTECTION;
}
/*
 * Walk the given heap, taking the allocator mutex only when it is safe
 * to do so.
 *
 * If we are called from within a dispatching critical section, then it
 * is forbidden to lock a mutex.  But since we are inside a critical
 * section, it should be safe to walk it unlocked.
 *
 * NOTE: Dispatching is also disabled during initialization.
 */
bool _Protected_heap_Walk(
  Heap_Control *the_heap,
  int           source,
  bool          do_dump
)
{
  bool walk_ok;
  bool use_lock = _Thread_Dispatch_is_enabled();

  if ( use_lock ) {
    _RTEMS_Lock_allocator();
  }

  walk_ok = _Heap_Walk( the_heap, source, do_dump );

  if ( use_lock ) {
    _RTEMS_Unlock_allocator();
  }

  return walk_ok;
}
/*
 * Assert that a thread dispatch cannot happen at this point: either the
 * thread dispatch disable level is non-zero, or we are executing at a
 * non-zero interrupt level.
 */
void _Assert_Thread_dispatching_repressed( void )
{
  _Assert( !_Thread_Dispatch_is_enabled() || _ISR_Get_level() != 0 );
}
/*
 * Sanity checks for code paths that must run with thread dispatching
 * disabled: the allocator mutex must not be held and thread life
 * protection must not be active.
 */
static void assert_thread_dispatch_disabled_context(void)
{
  assert(!_Thread_Dispatch_is_enabled());
  assert(!_RTEMS_Allocator_is_owner());
  assert(!life_protected());
}
/*
 * Sanity checks for thread context holding the allocator mutex:
 * dispatching must be enabled and life protection active — except before
 * multitasking starts, when neither condition can hold yet.
 */
static void assert_allocator_protected_thread_context(void)
{
  assert(_Thread_Dispatch_is_enabled() || before_multitasking());
  assert(_RTEMS_Allocator_is_owner());
  assert(life_protected() || before_multitasking());
}