void _Thread_Disable_dispatch( void )
{
  /*
   * Optional heavyweight diagnostic: check the current stack for overflow
   * on every dispatch disable.  This is very costly, but since mutexes
   * disable dispatch constantly in most systems, it tends to catch blown
   * stacks quickly.  Enable by defining RTEMS_HEAVY_STACK_DEBUG.
   */
#if defined(RTEMS_HEAVY_STACK_DEBUG)
  if ( _System_state_Is_up( _System_state_Get() )
       && _ISR_Nest_level == 0
       && rtems_stack_checker_is_blown() ) {
    printk( "Stack blown!!\n" );
    rtems_fatal_error_occurred( 99 );
  }
#endif

  _Thread_Dispatch_increment_disable_level();
  RTEMS_COMPILER_MEMORY_BARRIER();

  /*
   * Even heavier diagnostic: walk the RTEMS malloc heap on the outermost
   * dispatch disable (level transitioned 0 -> 1) of every system call.
   * Enable by defining RTEMS_HEAVY_MALLOC_DEBUG.
   */
#if defined(RTEMS_HEAVY_MALLOC_DEBUG)
  if ( _Thread_Dispatch_get_disable_level() == 1 ) {
    _Heap_Walk( RTEMS_Malloc_Heap, 99, false );
  }
#endif
}
/*
 * Report an unrecoverable error on the console and hand control to
 * rebootQuestion() (presumably prompts for / performs a reset -- defined
 * elsewhere; confirm in the BSP).
 *
 * NOTE(review): THESRC and THEERR are macros not visible in this file;
 * they appear to decode the error source and error code from the packed
 * argument 'v' -- confirm against the BSP header before relying on this.
 */
void _BSP_Fatal_error(unsigned int v)
{
  unsigned long flags;
  const char *err = 0;   /* optional human-readable error text, NULL if none */

  /* No recovery is possible; mask interrupts for the remainder. */
  rtems_interrupt_disable(flags);
  (void) flags; /* avoid set but not used warning */

  printk("%s\n",_RTEMS_version);
  printk("FATAL ERROR:\n");
  printk("Environment:");
  /* Decode the error source; look up error text where an API exists. */
  switch (THESRC) {
    case INTERNAL_ERROR_CORE:
      printk(" RTEMS Core\n");
      err = rtems_internal_error_text(THEERR);
    break;

    case INTERNAL_ERROR_RTEMS_API:
      printk(" RTEMS API\n");
      err = rtems_status_text(THEERR);
    break;

    case INTERNAL_ERROR_POSIX_API:
      printk(" POSIX API (errno)\n");
      /* could use strerror but I'd rather avoid using this here */
    break;

    default:
      printk(" UNKNOWN (0x%x)\n",THESRC);
    break;
  }

  /* Show whether the fault happened with thread dispatching disabled. */
  if ( _Thread_Dispatch_is_enabled() )
    printk("enabled\n");
  else
    printk(
      " Error occurred in a Thread Dispatching DISABLED context (level %i)\n",
      _Thread_Dispatch_get_disable_level());

  /* A non-zero nest level means the error came from interrupt context. */
  if ( _ISR_Nest_level ) {
    printk(
      " Error occurred from ISR context (ISR nest level %i)\n",
      _ISR_Nest_level
    );
  }

  printk("Error %d",THEERR);
  if (err) {
    printk(": %s",err);
  }
  printk("\n");

  printk("Stack Trace:\n");
  CPU_print_stack();

  rebootQuestion();
}
void rtems_smp_process_interrupt( void ) { Per_CPU_Control *self_cpu = _Per_CPU_Get(); if ( self_cpu->message != 0 ) { uint32_t message; ISR_Level level; _Per_CPU_Lock_acquire( self_cpu, level ); message = self_cpu->message; self_cpu->message = 0; _Per_CPU_Lock_release( self_cpu, level ); #if defined(RTEMS_DEBUG) { void *sp = __builtin_frame_address(0); if ( !(message & RTEMS_BSP_SMP_SHUTDOWN) ) { printk( "ISR on CPU %d -- (0x%02x) (0x%p)\n", _Per_CPU_Get_index( self_cpu ), message, sp ); if ( message & RTEMS_BSP_SMP_SIGNAL_TO_SELF ) printk( "signal to self\n" ); if ( message & RTEMS_BSP_SMP_SHUTDOWN ) printk( "shutdown\n" ); } printk( "Dispatch level %d\n", _Thread_Dispatch_get_disable_level() ); } #endif if ( ( message & RTEMS_BSP_SMP_SHUTDOWN ) != 0 ) { _ISR_Disable( level ); _Thread_Dispatch_set_disable_level( 0 ); _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_SHUTDOWN ); _CPU_Fatal_halt( _Per_CPU_Get_index( self_cpu ) ); /* does not continue past here */ } } }
/*
 * Leave interrupt context on this core.
 *
 * Returns 1 when the caller (the low-level ISR epilogue) should perform a
 * thread dispatch: that is, only when this was the outermost nested ISR
 * (_ISR_Nest_level reaches 0), a dispatch is necessary, and the dispatch
 * disable level is exactly 1 (i.e. only the ISR's own disable remains).
 * Returns 0 otherwise.  When returning 0, other cores are asked to
 * dispatch instead.
 */
int _ISR_SMP_Exit(void)
{
  ISR_Level level;
  int       retval;

  retval = 0;
  /* The nest-level bookkeeping and dispatch decision must be atomic. */
  _ISR_Disable_on_this_core( level );
  _ISR_Nest_level--;

  if ( _ISR_Nest_level == 0 ) {
    if ( _Thread_Dispatch_necessary ) {
      if ( _Thread_Dispatch_get_disable_level() == 1 ) {
        retval = 1;
      }
    }
  }

  /*
   * SPARC has special support to avoid some nasty recursive type behaviour.
   * When dispatching in a thread and we want to return to it then it needs
   * to finish.
   */
  #if defined(__sparc__)
    if ( _CPU_ISR_Dispatch_disable )
      retval = 0;
  #endif

  _ISR_Enable_on_this_core( level );

  /* Balance the disable done by the matching ISR-enter path. */
  _Thread_Dispatch_decrement_disable_level();

  /* Not dispatching here -- give the other cores a chance to. */
  if ( retval == 0 )
    _SMP_Request_other_cores_to_dispatch();

  return retval;
}
/*
 * Handle an inter-processor interrupt on this CPU (spinlock-protected
 * per-CPU message variant).  Depending on the pending message bits this
 * either starts the first task (never returns), shuts the CPU down
 * (never returns), or acknowledges a context-switch request.
 *
 * NOTE(review): inside the RTEMS_DEBUG block, the "shutdown" printk is
 * guarded by !(message & RTEMS_BSP_SMP_SHUTDOWN) and is therefore
 * unreachable -- likely a latent bug; left untouched here.
 *
 * NOTE(review): the pattern `...message &= ~message` clears EVERY bit in
 * the local snapshot, not just the one being handled -- confirm this is
 * intentional (all pending messages are consumed at once).
 */
void rtems_smp_process_interrupt(void)
{
  int        cpu;
  uint32_t   message;
  ISR_Level  level;

  cpu = bsp_smp_processor_id();

  /* Snapshot the pending message bits under the per-CPU spinlock. */
  level = _SMP_lock_spinlock_simple_Obtain( &_Per_CPU_Information[cpu].lock );
  message = _Per_CPU_Information[cpu].message;

  #if defined(RTEMS_DEBUG)
    {
      void *sp = __builtin_frame_address(0);
      if ( !(message & RTEMS_BSP_SMP_SHUTDOWN) ) {
        printk( "ISR on CPU %d -- (0x%02x) (0x%p)\n", cpu, message, sp );
        if ( message & RTEMS_BSP_SMP_CONTEXT_SWITCH_NECESSARY )
          printk( "context switch necessary\n" );
        if ( message & RTEMS_BSP_SMP_SIGNAL_TO_SELF )
          printk( "signal to self\n" );
        if ( message & RTEMS_BSP_SMP_SHUTDOWN )
          printk( "shutdown\n" );
        if ( message & RTEMS_BSP_SMP_FIRST_TASK )
          printk( "switch to first task\n" );
      }
      printk( "Dispatch level %d\n", _Thread_Dispatch_get_disable_level() );
    }
  #endif

  if ( message & RTEMS_BSP_SMP_FIRST_TASK ) {
    /* Mark this CPU up and start scheduling -- control never returns. */
    _Per_CPU_Information[cpu].isr_nest_level = 0;
    _Per_CPU_Information[cpu].message &= ~message;
    _Per_CPU_Information[cpu].state = RTEMS_BSP_SMP_CPU_UP;

    _SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );

    rtems_smp_run_first_task(cpu);
    /* does not return */
  }

  if ( message & RTEMS_BSP_SMP_SHUTDOWN ) {
    /* Take this CPU offline and spin forever with interrupts off. */
    _Per_CPU_Information[cpu].message &= ~message;

    _Per_CPU_Information[cpu].isr_nest_level = 0;
    _Per_CPU_Information[cpu].state = RTEMS_BSP_SMP_CPU_SHUTDOWN;
    _SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock, level );

    _Thread_Enable_dispatch();       /* undo ISR code */
    _ISR_Disable_on_this_core( level );
    while(1)
      ;
    /* does not continue past here */
  }

  if ( message & RTEMS_BSP_SMP_CONTEXT_SWITCH_NECESSARY ) {
    #if defined(RTEMS_DEBUG)
      printk( "switch needed\n" );
    #endif
    /* Acknowledge only; the actual dispatch happens on ISR exit. */
    _Per_CPU_Information[cpu].message &= ~message;
    _SMP_lock_spinlock_simple_Release( &_Per_CPU_Information[cpu].lock,
      level );
  }
}