Example #1
int pthread_spin_unlock( pthread_spinlock_t *lock )
{
  POSIX_Spinlock_Control *the_spinlock;
  ISR_Level               level;

  the_spinlock = _POSIX_Spinlock_Get( lock );
  level = the_spinlock->interrupt_state;
#if defined(POSIX_SPINLOCKS_ARE_SELF_CONTAINED)
#if defined(RTEMS_SMP)
  _SMP_ticket_lock_Release(
    &the_spinlock->Lock,
    &_Per_CPU_Get()->Lock_stats_context
  );
#endif
  _ISR_Local_enable( level );
#else
  if ( --_POSIX_Spinlock_Nest_level == 0 ) {
#if defined(RTEMS_SMP)
    _POSIX_Spinlock_Owner = 0xffffffff;
    _SMP_ticket_lock_Release(
      &the_spinlock->Lock,
      &_Per_CPU_Get()->Lock_stats_context
    );
#endif
    _ISR_Local_enable( level );
  }
#endif
  return 0;
}
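For context, a minimal sketch of the caller's side: the unlock path above is reached through the portable POSIX spinlock API (standard pthread calls only, no RTEMS internals assumed).

#include <pthread.h>

static pthread_spinlock_t lock;

static void spinlock_usage(void)
{
  pthread_spin_init( &lock, PTHREAD_PROCESS_PRIVATE );

  pthread_spin_lock( &lock );
  /* short critical section, no blocking calls */
  pthread_spin_unlock( &lock );   /* executes the code shown above */

  pthread_spin_destroy( &lock );
}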
Example #2
static void leon3_clock_profiling_interrupt_delay(void)
{
#ifdef RTEMS_PROFILING
  /*
   * We need a small state machine to ignore the first clock interrupt, since
   * it contains the sequential system initialization time.  Do the timestamp
   * initialization on the fly.
   */
  static int state = 1;

  volatile struct irqmp_timestamp_regs *irqmp_ts =
    &LEON3_IrqCtrl_Regs->timestamp[0];
  unsigned int s1_s2 = (1U << 25) | (1U << 26);

  if (state == 0) {
    unsigned int first = irqmp_ts->assertion;
    unsigned int second = irqmp_ts->counter;

    irqmp_ts->control |= s1_s2;

    _Profiling_Update_max_interrupt_delay(_Per_CPU_Get(), second - first);
  } else if (state == 1 && leon3_irqmp_has_timestamp(irqmp_ts)) {
    unsigned int ks = 1U << 5;

    state = 0;

    irqmp_ts->control = ks | s1_s2 | (unsigned int) clkirq;
  } else if (state == 1) {
    state = 2;
  }
#endif
}
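The comment in this example describes the pattern in general terms: skip the first event, arm the hardware on the fly, and give up if the hardware lacks support. A stripped-down, hypothetical sketch of that three-state machine:

/* Hypothetical skeleton of the ignore-first-event state machine above. */
static void profiling_tick(int hardware_supported)
{
  static int state = 1;      /* 1 = waiting for the first usable tick */

  if (state == 0) {
    /* steady state: read timestamps and update statistics */
  } else if (state == 1 && hardware_supported) {
    state = 0;               /* arm the timestamp unit, measure from now on */
  } else if (state == 1) {
    state = 2;               /* no hardware support: never measure */
  }
}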
Example #3
void _Thread_Start_multitasking( void )
{
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control  *heir;

#if defined(RTEMS_SMP)
  _Per_CPU_State_change( cpu_self, PER_CPU_STATE_UP );

  /*
   * Threads begin execution in the _Thread_Handler() function.   This
   * function will set the thread dispatch disable level to zero.
   */
  cpu_self->thread_dispatch_disable_level = 1;
#endif

  heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

   /*
    * Get the init task(s) running.
    *
    * Note: _Thread_Dispatch() is normally used to dispatch threads.  As
    *       part of its work, _Thread_Dispatch() restores floating point
    *       state for the heir task.
    *
    *       This code avoids _Thread_Dispatch(), so we have to restore
    *       (actually initialize) the floating point state "by hand".
    *
    *       Ignore CPU_USE_DEFERRED_FP_SWITCH here because we must always
    *       switch in the first thread if it uses the FP unit.
    */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
   /*
    *  We do not need to worry about saving the BSP's floating point state.
    */

   if ( heir->fp_context != NULL )
     _Context_Restore_fp( &heir->fp_context );
#endif

  _Profiling_Thread_dispatch_disable( cpu_self, 0 );

#if defined(RTEMS_SMP)
  /*
   * The _CPU_Context_Restart_self() implementations usually assume that self
   * context is executing.
   *
   * FIXME: We have a race condition here in case another thread already
   * performed scheduler operations and moved our heir thread to another
   * processor.  The time frame for this is likely too small to be practically
   * relevant.
   */
  _CPU_Context_Set_is_executing( &heir->Registers, true );
#endif

#if defined(_CPU_Start_multitasking)
  _CPU_Start_multitasking( &heir->Registers );
#else
  _CPU_Context_Restart_self( &heir->Registers );
#endif
}
Example #4
void rtems_smp_secondary_cpu_initialize( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();
  Thread_Control  *heir;

  #if defined(RTEMS_DEBUG)
    printk( "Made it to %d -- ", _Per_CPU_Get_index( self_cpu ) );
  #endif

  _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING );

  _Per_CPU_Wait_for_state( self_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );

  _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_UP );

  /*
   *  The Scheduler will have selected the heir thread for each CPU core.
   *  Now we have been requested to perform the first context switch.  So
   *  force a switch to the designated heir and make it executing on
   *  THIS core.
   */
  heir = self_cpu->heir;
  heir->is_executing = true;
  self_cpu->executing->is_executing = false;
  self_cpu->executing = heir;
  self_cpu->dispatch_necessary = false;

  /*
   * Threads begin execution in the _Thread_Handler() function.   This function
   * will call _Thread_Enable_dispatch().
   */
  _Thread_Disable_dispatch();

  _CPU_Context_switch_to_first_task_smp( &heir->Registers );
}
Example #5
static void _Scheduler_simple_smp_Allocate_processor(
  Thread_Control *scheduled,
  Thread_Control *victim
)
{
  Per_CPU_Control *cpu_of_scheduled = scheduled->cpu;
  Per_CPU_Control *cpu_of_victim = victim->cpu;
  Thread_Control *heir;

  scheduled->is_scheduled = true;
  victim->is_scheduled = false;

  if ( scheduled->is_executing ) {
    heir = cpu_of_scheduled->heir;
    cpu_of_scheduled->heir = scheduled;
  } else {
    heir = scheduled;
  }

  if ( heir != victim ) {
    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();

    heir->cpu = cpu_of_victim;
    cpu_of_victim->heir = heir;
    cpu_of_victim->dispatch_necessary = true;

    if ( cpu_of_victim != cpu_of_executing ) {
      _Per_CPU_Send_interrupt( cpu_of_victim );
    }
  }
}
Example #6
static int _POSIX_Threads_Join( pthread_t thread, void **value_ptr )
{
  Thread_Control       *the_thread;
  Thread_queue_Context  queue_context;
  Per_CPU_Control      *cpu_self;
  Thread_Control       *executing;
  void                 *value;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_expected_level( &queue_context, 1 );
  the_thread = _Thread_Get( thread, &queue_context.Lock_context );

  if ( the_thread == NULL ) {
    return ESRCH;
  }

  cpu_self = _Per_CPU_Get();
  executing = _Per_CPU_Get_executing( cpu_self );

  if ( executing == the_thread ) {
    _ISR_lock_ISR_enable( &queue_context.Lock_context );
    return EDEADLK;
  }

  _Thread_State_acquire_critical( the_thread, &queue_context.Lock_context );

  if ( !_Thread_Is_joinable( the_thread ) ) {
    _Thread_State_release( the_thread, &queue_context.Lock_context );
    return EINVAL;
  }

  if ( _States_Is_waiting_for_join_at_exit( the_thread->current_state ) ) {
    value = the_thread->Life.exit_value;
    _Thread_Clear_state_locked( the_thread, STATES_WAITING_FOR_JOIN_AT_EXIT );
    _Thread_Dispatch_disable_with_CPU( cpu_self, &queue_context.Lock_context );
    _Thread_State_release( the_thread, &queue_context.Lock_context );
    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Thread_Join(
      the_thread,
      STATES_INTERRUPTIBLE_BY_SIGNAL | STATES_WAITING_FOR_JOIN,
      executing,
      &queue_context
    );

    if ( _POSIX_Get_error_after_wait( executing ) != 0 ) {
      _Assert( _POSIX_Get_error_after_wait( executing ) == EINTR );
      return EINTR;
    }

    value = executing->Wait.return_argument;
  }

  if ( value_ptr != NULL ) {
    *value_ptr = value;
  }

  return 0;
}
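This internal routine backs the standard pthread_join(); a minimal portable sketch of the caller's view (standard POSIX only):

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
  (void) arg;
  return (void *) 42;   /* exit value collected by the joiner */
}

static void join_usage(void)
{
  pthread_t id;
  void     *value;

  pthread_create( &id, NULL, worker, NULL );

  /* Blocks until worker() returns; ESRCH, EDEADLK and EINVAL correspond
   * to the error paths in _POSIX_Threads_Join() above. */
  if ( pthread_join( id, &value ) == 0 ) {
    printf( "worker returned %ld\n", (long) value );
  }
}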
Example #7
File: smp.c Project: gedare/rtems
void _SMP_Request_shutdown( void )
{
  ISR_Level level;

  _ISR_Local_disable( level );
  (void) level;

  _Per_CPU_State_change( _Per_CPU_Get(), PER_CPU_STATE_SHUTDOWN );
}
Example #8
void _Giant_Release( void )
{
  ISR_Level isr_level;

  _ISR_Disable_without_giant( isr_level );
  _Assert( _Thread_Dispatch_disable_level != 0 );
  _Giant_Do_release( _Per_CPU_Get() );
  _ISR_Enable_without_giant( isr_level );
}
Example #9
rtems_task High_task(
  rtems_task_argument argument
)
{
  rtems_interrupt_level level;

  _Thread_Dispatch_disable();

  benchmark_timer_initialize();
    rtems_interrupt_local_disable( level );
  isr_disable_time = benchmark_timer_read();

  benchmark_timer_initialize();
#if defined(RTEMS_SMP)
    rtems_interrupt_local_enable( level );
    rtems_interrupt_local_disable( level );
#else
    rtems_interrupt_flash( level );
#endif
  isr_flash_time = benchmark_timer_read();

  benchmark_timer_initialize();
    rtems_interrupt_local_enable( level );
  isr_enable_time = benchmark_timer_read();

  _Thread_Dispatch_enable( _Per_CPU_Get() );

  benchmark_timer_initialize();
    _Thread_Dispatch_disable();
  thread_disable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Dispatch_enable( _Per_CPU_Get() );
  thread_enable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Set_state( _Thread_Get_executing(), STATES_SUSPENDED );
  thread_set_state_time = benchmark_timer_read();

  set_thread_dispatch_necessary( true );

  benchmark_timer_initialize();
    _Thread_Dispatch();           /* dispatches Middle_task */
}
Example #10
void qoriq_start_thread(void)
{
  const Per_CPU_Control *cpu_self = _Per_CPU_Get();

  ppc_exc_initialize_interrupt_stack(
    (uintptr_t) cpu_self->interrupt_stack_low,
    rtems_configuration_get_interrupt_stack_size()
  );

  bsp_interrupt_facility_initialize();

  _SMP_Start_multitasking_on_secondary_processor();
}
Example #11
static void test_rtems_heap_allocate_aligned_with_boundary(void)
{
  void *p = NULL;

  p = rtems_heap_allocate_aligned_with_boundary(1, 1, 1);
  rtems_test_assert( p != NULL );
  free(p);

  _Thread_Dispatch_disable();
  p = rtems_heap_allocate_aligned_with_boundary(1, 1, 1);
  _Thread_Dispatch_enable( _Per_CPU_Get() );
  rtems_test_assert( p == NULL );
}
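The second allocation above is expected to fail because the heap refuses to allocate while thread dispatching is disabled. A direct-call sketch of the same API under normal conditions (the <rtems/malloc.h> include path is an assumption):

#include <stdlib.h>
#include <rtems/malloc.h>   /* assumed header for the heap extension */

static void aligned_alloc_usage(void)
{
  /* 256 bytes, 64-byte aligned, not crossing a 4 KiB boundary */
  void *p = rtems_heap_allocate_aligned_with_boundary( 256, 64, 4096 );

  if ( p != NULL ) {
    /* use the buffer */
    free( p );
  }
}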
Example #12
void _CORE_mutex_Seize_interrupt_blocking(
  CORE_mutex_Control  *the_mutex,
  Thread_Control      *executing,
  Watchdog_Interval    timeout,
  ISR_lock_Context    *lock_context
)
{
#if !defined(RTEMS_SMP)
  /*
   * We must disable thread dispatching here since we enable the interrupts for
   * priority inheritance mutexes.
   */
  _Thread_Dispatch_disable();
#endif

  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ) {
    Thread_Control *holder = the_mutex->holder;

#if !defined(RTEMS_SMP)
    /*
     * Enabling interrupts here works only because exactly one executing
     * thread exists and only threads are allowed to seize and surrender
     * mutexes with the priority inheritance protocol.  On SMP configurations
     * more than one executing thread may exist, so there we must not release
     * the lock: otherwise the current holder might no longer be the holder
     * of the mutex once we reacquired the lock.
     */
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
#endif

    _Thread_Inherit_priority( holder, executing );

#if !defined(RTEMS_SMP)
    _Thread_queue_Acquire( &the_mutex->Wait_queue, lock_context );
#endif
  }

  _Thread_queue_Enqueue_critical(
    &the_mutex->Wait_queue.Queue,
    the_mutex->operations,
    executing,
    STATES_WAITING_FOR_MUTEX,
    timeout,
    CORE_MUTEX_TIMEOUT,
    lock_context
  );

#if !defined(RTEMS_SMP)
  _Thread_Dispatch_enable( _Per_CPU_Get() );
#endif
}
Example #13
void __ISR_Handler( uint32_t vector )
{
  ISR_Level level;

  _ISR_Local_disable( level );

   _Thread_Dispatch_disable();

#if (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  if ( _ISR_Nest_level == 0 )
    {
      /* Install irq stack */
      _old_stack_ptr = stack_ptr;
      stack_ptr = _CPU_Interrupt_stack_high;
    }

#endif

  _ISR_Nest_level++;

  _ISR_Local_enable( level );

  /* call isp */
  if ( _ISR_Vector_table[ vector])
    (*_ISR_Vector_table[ vector ])( vector );

  _ISR_Local_disable( level );

  _Thread_Dispatch_enable( _Per_CPU_Get() );

  _ISR_Nest_level--;

#if (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  if ( _ISR_Nest_level == 0 )
    /* restore old stack pointer */
    stack_ptr = _old_stack_ptr;
#endif

  _ISR_Local_enable( level );

  if ( _ISR_Nest_level )
    return;

  if ( !_Thread_Dispatch_is_enabled() ) {
    return;
  }

  if ( _Thread_Dispatch_necessary ) {
    _Thread_Dispatch();
  }
}
Example #14
File: alarm.c Project: AoLaD/rtems
unsigned int alarm(
  unsigned int seconds
)
{
  unsigned int      remaining;
  Watchdog_Control *the_watchdog;
  ISR_lock_Context  lock_context;
  ISR_lock_Context  lock_context2;
  Per_CPU_Control  *cpu;
  uint64_t          now;
  uint32_t          ticks_per_second;
  uint32_t          ticks;

  the_watchdog = &_POSIX_signals_Alarm_watchdog;
  ticks_per_second = TOD_TICKS_PER_SECOND;
  ticks = seconds * ticks_per_second;

  _ISR_lock_ISR_disable_and_acquire(
    &_POSIX_signals_Alarm_lock,
    &lock_context
  );

  cpu = _Watchdog_Get_CPU( the_watchdog );
  _Watchdog_Per_CPU_acquire_critical( cpu, &lock_context2 );
  now = cpu->Watchdog.ticks;

  remaining = (unsigned long) _Watchdog_Cancel(
    &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ],
    the_watchdog,
    now
  );

  if ( ticks != 0 ) {
    cpu = _Per_CPU_Get();
    _Watchdog_Set_CPU( the_watchdog, cpu );
    _Watchdog_Insert(
      &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ],
      the_watchdog,
      now + ticks
    );
  }

  _Watchdog_Per_CPU_release_critical( cpu, &lock_context2 );
  _ISR_lock_Release_and_ISR_enable(
    &_POSIX_signals_Alarm_lock,
    &lock_context
  );

  return ( remaining + ticks_per_second - 1 ) / ticks_per_second;
}
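The final division rounds any remaining fraction of a second up to a whole second, as POSIX requires for alarm(). A minimal portable usage sketch:

#include <unistd.h>
#include <signal.h>
#include <stdio.h>

static void on_alarm(int sig)
{
  (void) sig;   /* SIGALRM is delivered when the alarm fires */
}

static void alarm_usage(void)
{
  unsigned int remaining;

  signal( SIGALRM, on_alarm );
  alarm( 10 );                /* arm a 10 second alarm */

  remaining = alarm( 3 );     /* re-arm; returns the seconds left (<= 10) */
  printf( "previous alarm had %u s remaining\n", remaining );

  alarm( 0 );                 /* cancel */
}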
Example #15
void ppc_exc_wrapup(BSP_Exception_frame *frame)
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Per_CPU_Get();

  if (cpu_self->isr_dispatch_disable) {
    return;
  }

  while (cpu_self->dispatch_necessary) {
    rtems_interrupt_level level;

    cpu_self->isr_dispatch_disable = 1;
    cpu_self->thread_dispatch_disable_level = 1;
    _Thread_Do_dispatch(cpu_self, frame->EXC_SRR1);
    rtems_interrupt_local_disable(level);
    (void) level;
    cpu_self = _Per_CPU_Get();
  }

  cpu_self->isr_dispatch_disable = 0;
}
Example #16
File: smp.c Project: gedare/rtems
void _SMP_Request_start_multitasking( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  _Per_CPU_State_change( self_cpu, PER_CPU_STATE_READY_TO_START_MULTITASKING );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    if ( _Per_CPU_Is_processor_online( cpu ) ) {
      _Per_CPU_State_change( cpu, PER_CPU_STATE_REQUEST_START_MULTITASKING );
    }
  }
}
Example #17
static void thread_disable_dispatch( void )
{
/* Yes, RTEMS_SMP and not PREVENT_SMP_ASSERT_FAILURES */
#if defined( RTEMS_SMP )
  Per_CPU_Control *self_cpu;
  ISR_Level level;

  _ISR_Disable_without_giant( level );
  ( void ) level;

  self_cpu = _Per_CPU_Get();
  self_cpu->thread_dispatch_disable_level = 1;
#else
  _Thread_Disable_dispatch();
#endif
}
Example #18
void _Thread_Dispatch( void )
{
  ISR_Level        level;
  Per_CPU_Control *cpu_self;

  _ISR_Disable_without_giant( level );

  cpu_self = _Per_CPU_Get();

  if ( cpu_self->dispatch_necessary ) {
    _Profiling_Thread_dispatch_disable( cpu_self, 0 );
    cpu_self->thread_dispatch_disable_level = 1;
    _Thread_Do_dispatch( cpu_self, level );
  } else {
    _ISR_Enable_without_giant( level );
  }
}
Example #19
File: smp.c Project: gedare/rtems
void _SMP_Start_multitasking_on_secondary_processor( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();
  uint32_t cpu_index_self = _Per_CPU_Get_index( self_cpu );

  if ( cpu_index_self >= rtems_configuration_get_maximum_processors() ) {
    _SMP_Fatal( SMP_FATAL_MULTITASKING_START_ON_INVALID_PROCESSOR );
  }

  if ( !_SMP_Should_start_processor( cpu_index_self ) ) {
    _SMP_Fatal( SMP_FATAL_MULTITASKING_START_ON_UNASSIGNED_PROCESSOR );
  }

  _Per_CPU_State_change( self_cpu, PER_CPU_STATE_READY_TO_START_MULTITASKING );

  _Thread_Start_multitasking();
}
Example #20
void rtems_smp_process_interrupt( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();

  if ( self_cpu->message != 0 ) {
    uint32_t  message;
    ISR_Level level;

    _Per_CPU_Lock_acquire( self_cpu, level );
    message = self_cpu->message;
    self_cpu->message = 0;
    _Per_CPU_Lock_release( self_cpu, level );

    #if defined(RTEMS_DEBUG)
      {
        void *sp = __builtin_frame_address(0);
        if ( !(message & RTEMS_BSP_SMP_SHUTDOWN) ) {
          printk(
            "ISR on CPU %d -- (0x%02x) (0x%p)\n",
            _Per_CPU_Get_index( self_cpu ),
            message,
            sp
          );
          if ( message & RTEMS_BSP_SMP_SIGNAL_TO_SELF )
            printk( "signal to self\n" );
          if ( message & RTEMS_BSP_SMP_SHUTDOWN )
            printk( "shutdown\n" );
        }
        printk( "Dispatch level %d\n", _Thread_Dispatch_get_disable_level() );
      }
    #endif

    if ( ( message & RTEMS_BSP_SMP_SHUTDOWN ) != 0 ) {
      _ISR_Disable( level );

      _Thread_Dispatch_set_disable_level( 0 );

      _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_SHUTDOWN );

      _CPU_Fatal_halt( _Per_CPU_Get_index( self_cpu ) );
      /* does not continue past here */
    }
  }
}
Example #21
uint32_t _Thread_Dispatch_decrement_disable_level( void )
{
  ISR_Level isr_level;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable( isr_level );

  self_cpu = _Per_CPU_Get();
  disable_level = self_cpu->thread_dispatch_disable_level;
  --disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  _Giant_Do_release();

  _ISR_Enable( isr_level );

  return disable_level;
}
Example #22
uint32_t _Thread_Dispatch_decrement_disable_level( void )
{
  ISR_Level isr_level;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable_without_giant( isr_level );

  self_cpu = _Per_CPU_Get();
  disable_level = self_cpu->thread_dispatch_disable_level;
  --disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  _Giant_Do_release( self_cpu );
  _Assert( disable_level != 0 || _Giant.owner_cpu == NO_OWNER_CPU );

  _ISR_Enable_without_giant( isr_level );

  return disable_level;
}
Example #23
static void
_SMP_Multicasts_try_process( void )
{
  unsigned long message;
  Per_CPU_Control *cpu_self;
  ISR_Level isr_level;

  _ISR_Local_disable( isr_level );

  cpu_self = _Per_CPU_Get();

  message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );

  if ( message & SMP_MESSAGE_MULTICAST_ACTION ) {
    if ( _Atomic_Compare_exchange_ulong( &cpu_self->message, &message,
        message & ~SMP_MESSAGE_MULTICAST_ACTION, ATOMIC_ORDER_RELAXED,
        ATOMIC_ORDER_RELAXED ) ) {
      _SMP_Multicast_actions_process();
    }
  }

  _ISR_Local_enable( isr_level );
}
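The compare-and-swap clears one message bit without losing bits that another processor may set concurrently. The same pattern expressed with plain C11 atomics, which the RTEMS _Atomic_* wrappers closely resemble (the flag value below is hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

#define MESSAGE_MULTICAST_ACTION 0x1UL   /* hypothetical flag bit */

static atomic_ulong message;

/* Clear one flag bit without disturbing bits set concurrently elsewhere. */
static bool try_consume_flag(void)
{
  unsigned long current =
    atomic_load_explicit( &message, memory_order_relaxed );

  while ( ( current & MESSAGE_MULTICAST_ACTION ) != 0 ) {
    if ( atomic_compare_exchange_weak_explicit(
           &message, &current, current & ~MESSAGE_MULTICAST_ACTION,
           memory_order_relaxed, memory_order_relaxed ) ) {
      return true;   /* we consumed the flag; process the action */
    }
    /* CAS failed: current now holds the fresh value, retry */
  }

  return false;
}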
Example #24
static void _Rate_monotonic_Renew_deadline(
  Rate_monotonic_Control *the_period,
  ISR_lock_Context       *lock_context
)
{
  uint64_t deadline;

  /* stay at 0xffffffff if postponed_jobs is going to overflow */
  if ( the_period->postponed_jobs != UINT32_MAX ) {
    ++the_period->postponed_jobs;
  }

  the_period->state = RATE_MONOTONIC_EXPIRED;

  deadline = _Watchdog_Per_CPU_insert_ticks(
    &the_period->Timer,
    _Per_CPU_Get(),
    the_period->next_length
  );
  the_period->latest_deadline = deadline;

  _Rate_monotonic_Release( the_period, lock_context );
}
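This helper runs when a period expires before its owner calls in again; postponed_jobs counts the overruns. A minimal sketch of the Classic API loop that drives this machinery (a 100-tick period is assumed):

#include <rtems.h>

rtems_task periodic_task( rtems_task_argument arg )
{
  rtems_id period_id;

  (void) arg;
  rtems_rate_monotonic_create(
    rtems_build_name( 'P', 'E', 'R', '1' ),
    &period_id
  );

  while ( 1 ) {
    /* Returns RTEMS_TIMEOUT when the previous period already expired,
     * i.e. when _Rate_monotonic_Renew_deadline() recorded a postponed job. */
    if ( rtems_rate_monotonic_period( period_id, 100 ) == RTEMS_TIMEOUT ) {
      /* deadline missed */
    }

    /* do one job's worth of work */
  }
}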
Example #25
uint32_t _Thread_Dispatch_increment_disable_level( void )
{
  ISR_Level isr_level;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable_without_giant( isr_level );

  /*
   * We must obtain the processor after interrupts are disabled to prevent
   * thread migration.
   */
  self_cpu = _Per_CPU_Get();

  _Giant_Do_acquire( self_cpu );

  disable_level = self_cpu->thread_dispatch_disable_level;
  ++disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  _ISR_Enable_without_giant( isr_level );

  return disable_level;
}
Example #26
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  if ( _Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
    Per_CPU_Control *cpu;

    cpu = _Thread_Get_CPU( the_thread );
    _Per_CPU_Acquire( cpu );

    _Chain_Append_unprotected(
      &cpu->Threads_in_need_for_help,
      &the_thread->Scheduler.Help_node
    );

    _Per_CPU_Release( cpu );

    _Thread_Dispatch_request( _Per_CPU_Get(), cpu );
  }

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
Example #27
void _Thread_Handler( void )
{
  Thread_Control *executing = _Thread_Executing;
  ISR_Level       level;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();

  #if !defined(RTEMS_SMP)
    /*
     * We have to put level into a register for those CPUs that use
     * inline asm here.
     */
    level = executing->Start.isr_level;
    _ISR_Set_level( level );
  #endif

  /*
   * Initialize the floating point context because we do not come
   * through _Thread_Dispatch on our first invocation. So the normal
   * code path for performing the FP context switch is not hit.
   */
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    #if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
      if ( (executing->fp_context != NULL) &&
            !_Thread_Is_allocated_fp( executing ) ) {
        if ( _Thread_Allocated_fp != NULL )
          _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
        _Thread_Allocated_fp = executing;
      }
    #endif
  #endif

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatching
   * disabled until all 'begin' extensions complete.
   */
  _User_extensions_Thread_begin( executing );

  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */
  #if defined(RTEMS_SMP)
    {
      /*
       * On SMP we enter _Thread_Handler() with interrupts disabled and
       * _Thread_Dispatch() obtained the per-CPU lock for us.  We have to
       * release it here and set the desired interrupt level of the thread.
       */
      Per_CPU_Control *cpu_self = _Per_CPU_Get();

      _Assert( cpu_self->thread_dispatch_disable_level == 1 );
      _Assert( _ISR_Get_level() != 0 );

      _Thread_Debug_set_real_processor( executing, cpu_self );

      cpu_self->thread_dispatch_disable_level = 0;
      _Profiling_Thread_dispatch_enable( cpu_self, 0 );

      level = executing->Start.isr_level;
      _ISR_Set_level( level);

      /*
       * The thread dispatch level changed from one to zero.  Make sure we lose
       * no thread dispatch necessary update.
       */
      _Thread_Dispatch();
    }
  #else
    _Thread_Enable_dispatch();
  #endif

  /*
   *  RTEMS supports multiple APIs and each API can define a different
   *  thread/task prototype. The following code supports invoking the
   *  user thread entry point using the prototype expected.
   */
  if ( executing->Start.prototype == THREAD_START_NUMERIC ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
  }
  #if defined(RTEMS_POSIX_API)
    else if ( executing->Start.prototype == THREAD_START_POINTER ) {
      executing->Wait.return_argument =
        (*(Thread_Entry_pointer) executing->Start.entry_point)(
          executing->Start.pointer_argument
        );
    }
  #endif
  #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
    else if ( executing->Start.prototype == THREAD_START_BOTH_POINTER_FIRST ) {
      executing->Wait.return_argument =
         (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
           executing->Start.pointer_argument,
           executing->Start.numeric_argument
         );
    }
    else if ( executing->Start.prototype == THREAD_START_BOTH_NUMERIC_FIRST ) {
      executing->Wait.return_argument =
       (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
         executing->Start.numeric_argument,
         executing->Start.pointer_argument
       );
    }
  #endif

  /*
   *  In the dispatch above, the return code from the user thread body
   *  was placed in return_argument.  This assumes that if it returned
   *  anything (which is not supported in all APIs), then the value
   *  fits in a (void *).
   */

  _User_extensions_Thread_exitted( executing );

  _Terminate(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
Example #28
void _Thread_Handler( void )
{
    Thread_Control  *executing;
    ISR_Level        level;
    Per_CPU_Control *cpu_self;

    /*
     * Some CPUs need to tinker with the call frame or registers when the
     * thread actually begins to execute for the first time.  This is a
     * hook point where the port gets a shot at doing whatever it requires.
     */
    _Context_Initialization_at_thread_begin();
    executing = _Thread_Executing;

    /*
     * We have to put level into a register for those CPUs that use
     * inline asm here.
     */
    level = executing->Start.isr_level;
    _ISR_Set_level( level );

    /*
     * Initialize the floating point context because we do not come
     * through _Thread_Dispatch on our first invocation. So the normal
     * code path for performing the FP context switch is not hit.
     */
    _Thread_Restore_fp( executing );

    /*
     * Do not use the level of the thread control block, since it has a
     * different format.
     */
    _ISR_Local_disable( level );

    /*
     *  At this point, the dispatch disable level BETTER be 1.
     */
    cpu_self = _Per_CPU_Get();
    _Assert( cpu_self->thread_dispatch_disable_level == 1 );

    /*
     * Make sure we lose no thread dispatch necessary update and execute the
     * post-switch actions.  As a side-effect change the thread dispatch level
     * from one to zero.  Do not use _Thread_Enable_dispatch() since there is no
     * valid thread dispatch necessary indicator in this context.
     */
    _Thread_Do_dispatch( cpu_self, level );

    /*
     * Invoke the thread begin extensions in the context of the thread entry
     * function with thread dispatching enabled.  This enables use of dynamic
     * memory allocation, creation of POSIX keys and use of C++ thread local
     * storage.  Blocking synchronization primitives are allowed also.
     */
    _User_extensions_Thread_begin( executing );

    /*
     *  RTEMS supports multiple APIs and each API can define a different
     *  thread/task prototype. The following code supports invoking the
     *  user thread entry point using the prototype expected.
     */
    ( *executing->Start.Entry.adaptor )( executing );

    /*
     *  In the call above, the return code from a user thread body that
     *  returns something was placed in return_argument.  This assumes that
     *  if it returned anything (which is not supported in all APIs), then
     *  the value fits in a (void *).
     */

    _User_extensions_Thread_exitted( executing );

    _Internal_error( INTERNAL_ERROR_THREAD_EXITTED );
}
Example #29
/*
 *  A simple test of realloc
 */
static void test_realloc(void)
{
  void *p1, *p2, *p3, *p4;
  size_t i;
  int sc;
  bool malloc_walk_ok;

  /* Test growing reallocation "in place" */
  p1 = malloc(1);
  for (i=2 ; i<2048 ; i++) {
    p2 = realloc(p1, i);
    if (p2 != p1)
      printf( "realloc - failed grow in place: "
              "%p != realloc(%p,%zu)\n", p1, p2, i);
    p1 = p2;
  }
  free(p1);

  /* Test shrinking reallocation "in place" */
  p1 = malloc(2048);
  for (i=2047 ; i>=1; i--)  {
    p2 = realloc(p1, i);
    if (p2 != p1)
      printf( "realloc - failed shrink in place: "
              "%p != realloc(%p,%zu)\n", p1, p2, i);
    p1 = p2;
  }
  free(p1);

  /* Test a realloc that cannot be satisfied "in place", i.e., one that
   * falls back to a malloc(), copy, free() sequence
   */
  p1 = malloc(32);
  p2 = malloc(32);
  p3 = realloc(p1, 64);
  if (p3 == p1 || p3 == NULL)
    printf(
      "realloc - failed non-in place: realloc(%p,%d) = %p\n", p1, 64, p3);
  free(p3);
  free(p2);

  /*
   *  Yet another case
   */
  p1 = malloc(8);
  p2 = malloc(8);
  free(p1);
  sc = posix_memalign(&p1, 16, 32);
  if (!sc)
    free(p1);

  /*
   *  Allocate with default alignment coverage
   */
  sc = rtems_memalign( &p4, 0, 8 );
  if ( !sc && p4 )
    free( p4 );

  /*
   * Walk the C Program Heap
   */
  puts( "malloc_walk - normal path" );
  malloc_walk_ok = malloc_walk( 1234, false );
  rtems_test_assert( malloc_walk_ok );

  puts( "malloc_walk - in critical section path" );
  _Thread_Dispatch_disable();
  malloc_walk_ok = malloc_walk( 1234, false );
  rtems_test_assert( malloc_walk_ok );
  _Thread_Dispatch_enable( _Per_CPU_Get() );

  /*
   *  Realloc with a bad pointer to force an error path
   */
  p4 = realloc( test_realloc, 32 );

  p4 = _realloc_r( NULL, NULL, 1 );
}
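One pitfall the test skirts around: when realloc() fails it returns NULL but leaves the old block allocated, so assigning the result straight back to the only pointer leaks it. A common defensive wrapper (plain ISO C, illustrative):

#include <stdlib.h>

/* Grow a buffer without leaking it when realloc() fails. */
static void *xrealloc(void *old, size_t new_size)
{
  void *p = realloc( old, new_size );

  if ( p == NULL && new_size != 0 ) {
    free( old );   /* avoid the classic realloc() leak */
  }

  return p;
}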
Example #30
CORE_mutex_Status _CORE_mutex_Surrender(
  CORE_mutex_Control                *the_mutex,
#if defined(RTEMS_MULTIPROCESSING)
  Objects_Id                         id,
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support,
#else
  Objects_Id                         id __attribute__((unused)),
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support __attribute__((unused)),
#endif
  ISR_lock_Context                  *lock_context
)
{
  Thread_Control *the_thread;
  Thread_Control *holder;

  holder = the_mutex->holder;

  /*
   *  The following code allows a thread (or ISR) other than the thread
   *  which acquired the mutex to release that mutex.  This is only
   *  allowed when the mutex in question is FIFO or simple Priority
   *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
   *  must be released by the thread which acquired them.
   */

  if ( the_mutex->Attributes.only_owner_release ) {
    if ( !_Thread_Is_executing( holder ) ) {
      _ISR_lock_ISR_enable( lock_context );
      return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
    }
  }

  _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context );

  /* XXX already unlocked -- not right status */

  if ( !the_mutex->nest_count ) {
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
    return CORE_MUTEX_STATUS_SUCCESSFUL;
  }

  the_mutex->nest_count--;

  if ( the_mutex->nest_count != 0 ) {
    /*
     *  All error checking is on the locking side, so if the lock was
     *  allowed to be acquired multiple times, then we should just deal
     *  with that.  The RTEMS_DEBUG path is just a validation.
     */
    #if defined(RTEMS_DEBUG)
      switch ( the_mutex->Attributes.lock_nesting_behavior ) {
        case CORE_MUTEX_NESTING_ACQUIRES:
          _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
          return CORE_MUTEX_STATUS_SUCCESSFUL;
        #if defined(RTEMS_POSIX_API)
          case CORE_MUTEX_NESTING_IS_ERROR:
            /* should never occur */
            _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
            return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
        #endif
        case CORE_MUTEX_NESTING_BLOCKS:
          /* Currently no API exercises this behavior. */
          break;
      }
    #else
      _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
      /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
      return CORE_MUTEX_STATUS_SUCCESSFUL;
    #endif
  }

  /*
   *  Formally release the mutex before possibly transferring it to a
   *  blocked thread.
   */
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
    CORE_mutex_Status pop_status =
      _CORE_mutex_Pop_priority( the_mutex, holder );

    if ( pop_status != CORE_MUTEX_STATUS_SUCCESSFUL ) {
      _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
      return pop_status;
    }

    holder->resource_count--;
  }
  the_mutex->holder = NULL;

  /*
   *  Now we check if another thread was waiting for this mutex.  If so,
   *  transfer the mutex to that thread.
   */
  if ( ( the_thread = _Thread_queue_First_locked( &the_mutex->Wait_queue ) ) ) {
    /*
     * We must extract the thread now since this will restore its default
     * thread lock.  This is necessary to avoid a deadlock in the
     * _Thread_Change_priority() below due to a recursive thread queue lock
     * acquire.
     */
    _Thread_queue_Extract_locked( &the_mutex->Wait_queue, the_thread );

#if defined(RTEMS_MULTIPROCESSING)
    _Thread_Dispatch_disable();

    if ( _Objects_Is_local_id( the_thread->Object.id ) )
#endif
    {
      the_mutex->holder     = the_thread;
      the_mutex->nest_count = 1;

      switch ( the_mutex->Attributes.discipline ) {
        case CORE_MUTEX_DISCIPLINES_FIFO:
        case CORE_MUTEX_DISCIPLINES_PRIORITY:
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          _Thread_Raise_priority(
            the_thread,
            the_mutex->Attributes.priority_ceiling
          );
          break;
      }
    }

    _Thread_queue_Unblock_critical(
      &the_mutex->Wait_queue,
      the_thread,
      lock_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {

      the_mutex->holder     = NULL;
      the_mutex->nest_count = 1;

      ( *api_mutex_mp_support)( the_thread, id );

    }

    _Thread_Dispatch_enable( _Per_CPU_Get() );
#endif
  } else {
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
  }

  /*
   *  Whether or not someone is waiting for the mutex, an
   *  inherited priority must be lowered if this is the last
   *  mutex (i.e. resource) this task has.
   */
  if ( !_Thread_Owns_resources( holder ) ) {
    /*
     * Ensure that the holder resource count is visible to all other processors
     * and that we read the latest priority restore hint.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

    if ( holder->priority_restore_hint ) {
      Per_CPU_Control *cpu_self;

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Restore_priority( holder );
      _Thread_Dispatch_enable( cpu_self );
    }
  }

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}