Example #1
static void _Thread_Make_zombie( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;
  Thread_Zombie_control *zombies = &_Thread_Zombies;

  if ( _Thread_Owns_resources( the_thread ) ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_RESOURCE_IN_USE
    );
  }

  _Objects_Close(
    _Objects_Get_information_id( the_thread->Object.id ),
    &the_thread->Object
  );

  _Thread_Set_state( the_thread, STATES_ZOMBIE );
  _Thread_queue_Extract_with_proxy( the_thread );
  _Watchdog_Remove_ticks( &the_thread->Timer );

  _ISR_lock_ISR_disable_and_acquire( &zombies->Lock, &lock_context );
  _Chain_Append_unprotected( &zombies->Chain, &the_thread->Object.Node );
  _ISR_lock_Release_and_ISR_enable( &zombies->Lock, &lock_context );
}
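Every example on this page funnels into the same internal error handler. Reconstructed from the call sites (a sketch, not the verbatim RTEMS declaration), the prototype takes an error source, an "is internal" flag, and a source-specific error code, and never returns:

/* Sketch of the common handler: classify the error, then stop the system.
 * The types and the non-returning attribute reflect the RTEMS declaration
 * in <rtems/score/interr.h>; treat the exact spelling as an assumption.
 */
void _Terminate(
  Internal_errors_Source  the_source,   /* e.g. INTERNAL_ERROR_CORE */
  bool                    is_internal,  /* true if internal to the executive */
  Internal_errors_t       the_error     /* e.g. INTERNAL_ERROR_RESOURCE_IN_USE */
) RTEMS_NO_RETURN;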
Example #2
bool _Objects_MP_Allocate_and_open (
  Objects_Information *information,
  uint32_t             the_name,      /* XXX -- wrong for variable */
  Objects_Id           the_id,
  bool                 is_fatal_error
)
{
  Objects_MP_Control  *the_global_object;

  the_global_object = _Objects_MP_Allocate_global_object();
  if ( _Objects_MP_Is_null_global_object( the_global_object ) ) {

    if ( is_fatal_error == false )
      return false;

    _Terminate(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_OUT_OF_GLOBAL_OBJECTS
    );

  }

  _Objects_MP_Open( information, the_global_object, the_name, the_id );

  return true;
}
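The is_fatal_error flag lets one allocator serve both recoverable and mandatory requests. A hypothetical call site (the status mapping is invented for illustration):

/* Hypothetical caller: exhaustion of global objects is recoverable here,
 * so pass false and map the failure to a status code instead of
 * terminating the system. */
if ( !_Objects_MP_Allocate_and_open( information, name, id, false ) ) {
  return RTEMS_TOO_MANY;
}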
Example #3
int rtems_gxx_setspecific(__gthread_key_t key, const void *ptr)
{
  pthread_key_t *pkey = key;
  int eno;

  if ( pkey == NULL ) {
    return EINVAL;
  }

  eno = pthread_setspecific( *pkey, ptr );

  #ifdef DEBUG_GXX_WRAPPERS
    printk(
      "gxx_wrappers: setspecific key=%p, ptr=%p, id=%x\n",
      pkey,
      ptr,
      rtems_task_self()
    );
  #endif

  if ( eno != 0 ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_GXX_KEY_ADD_FAILED
    );
  }

  return 0;
}
Example #4
void _Objects_MP_Close (
  Objects_Information *information,
  Objects_Id           the_id
)
{
  Chain_Control      *the_chain;
  Chain_Node         *the_node;
  Objects_MP_Control *the_object;

  the_chain = &information->global_table[ _Objects_Get_node( the_id ) ];

  for ( the_node = _Chain_First( the_chain ) ;
        !_Chain_Is_tail( the_chain, the_node ) ;
        the_node = _Chain_Next( the_node ) ) {

    the_object = (Objects_MP_Control *) the_node;

    if ( _Objects_Are_ids_equal( the_object->Object.id, the_id ) ) {

      _Chain_Extract( the_node );
      _Objects_MP_Free_global_object( the_object );
      return;
    }

  }

  _Terminate(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_INVALID_GLOBAL_ID
  );
}
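The cast (Objects_MP_Control *) the_node relies on the chain node being the first member of the object. When a node is embedded elsewhere in a structure, the usual idiom recovers the enclosing object explicitly; a generic sketch:

#include <stddef.h>

/* Recover the enclosing object from a pointer to an embedded member;
 * this reduces to the plain cast above exactly when the member is the
 * first field. */
#define CONTAINER_OF( ptr, type, member ) \
  ( (type *) ( (char *) (ptr) - offsetof( type, member ) ) )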
Example #5
/*
 * MUTEX support
 */
void rtems_gxx_mutex_init (__gthread_mutex_t *mutex)
{
  rtems_status_code status;

  #ifdef DEBUG_GXX_WRAPPERS
    printk( "gxx_wrappers: mutex init =%X\n", *mutex );
  #endif

  status = rtems_semaphore_create(
    rtems_build_name ('G', 'C', 'C', '2'),
    1,
    RTEMS_PRIORITY|RTEMS_BINARY_SEMAPHORE|
      RTEMS_INHERIT_PRIORITY|RTEMS_NO_PRIORITY_CEILING|RTEMS_LOCAL,
    0,
    (rtems_id *)mutex
  );
  if ( status != RTEMS_SUCCESSFUL ) {
    #ifdef DEBUG_GXX_WRAPPERS
      printk(
        "gxx_wrappers: mutex init failed %s (%d)\n",
        rtems_status_text(status),
        status
      );
    #endif
    _Terminate(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_GXX_MUTEX_INIT_FAILED
    );
  }
  #ifdef DEBUG_GXX_WRAPPERS
    printk( "gxx_wrappers: mutex init complete =%X\n", *mutex );
  #endif
}
Example #6
void _MPCI_Internal_packets_Process_packet (
  MP_packet_Prefix  *the_packet_prefix
)
{
  MPCI_Internal_packet *the_packet;
  uint32_t              maximum_nodes;
  uint32_t              maximum_global_objects;

  the_packet = (MPCI_Internal_packet *) the_packet_prefix;

  switch ( the_packet->operation ) {

    case MPCI_PACKETS_SYSTEM_VERIFY:

      maximum_nodes          = the_packet->maximum_nodes;
      maximum_global_objects = the_packet->maximum_global_objects;
      if ( maximum_nodes != _Objects_Maximum_nodes ||
           maximum_global_objects != _Objects_MP_Maximum_global_objects ) {

        _MPCI_Return_packet( the_packet_prefix );

        _Terminate(
          INTERNAL_ERROR_CORE,
          true,
          INTERNAL_ERROR_INCONSISTENT_MP_INFORMATION
        );
      }

      _MPCI_Return_packet( the_packet_prefix );

      break;
  }
}
Example #7
static void _Thread_Make_zombie( Thread_Control *the_thread )
{
  if ( _Thread_Owns_resources( the_thread ) ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_RESOURCE_IN_USE
    );
  }

  _Objects_Close(
    _Objects_Get_information_id( the_thread->Object.id ),
    &the_thread->Object
  );

  _Thread_Set_state( the_thread, STATES_ZOMBIE );
  _Thread_queue_Extract_with_proxy( the_thread );
  _Thread_Timer_remove( the_thread );

  /*
   * Add the thread to the thread zombie chain before we wake up joining
   * threads, so that they are able to clean up the thread immediately.  This
   * matters for SMP configurations.
   */
  _Thread_Add_to_zombie_chain( the_thread );

  _Thread_Wake_up_joining_threads( the_thread );
}
Example #8
void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  _Terminate(
    INTERNAL_ERROR_CORE,
    false,
    INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
  );
}
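This handler is not called directly; it is installed as the deadlock callout of a thread queue context and invoked in Example #19 via ( *queue_context->deadlock_callout )( the_thread ). A minimal sketch of the wiring, assuming the usual context-setup helpers:

/* Sketch: request fatal-error treatment for deadlocks detected while
 * enqueueing on this thread queue.  The setter is assumed to simply
 * store the function pointer in the context. */
Thread_queue_Context queue_context;

_Thread_queue_Context_initialize( &queue_context );
_Thread_queue_Context_set_deadlock_callout(
  &queue_context,
  _Thread_queue_Deadlock_fatal
);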
Example #9
void _ISR_Handler_initialization( void )
{
  _ISR_Nest_level = 0;

#if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
  _ISR_Vector_table = _Workspace_Allocate_or_fatal_error(
     sizeof(ISR_Handler_entry) * ISR_NUMBER_OF_VECTORS
  );

  _CPU_Initialize_vectors();
#endif

#if ( CPU_ALLOCATE_INTERRUPT_STACK == TRUE )
  {
    size_t stack_size = rtems_configuration_get_interrupt_stack_size();
    uint32_t max_cpus = rtems_configuration_get_maximum_processors();
    uint32_t cpu;

    if ( !_Stack_Is_enough( stack_size ) )
      _Terminate(
        INTERNAL_ERROR_CORE,
        true,
        INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL
      );

    for ( cpu = 0 ; cpu < max_cpus; ++cpu ) {
      Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
      void *low = _Workspace_Allocate_or_fatal_error( stack_size );
      void *high = _Addresses_Add_offset( low, stack_size );

#if (CPU_STACK_ALIGNMENT != 0)
      high = _Addresses_Align_down( high, CPU_STACK_ALIGNMENT );
#endif

      per_cpu->interrupt_stack_low = low;
      per_cpu->interrupt_stack_high = high;

      /*
       * Interrupt stack might have to be aligned and/or setup in a specific
       * way.  Do not use the local low or high variables here since
       * _CPU_Interrupt_stack_setup() is a nasty macro that might want to play
       * with the real memory locations.
       */
#if defined(_CPU_Interrupt_stack_setup)
      _CPU_Interrupt_stack_setup(
        per_cpu->interrupt_stack_low,
        per_cpu->interrupt_stack_high
      );
#endif
    }
  }

#endif

#if ( CPU_HAS_HARDWARE_INTERRUPT_STACK == TRUE )
  _CPU_Install_interrupt_stack();
#endif
}
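_Addresses_Align_down() presumably rounds the stack top down to the required boundary. A minimal sketch of that arithmetic, assuming a power-of-two alignment:

#include <stdint.h>

/* Round an address down to a power-of-two boundary by clearing the
 * low-order bits. */
static inline void *align_down( void *address, uintptr_t alignment )
{
  return (void *) ( (uintptr_t) address & ~( alignment - 1 ) );
}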
Example #10
Thread _MPCI_Receive_server(
  uint32_t   ignored
)
{

  MP_packet_Prefix         *the_packet;
  MPCI_Packet_processor     the_function;
  Thread_Control           *executing;

  executing = _Thread_Get_executing();

  for ( ; ; ) {

    executing->receive_packet = NULL;

    _Thread_Disable_dispatch();
    _CORE_semaphore_Seize(
      &_MPCI_Semaphore,
      executing,
      0,
      true,
      WATCHDOG_NO_TIMEOUT
    );
    _Thread_Enable_dispatch();

    for ( ; ; ) {
      the_packet = _MPCI_Receive_packet();

      if ( !the_packet )
        break;

      executing->receive_packet = the_packet;

      if ( !_Mp_packet_Is_valid_packet_class ( the_packet->the_class ) )
        break;

      the_function = _MPCI_Packet_processors[ the_packet->the_class ];

      if ( !the_function ) {
        _Terminate(
          INTERNAL_ERROR_CORE,
          true,
          INTERNAL_ERROR_BAD_PACKET
        );
      }

      (*the_function)( the_packet );
    }
  }

  return 0;   /* unreached - only to remove warnings */
}
Example #11
void _MPCI_Handler_initialization(
  uint32_t   timeout_status
)
{
  CORE_semaphore_Attributes   attributes;
  MPCI_Control               *users_mpci_table;

  users_mpci_table = _Configuration_MP_table->User_mpci_table;

  if ( _System_state_Is_multiprocessing && !users_mpci_table )
    _Terminate(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_NO_MPCI
    );

  _MPCI_table = users_mpci_table;

  if ( !_System_state_Is_multiprocessing )
    return;

  /*
   *  Register the MP Process Packet routine.
   */

  _MPCI_Register_packet_processor(
    MP_PACKET_MPCI_INTERNAL,
    _MPCI_Internal_packets_Process_packet
  );

  /*
   *  Create the counting semaphore used by the MPCI Receive Server.
   */

  attributes.discipline = CORE_SEMAPHORE_DISCIPLINES_FIFO;

  _CORE_semaphore_Initialize(
    &_MPCI_Semaphore,
    &attributes,              /* the_semaphore_attributes */
    0                         /* initial_value */
  );

  _Thread_queue_Initialize(
    &_MPCI_Remote_blocked_threads,
    THREAD_QUEUE_DISCIPLINE_FIFO,
    STATES_WAITING_FOR_RPC_REPLY,
    timeout_status
  );
}
Example #12
void _Objects_MP_Handler_early_initialization(void)
{
  uint32_t   node;
  uint32_t   maximum_nodes;

  node                   = _Configuration_MP_table->node;
  maximum_nodes          = _Configuration_MP_table->maximum_nodes;

  if ( node < 1 || node > maximum_nodes )
    _Terminate(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_INVALID_NODE
    );

  _Objects_Local_node    = node;
  _Objects_Maximum_nodes = maximum_nodes;
}
Example #13
void _Thread_Handler_initialization(void)
{
  rtems_stack_allocate_init_hook stack_allocate_init_hook =
    rtems_configuration_get_stack_allocate_init_hook();
  #if defined(RTEMS_MULTIPROCESSING)
    uint32_t maximum_proxies =
      _Configuration_MP_table->maximum_proxies;
  #endif

  if ( rtems_configuration_get_stack_allocate_hook() == NULL ||
       rtems_configuration_get_stack_free_hook() == NULL)
    _Terminate(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_BAD_STACK_HOOK
    );

  if ( stack_allocate_init_hook != NULL )
    (*stack_allocate_init_hook)( rtems_configuration_get_stack_space_size() );

  #if defined(RTEMS_MULTIPROCESSING)
    _Thread_MP_Handler_initialization( maximum_proxies );
  #endif

  /*
   *  Initialize the internal class of threads.  We need an IDLE thread
   *  per CPU in an SMP system.  In addition, if this is a loosely
   *  coupled multiprocessing system, account for the MPCI Server Thread.
   */
  _Thread_Initialize_information(
    &_Thread_Internal_information,
    OBJECTS_INTERNAL_API,
    OBJECTS_INTERNAL_THREADS,
    _Thread_Get_maximum_internal_threads(),
    false,                      /* true if names for this object are strings */
    8                           /* maximum length of each object's name */
    #if defined(RTEMS_MULTIPROCESSING)
      ,
      false                       /* true if this is a global object class */
    #endif
  );

}
Example #14
MP_packet_Prefix *_MPCI_Get_packet ( void )
{
  MP_packet_Prefix  *the_packet;

  (*_MPCI_table->get_packet)( &the_packet );

  if ( the_packet == NULL )
    _Terminate(
      INTERNAL_ERROR_CORE,
      true,
      INTERNAL_ERROR_OUT_OF_PACKETS
    );

  /*
   *  Put in a default timeout that will be used for
   *  all packets that do not otherwise have a timeout.
   */

  the_packet->timeout = MPCI_DEFAULT_TIMEOUT;

  return the_packet;
}
Example #15
uint32_t _CPU_ISR_Get_level( void )
{
  uint32_t status = _Nios2_Get_ctlreg_status();
  uint32_t level = 0;

  switch ( _Nios2_ISR_Get_status_mask() ) {
    case NIOS2_ISR_STATUS_MASK_IIC:
      level = (status & NIOS2_STATUS_PIE) == 0;
      break;
    case NIOS2_ISR_STATUS_MASK_EIC_IL:
      level = (status & NIOS2_STATUS_IL_MASK) >> NIOS2_STATUS_IL_OFFSET;
      break;
    case NIOS2_ISR_STATUS_MASK_EIC_RSIE:
      level = (status & NIOS2_STATUS_RSIE) == 0;
      break;
    default:
      /* FIXME */
      _Terminate( INTERNAL_ERROR_CORE, false, 0xdeadbeef );
      break;
  }

  return level;
}
Example #16
void _Thread_blocking_operation_Cancel(
#if defined(RTEMS_DEBUG)
  Thread_blocking_operation_States  sync_state,
#else
  Thread_blocking_operation_States  sync_state __attribute__((unused)),
#endif
  Thread_Control                   *the_thread,
  ISR_Level                         level
)
{
  /*
   *  Cases that should not happen and why.
   *
   *  THREAD_BLOCKING_OPERATION_SYNCHRONIZED:
   *
   *  This indicates that someone did not enter a blocking
   *  operation critical section.
   *
   *  THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED:
   *
   *  This indicates that there was nothing to cancel so
   *  we should not have been called.
   */

  #if defined(RTEMS_DEBUG)
    if ( (sync_state == THREAD_BLOCKING_OPERATION_SYNCHRONIZED) ||
         (sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED) ) {
      _Terminate(
        INTERNAL_ERROR_CORE,
        true,
        INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL
      );
    }
  #endif

  _Thread_blocking_operation_Finalize( the_thread, level );
}
Example #17
void RTEMS_Malloc_Initialize(
  const Heap_Area *areas,
  size_t area_count,
  Heap_Initialization_or_extend_handler extend
)
{
  Heap_Control *heap = RTEMS_Malloc_Heap;

  if ( !rtems_configuration_get_unified_work_area() ) {
    Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
    uintptr_t page_size = CPU_HEAP_ALIGNMENT;
    size_t i;

    for (i = 0; i < area_count; ++i) {
      const Heap_Area *area = &areas [i];
      uintptr_t space_available = (*init_or_extend)(
        heap,
        area->begin,
        area->size,
        page_size
      );

      if ( space_available > 0 ) {
        init_or_extend = extend;
      }
    }

    if ( init_or_extend == _Heap_Initialize ) {
      _Terminate(
        INTERNAL_ERROR_CORE,
        true,
        INTERNAL_ERROR_NO_MEMORY_FOR_HEAP
      );
    }
  }
}
Example #18
void _POSIX_Fatal_error( POSIX_Fatal_domain domain, int eno )
{
  uint32_t code = ( domain << 8 ) | ( ( uint32_t ) eno & 0xffU );

  _Terminate( INTERNAL_ERROR_POSIX_API, false, code );
}
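The code packs the domain into the upper bits and the errno into the low byte. The inverse, for a fatal-error handler that wants the parts back (a sketch of the obvious decoding, not an RTEMS API):

#include <stdint.h>

/* Sketch: invert the ( domain << 8 ) | ( eno & 0xff ) packing above. */
static void posix_fatal_decode( uint32_t code, uint32_t *domain, int *eno )
{
  *domain = code >> 8;
  *eno    = (int) ( code & 0xffU );
}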
Example #19
void _Thread_queue_Enqueue_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  States_Control                 state,
  Thread_queue_Context          *queue_context
)
{
  Thread_queue_Path  path;
  Per_CPU_Control   *cpu_self;
  bool               success;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue, operations );

  if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  ( *operations->enqueue )( queue, the_thread, &path );

  _Thread_queue_Path_release( &path );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context );

  if (
    cpu_self->thread_dispatch_disable_level
      != queue_context->expected_thread_dispatch_disable_level
  ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
    );
  }

  /*
   *  Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, state );

  /*
   *  If the thread wants to timeout, then schedule its timer.
   */
  switch ( queue_context->timeout_discipline ) {
    case WATCHDOG_RELATIVE:
      /* A relative timeout of 0 is a special case: an indefinite (no) timeout */
      if ( queue_context->timeout != 0 ) {
        _Thread_Timer_insert_relative(
          the_thread,
          cpu_self,
          _Thread_Timeout,
          (Watchdog_Interval) queue_context->timeout
        );
      }
      break;
    case WATCHDOG_ABSOLUTE:
      _Thread_Timer_insert_absolute(
        the_thread,
        cpu_self,
        _Thread_Timeout,
        queue_context->timeout
      );
      break;
    default:
      break;
  }

  /*
   * At this point thread dispatching is disabled; however, we have already
   * released the thread queue lock.  Thus, interrupts or threads on other
   * processors may already have changed our state with respect to the thread
   * queue object.  The request could be satisfied or timed out.  This
   * situation is indicated by the thread wait flags.  Other parties must not
   * modify our thread state as long as we are in the
   * THREAD_QUEUE_INTEND_TO_BLOCK thread wait state, thus we have to cancel
   * the blocking operation ourselves if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Update_priority( path.update_priority );
  _Thread_Dispatch_enable( cpu_self );
}
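The try-change at the end is the crux of the comment above: it is a single atomic compare-and-swap, so exactly one party (this thread, a satisfying thread, or a timeout) wins the INTEND_TO_BLOCK race. An illustrative sketch in plain C11 atomics (not the RTEMS-internal API):

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative: advance the wait flags from `expected` to `desired` only
 * if no other party changed them first; acquire ordering pairs with the
 * releasing update performed by whoever satisfied the request. */
static bool try_change_acquire(
  atomic_uint *flags,
  unsigned     expected,
  unsigned     desired
)
{
  return atomic_compare_exchange_strong_explicit(
    flags, &expected, desired,
    memory_order_acquire, memory_order_acquire
  );
}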
Example #20
void _Thread_Handler( void )
{
  Thread_Control *executing = _Thread_Executing;
  ISR_Level       level;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();

  #if !defined(RTEMS_SMP)
    /*
     * We have to put level into a register for those CPUs that use
     * inline asm here.
     */
    level = executing->Start.isr_level;
    _ISR_Set_level( level );
  #endif

  /*
   * Initialize the floating point context because we do not come
   * through _Thread_Dispatch on our first invocation. So the normal
   * code path for performing the FP context switch is not hit.
   */
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    #if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
      if ( (executing->fp_context != NULL) &&
            !_Thread_Is_allocated_fp( executing ) ) {
        if ( _Thread_Allocated_fp != NULL )
          _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
        _Thread_Allocated_fp = executing;
      }
    #endif
  #endif

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means must keep dispatch
   * disabled until all 'begin' extensions complete.
   */
  _User_extensions_Thread_begin( executing );

  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */
  #if defined(RTEMS_SMP)
    {
      /*
       * On SMP we enter _Thread_Handler() with interrupts disabled and
       * _Thread_Dispatch() obtained the per-CPU lock for us.  We have to
       * release it here and set the desired interrupt level of the thread.
       */
      Per_CPU_Control *cpu_self = _Per_CPU_Get();

      _Assert( cpu_self->thread_dispatch_disable_level == 1 );
      _Assert( _ISR_Get_level() != 0 );

      _Thread_Debug_set_real_processor( executing, cpu_self );

      cpu_self->thread_dispatch_disable_level = 0;
      _Profiling_Thread_dispatch_enable( cpu_self, 0 );

      level = executing->Start.isr_level;
      _ISR_Set_level( level);

      /*
       * The thread dispatch level changed from one to zero.  Make sure we lose
       * no thread dispatch necessary update.
       */
      _Thread_Dispatch();
    }
  #else
    _Thread_Enable_dispatch();
  #endif

  /*
   *  RTEMS supports multiple APIs and each API can define a different
   *  thread/task prototype. The following code supports invoking the
   *  user thread entry point using the prototype expected.
   */
  if ( executing->Start.prototype == THREAD_START_NUMERIC ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
  }
  #if defined(RTEMS_POSIX_API)
    else if ( executing->Start.prototype == THREAD_START_POINTER ) {
      executing->Wait.return_argument =
        (*(Thread_Entry_pointer) executing->Start.entry_point)(
          executing->Start.pointer_argument
        );
    }
  #endif
  #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
    else if ( executing->Start.prototype == THREAD_START_BOTH_POINTER_FIRST ) {
      executing->Wait.return_argument =
         (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
           executing->Start.pointer_argument,
           executing->Start.numeric_argument
         );
    }
    else if ( executing->Start.prototype == THREAD_START_BOTH_NUMERIC_FIRST ) {
      executing->Wait.return_argument =
       (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
         executing->Start.numeric_argument,
         executing->Start.pointer_argument
       );
    }
  #endif

  /*
   *  In the switch above, the return code from the user thread body
   *  was placed in return_argument.  This assumed that if it returned
   *  anything (which is not supported in all APIs), then it would be
   *  able to fit in a (void *).
   */

  _User_extensions_Thread_exitted( executing );

  _Terminate(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
Example #21
void boot_card(const char *cmdline)
{
  test_context *ctx = &test_instance;
  struct timecounter *tc_soft = &ctx->tc_soft;
  uint64_t soft_freq = 1000000;
  struct bintime bt;
  struct timeval tv;
  struct timespec ts;

  rtems_test_begink();

  assert(time(NULL) == TOD_SECONDS_1970_THROUGH_1988);

  rtems_bsd_bintime(&bt);
  assert(bt.sec == TOD_SECONDS_1970_THROUGH_1988);
  assert(bt.frac == 0);

  rtems_bsd_getbintime(&bt);
  assert(bt.sec == TOD_SECONDS_1970_THROUGH_1988);
  assert(bt.frac == 0);

  rtems_bsd_microtime(&tv);
  assert(tv.tv_sec == TOD_SECONDS_1970_THROUGH_1988);
  assert(tv.tv_usec == 0);

  rtems_bsd_getmicrotime(&tv);
  assert(tv.tv_sec == TOD_SECONDS_1970_THROUGH_1988);
  assert(tv.tv_usec == 0);

  rtems_bsd_nanotime(&ts);
  assert(ts.tv_sec == TOD_SECONDS_1970_THROUGH_1988);
  assert(ts.tv_nsec == 0);

  rtems_bsd_getnanotime(&ts);
  assert(ts.tv_sec == TOD_SECONDS_1970_THROUGH_1988);
  assert(ts.tv_nsec == 0);

  assert(rtems_clock_get_uptime_seconds() == 0);
  assert(rtems_clock_get_uptime_nanoseconds() == 0);

  rtems_bsd_binuptime(&bt);
  assert(bt.sec == 1);
  assert(bt.frac == 0);

  rtems_bsd_getbinuptime(&bt);
  assert(bt.sec == 1);
  assert(bt.frac == 0);

  rtems_bsd_microuptime(&tv);
  assert(tv.tv_sec == 1);
  assert(tv.tv_usec == 0);

  rtems_bsd_getmicrouptime(&tv);
  assert(tv.tv_sec == 1);
  assert(tv.tv_usec == 0);

  rtems_bsd_nanouptime(&ts);
  assert(ts.tv_sec == 1);
  assert(ts.tv_nsec == 0);

  rtems_bsd_getnanouptime(&ts);
  assert(ts.tv_sec == 1);
  assert(ts.tv_nsec == 0);

  /* On RTEMS, time does not advance using the dummy timecounter */
  rtems_bsd_binuptime(&bt);
  assert(bt.sec == 1);
  assert(bt.frac == 0);

  rtems_timecounter_tick();
  rtems_bsd_binuptime(&bt);
  assert(bt.sec == 1);
  assert(bt.frac == 0);

  ctx->tc_soft_counter = 0;
  tc_soft->tc_get_timecount = test_get_timecount_soft;
  tc_soft->tc_counter_mask = 0x0fffffff;
  tc_soft->tc_frequency = soft_freq;
  tc_soft->tc_quality = 1234;
  tc_soft->tc_priv = ctx;
  _Timecounter_Install(tc_soft);
  assert(ctx->tc_soft_counter == 3);

  rtems_bsd_binuptime(&bt);
  assert(ctx->tc_soft_counter == 4);

  assert(bt.sec == 1);
  assert(bt.frac == 18446744073708);

  ctx->tc_soft_counter = 0xf0000000 | 3;
  rtems_bsd_binuptime(&bt);
  assert(ctx->tc_soft_counter == (0xf0000000 | 4));

  assert(bt.sec == 1);
  assert(bt.frac == 18446744073708);

  /* Ensure that the fraction overflows and the second remains constant */
  ctx->tc_soft_counter = (0xf0000000 | 3) + soft_freq;
  rtems_bsd_binuptime(&bt);
  assert(ctx->tc_soft_counter == (0xf0000000 | 4) + soft_freq);
  assert(bt.sec == 1);
  assert(bt.frac == 18446742522092);

  rtems_test_endk();

  _Terminate(RTEMS_FATAL_SOURCE_EXIT, false, 0);
}
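The magic fraction values follow from the timecounter scale factor: one tick of a 1 MHz counter advances the 64-bit binary fraction by roughly 2^64 / 10^6. Assuming the FreeBSD-derived tc_windup() computes the per-tick scale as 2 * (2^63 / frequency) with integer division, that is exactly the 18446744073708 the asserts expect; a worked check:

#include <assert.h>
#include <stdint.h>

int main( void )
{
  /* Assumption: per-tick scale = 2 * (2^63 / freq), matching the
   * FreeBSD-derived timecounter windup that RTEMS imports. */
  uint64_t freq  = 1000000;
  uint64_t scale = 2 * ( ( UINT64_C( 1 ) << 63 ) / freq );

  assert( scale == UINT64_C( 18446744073708 ) );
  return 0;
}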
Example #22
Thread_Control *_Thread_MP_Allocate_proxy (
  States_Control  the_state
)
{
  Thread_Proxy_control *the_proxy;
  ISR_lock_Context      lock_context;

  _Thread_MP_Proxies_acquire( &lock_context );

  the_proxy = (Thread_Proxy_control *)
    _Chain_Get_unprotected( &_Thread_MP_Inactive_proxies );
  if ( the_proxy != NULL ) {
    Thread_Control   *executing;
    MP_packet_Prefix *receive_packet;
    Objects_Id        source_tid;

    executing = _Thread_Executing;
    receive_packet = _MPCI_Receive_server_tcb->receive_packet;
    source_tid = receive_packet->source_tid;

    executing->Wait.return_code = THREAD_STATUS_PROXY_BLOCKING;

    the_proxy->receive_packet = receive_packet;
    the_proxy->Object.id = source_tid;
    the_proxy->current_priority = receive_packet->source_priority;
    the_proxy->current_state = _States_Set( STATES_DORMANT, the_state );

    the_proxy->Wait.count                   = executing->Wait.count;
    the_proxy->Wait.return_argument         = executing->Wait.return_argument;
    the_proxy->Wait.return_argument_second  = executing->Wait.return_argument_second;
    the_proxy->Wait.option                  = executing->Wait.option;
    the_proxy->Wait.return_code             = executing->Wait.return_code;
    the_proxy->Wait.timeout_code            = executing->Wait.timeout_code;

    the_proxy->thread_queue_callout = _Thread_queue_MP_callout_do_nothing;

    _RBTree_Insert_inline(
      &_Thread_MP_Active_proxies,
      &the_proxy->Active,
      &source_tid,
      _Thread_MP_Proxy_less
    );

    _Thread_MP_Proxies_release( &lock_context );

    return (Thread_Control *) the_proxy;
  }

  _Thread_MP_Proxies_release( &lock_context );

  _Terminate(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_OUT_OF_PROXIES
  );

  /*
   *  NOTE: The following return ensures that the compiler will
   *        think that all paths return a value.
   */

  return NULL;
}
Example #23
void _RTEMS_tasks_Initialize_user_tasks_body( void )
{
  uint32_t                          index;
  uint32_t                          maximum;
  rtems_id                          id;
  rtems_status_code                 return_value;
  rtems_initialization_tasks_table *user_tasks;
  bool                              register_global_construction;
  rtems_task_entry                  entry_point;

  /*
   *  Move information into local variables
   */
  user_tasks = Configuration_RTEMS_API.User_initialization_tasks_table;
  maximum    = Configuration_RTEMS_API.number_of_initialization_tasks;

  /*
   *  Verify that we have a set of user tasks to iterate
   */
  if ( !user_tasks )
    return;

  register_global_construction = true;

  /*
   *  Now iterate over the initialization tasks and create/start them.
   */
  for ( index=0 ; index < maximum ; index++ ) {
    return_value = rtems_task_create(
      user_tasks[ index ].name,
      user_tasks[ index ].initial_priority,
      user_tasks[ index ].stack_size,
      user_tasks[ index ].mode_set,
      user_tasks[ index ].attribute_set,
      &id
    );
    if ( !rtems_is_status_successful( return_value ) )
      _Terminate( INTERNAL_ERROR_RTEMS_API, true, return_value );

    entry_point = user_tasks[ index ].entry_point;
    if ( entry_point == NULL ) {
      _Terminate(
        INTERNAL_ERROR_CORE,
        false,
        INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL
      );
    }

    if ( register_global_construction ) {
      register_global_construction = false;
      entry_point = _RTEMS_Global_construction;
    }

    return_value = rtems_task_start(
      id,
      entry_point,
      user_tasks[ index ].argument
    );
    _Assert( rtems_is_status_successful( return_value ) );
    (void) return_value;
  }
}
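The member accesses above imply the shape of an initialization task table entry; reconstructed here as a sketch (field names come from the usage, the ordering is a guess):

/* Sketch of one table entry, reconstructed from the accesses above. */
typedef struct {
  rtems_name          name;
  size_t              stack_size;
  rtems_task_priority initial_priority;
  rtems_attribute     attribute_set;
  rtems_task_entry    entry_point;
  rtems_mode          mode_set;
  rtems_task_argument argument;
} rtems_initialization_tasks_table_sketch;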
Example #24
void _Thread_Handler( void )
{
  Thread_Control  *executing = _Thread_Executing;
  ISR_Level        level;
  Per_CPU_Control *cpu_self;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();

  /* On SMP we enter _Thread_Handler() with interrupts disabled */
  _SMP_Assert( _ISR_Get_level() != 0 );

  /*
   * We have to put level into a register for those CPUs that use
   * inline asm here.
   */
  level = executing->Start.isr_level;
  _ISR_Set_level( level );

  /*
   * Initialize the floating point context because we do not come
   * through _Thread_Dispatch on our first invocation. So the normal
   * code path for performing the FP context switch is not hit.
   */
  _Thread_Restore_fp( executing );

  /*
   * Do not use the level of the thread control block, since it has a
   * different format.
   */
  _ISR_Local_disable( level );

  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */
  cpu_self = _Per_CPU_Get();
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

  /*
   * Make sure we lose no thread dispatch necessary update and execute the
   * post-switch actions.  As a side-effect change the thread dispatch level
   * from one to zero.  Do not use _Thread_Enable_dispatch() since there is no
   * valid thread dispatch necessary indicator in this context.
   */
  _Thread_Do_dispatch( cpu_self, level );

  /*
   * Invoke the thread begin extensions in the context of the thread entry
   * function with thread dispatching enabled.  This enables use of dynamic
   * memory allocation, creation of POSIX keys and use of C++ thread local
   * storage.  Blocking synchronization primitives are allowed also.
   */
  _User_extensions_Thread_begin( executing );

  /*
   *  RTEMS supports multiple APIs and each API can define a different
   *  thread/task prototype. The following code supports invoking the
   *  user thread entry point using the prototype expected.
   */
  ( *executing->Start.Entry.adaptor )( executing );

  /*
   *  In the call above, the return code from the user thread body, if any,
   *  was placed in return_argument.  This assumed that if it returned
   *  anything (which is not supported in all APIs), then it would be
   *  able to fit in a (void *).
   */

  _User_extensions_Thread_exitted( executing );

  _Terminate(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
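Example #24 replaces the per-prototype dispatch of Example #20 with one indirect call through an entry adaptor. A sketch of what a numeric-entry adaptor presumably looks like (the Kinds layout and field names are assumptions based on the call site):

/* Assumed shape of one entry adaptor: unpack the stored argument and
 * invoke the user entry point with the prototype it expects. */
static void thread_entry_adaptor_numeric( Thread_Control *executing )
{
  executing->Wait.return_argument =
    ( *executing->Start.Entry.Kinds.Numeric.entry )(
      executing->Start.Entry.Kinds.Numeric.argument
    );
}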