Example #1
void _TOD_Set(
  const Timestamp_Control *tod_as_timestamp,
  ISR_lock_Context        *lock_context
)
{
  struct timespec tod_as_timespec;
  uint64_t        tod_as_ticks;
  uint32_t        cpu_count;
  uint32_t        cpu_index;

  _Assert( _API_Mutex_Is_owner( _Once_Mutex ) );

  _Timecounter_Set_clock( tod_as_timestamp, lock_context );

  _Timestamp_To_timespec( tod_as_timestamp, &tod_as_timespec );
  tod_as_ticks = _Watchdog_Ticks_from_timespec( &tod_as_timespec );
  cpu_count = _SMP_Get_processor_count();

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    _Watchdog_Per_CPU_tickle_absolute( cpu, tod_as_ticks );
  }

  _TOD.is_set = true;
}
Example #2
rtems_status_code rtems_scheduler_ident_by_processor(
  uint32_t  cpu_index,
  rtems_id *id
)
{
  const Scheduler_Control *scheduler;

  if ( id == NULL ) {
    return RTEMS_INVALID_ADDRESS;
  }

  if ( cpu_index >= _SMP_Get_processor_count() ) {
    return RTEMS_INVALID_NAME;
  }

  scheduler = _Scheduler_Get_by_CPU( _Per_CPU_Get_by_index( cpu_index ) );
#if defined(RTEMS_SMP)
  if ( scheduler == NULL ) {
    return RTEMS_INCORRECT_STATE;
  }
#else
  _Assert( scheduler != NULL );
#endif

  *id = _Scheduler_Build_id( _Scheduler_Get_index( scheduler ) );
  return RTEMS_SUCCESSFUL;
}
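A minimal caller sketch for the directive above (an illustration, not taken from the RTEMS sources; it assumes <rtems.h> is included and only uses the directive shown):

static void identify_scheduler_of_processor_zero( void )
{
  rtems_id          scheduler_id;
  rtems_status_code sc;

  /* Look up the scheduler instance that owns processor 0 */
  sc = rtems_scheduler_ident_by_processor( 0, &scheduler_id );

  if ( sc == RTEMS_SUCCESSFUL ) {
    /* scheduler_id now identifies the scheduler of processor 0 */
  }
}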
Example #3
void _Scheduler_default_Tick( Scheduler_Control *scheduler )
{
  uint32_t processor_count = _SMP_Get_processor_count();
  uint32_t processor;

  for ( processor = 0 ; processor < processor_count ; ++processor ) {
    const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );

    _Scheduler_default_Tick_for_executing( scheduler, per_cpu->executing );
  }
}
Example #4
void _Thread_Create_idle( void )
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    if ( _Per_CPU_Is_processor_started( cpu ) ) {
      _Thread_Create_idle_for_cpu( cpu );
    }
  }
}
Example #5
void _SMP_Send_message_multicast(
  const Processor_mask *targets,
  unsigned long         message
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
      _SMP_Send_message( cpu_index, message );
    }
  }
}
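A short caller sketch for _SMP_Send_message_multicast (illustrative; the chosen processor index is an assumption, and SMP_MESSAGE_MULTICAST_ACTION is the message constant used later in Example #7):

static void signal_processor_one( void )
{
  Processor_mask targets;

  /* Build a mask naming processor 1 only, then send it the multicast-action message */
  _Processor_mask_Zero( &targets );
  _Processor_mask_Set( &targets, 1 );
  _SMP_Send_message_multicast( &targets, SMP_MESSAGE_MULTICAST_ACTION );
}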
Example #6
void _SMP_Request_start_multitasking( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  _Per_CPU_State_change( self_cpu, PER_CPU_STATE_READY_TO_START_MULTITASKING );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    if ( _Per_CPU_Is_processor_online( cpu ) ) {
      _Per_CPU_State_change( cpu, PER_CPU_STATE_REQUEST_START_MULTITASKING );
    }
  }
}
Example #7
void _SMP_Multicast_action(
  const size_t setsize,
  const cpu_set_t *cpus,
  SMP_Action_handler handler,
  void *arg
)
{
  SMP_Multicast_action node;
  Processor_mask       targets;
  SMP_lock_Context     lock_context;
  uint32_t             i;

  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
    ( *handler )( arg );
    return;
  }

  if ( cpus == NULL ) {
    _Processor_mask_Assign( &targets, _SMP_Get_online_processors() );
  } else {
    _Processor_mask_Zero( &targets );

    for ( i = 0; i < _SMP_Get_processor_count(); ++i ) {
      if ( CPU_ISSET_S( i, setsize, cpus ) ) {
        _Processor_mask_Set( &targets, i );
      }
    }
  }

  _Chain_Initialize_node( &node.Node );
  node.handler = handler;
  node.arg = arg;
  _Processor_mask_Assign( &node.targets, &targets );
  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );

  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
  _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );

  _SMP_Send_message_multicast( &targets, SMP_MESSAGE_MULTICAST_ACTION );
  _SMP_Multicasts_try_process();

  while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
    /* Wait */
  }
}
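A usage sketch for _SMP_Multicast_action (the handler and function names are illustrative assumptions; CPU_ZERO_S/CPU_SET_S follow the same calling pattern as in Examples #11 and #12):

static void flush_action( void *arg )
{
  /* Executed on every selected processor; arg is the last argument passed below */
  (void) arg;
}

static void run_flush_on_two_processors( void )
{
  cpu_set_t cpus;

  CPU_ZERO_S( sizeof( cpus ), &cpus );
  CPU_SET_S( 0, sizeof( cpus ), &cpus );
  CPU_SET_S( 1, sizeof( cpus ), &cpus );

  /* Busy-waits until every selected processor has run flush_action() */
  _SMP_Multicast_action( sizeof( cpus ), &cpus, flush_action, NULL );
}

Passing NULL for the cpu set instead selects all online processors, as the NULL branch in the function above shows.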
Example #8
void _SMP_Send_message_broadcast( unsigned long message )
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index_self = _SMP_Get_current_processor();
  uint32_t cpu_index;

  _Assert( _Debug_Is_thread_dispatching_allowed() );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    if (
      cpu_index != cpu_index_self
        && _Processor_mask_Is_set( &_SMP_Online_processors, cpu_index )
    ) {
      _SMP_Send_message( cpu_index, message );
    }
  }
}
Example #9
void _SMP_Request_other_cores_to_shutdown( void )
{
  uint32_t self = _SMP_Get_current_processor();
  uint32_t ncpus = _SMP_Get_processor_count();
  uint32_t cpu;

  _SMP_Broadcast_message( RTEMS_BSP_SMP_SHUTDOWN );

  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
    if ( cpu != self ) {
      _Per_CPU_Wait_for_state(
        _Per_CPU_Get_by_index( cpu ),
        PER_CPU_STATE_SHUTDOWN
      );
    }
  }
}
Example #10
void _SMP_Request_other_cores_to_perform_first_context_switch( void )
{
  uint32_t self = _SMP_Get_current_processor();
  uint32_t ncpus = _SMP_Get_processor_count();
  uint32_t cpu;

  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );

    if ( cpu != self ) {
      _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );
    } else {
      _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_UP );
    }
  }
}
Example #11
/*
 * _CPU_set_Handler_initialization
 */
void _CPU_set_Handler_initialization( void )
{
  uint32_t cpu_count;
  uint32_t cpu_index;

  /* We do not support a cpu count over CPU_SETSIZE  */
  cpu_count = _SMP_Get_processor_count();

  /* This should never happen */
  _Assert( cpu_count <= CPU_SETSIZE );

  /*  Initialize the affinity to be the set of all available CPUs  */
  cpuset_default.set     = &cpuset_default.preallocated;
  cpuset_default.setsize = sizeof( *cpuset_default.set );
  CPU_ZERO_S( cpuset_default.setsize, &cpuset_default.preallocated );

  for ( cpu_index=0; cpu_index<cpu_count; cpu_index++ )
    CPU_SET_S( (int) cpu_index, cpuset_default.setsize, cpuset_default.set );
}
Example #12
/*
 * _CPU_set_Handler_initialization
 */
void _CPU_set_Handler_initialization( void )
{
  int i;
  int max_cpus;

  /* We do not support a cpu count over CPU_SETSIZE  */
  max_cpus = _SMP_Get_processor_count();

  /* This should never happen */
  _Assert( max_cpus <= CPU_SETSIZE );

  /*  Initialize the affinity to be the set of all available CPUs  */
  cpuset_default.set     = &cpuset_default.preallocated;
  cpuset_default.setsize = sizeof( *cpuset_default.set );
  CPU_ZERO_S( cpuset_default.setsize, &cpuset_default.preallocated );

  for (i=0; i<max_cpus; i++)
    CPU_SET_S(i, cpuset_default.setsize, cpuset_default.set );
}
Example #13
void _SMP_Broadcast_message( uint32_t message )
{
  uint32_t self = _SMP_Get_current_processor();
  uint32_t ncpus = _SMP_Get_processor_count();
  uint32_t cpu;

  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
    if ( cpu != self ) {
      Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
      ISR_Level level;

      _Per_CPU_Lock_acquire( per_cpu, level );
      per_cpu->message |= message;
      _Per_CPU_Lock_release( per_cpu, level );
    }
  }

  bsp_smp_broadcast_interrupt();
}
Example #14
bool _SMP_Before_multitasking_action_broadcast(
  SMP_Action_handler  handler,
  void               *arg
)
{
  bool done = true;
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  for ( cpu_index = 0 ; done && cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    if (
      !_Per_CPU_Is_boot_processor( cpu )
        && _Per_CPU_Is_processor_online( cpu )
    ) {
      done = _SMP_Before_multitasking_action( cpu, handler, arg );
    }
  }

  return done;
}
Example #15
void size_rtems(
  int mode
)
{
int uninitialized = 0;
int initialized = 0;
int sys_req = 0;  /* workspace overhead total; declared here so this excerpt is self-contained */

/*
 *  The following data is allocated for each Manager:
 *
 *    + Per Manager Object Information
 *      - local pointer table
 *      - local name table
 *      - the object's control blocks
 *      - global name chains
 *
 *  The following is the data allocated from the RTEMS Workspace Area.
 *  The order indicates the order in which RTEMS allocates it.
 *
 *    + Object MP
 *      - Global Object CB's
 *    + Thread MP
 *      - Proxies Chain
 *    + Scheduler
 *      - Ready queue
 *    + Interrupt Manager
 *      - Interrupt Stack
 *    + Timer Manager
 *      - per Manager Object Data
 *    + Extension Manager
 *      - per Manager Object Data
 *    + Message Queue Manager
 *      - per Manager Object Data
 *      - Message Buffers
 *    + Semaphore Manager
 *      - per Manager Object Data
 *    + Partition Manager
 *      - per Manager Object Data
 *    + Region Manager
 *      - per Manager Object Data
 *    + Dual Ported Memory Manager
 *      - per Manager Object Data
 *    + Rate Monotonic Manager
 *      - per Manager Object Data
 *    + Internal Threads Handler
 *      - MPCI Receive Server Thread TCB
 *      - IDLE Thread TCB
 *      - MPCI Receive Server Thread stack
 *      - MPCI Receive Server Thread FP area (if CPU requires this)
 *      - IDLE Thread stack
 *      - IDLE Thread FP area (if CPU requires this)
 *
 *  This does not take into account any CPU dependent alignment requirements.
 *
 *  The following calculates the overhead needed by RTEMS from the
 *  Workspace Area.
 */
sys_req = SYSTEM_TASKS        +     /* MPCI Receive Server and IDLE */
          NAME_PTR_SIZE       +     /* Task Overhead */
          SCHEDULER_WKSP_SIZE +     /* Scheduler Overhead */
          NAME_PTR_SIZE       +     /* Timer Overhead */
          NAME_PTR_SIZE       +     /* Semaphore Overhead */
          NAME_PTR_SIZE       +     /* Message Queue Overhead */
          NAME_PTR_SIZE       +     /* Region Overhead */
          NAME_PTR_SIZE       +     /* Partition Overhead */
          NAME_PTR_SIZE       +     /* Dual-Ported Memory Overhead */
          NAME_PTR_SIZE       +     /* Rate Monotonic Overhead */
          NAME_PTR_SIZE       +     /* Extension Overhead */
          PER_NODE;                 /* Extra Gobject Table */

uninitialized =
/*address.h*/   0                                         +

/*apiext.h*/    (sizeof _API_extensions_List)             +

/*asr.h*/       0                                         +

/*attr.h*/      0                                         +

/*bitfield.h*/  0                                         +

/*chain.h*/     0                                         +

/*clock.h*/     0                                         +

/*config.h*/
        #if defined(RTEMS_MULTIPROCESSING)
                (sizeof _Configuration_MP_table)          +
        #endif

/*context.h*/   (sizeof _Thread_Dispatch_necessary)        +

/*copyrt.h*/    0                                         +

/*debug.h*/     (sizeof _Debug_Level)                     +

/*dpmemimpl.h*/ (sizeof _Dual_ported_memory_Information)  +

#if defined(RTEMS_MULTIPROCESSING)
/*eventmp.h*/   0                                         +
#endif

/*extensionimpl.h*/ (sizeof _Extension_Information)       +

/*fatal.h*/     0                                         +

/*heap.h*/      0                                         +

/*init.h*/      0                                         +

/*interr.h*/    (sizeof _Internal_errors_What_happened)   +

/*intr.h*/      0                                         +

/*isr.h*/       (sizeof _ISR_Nest_level)                  +
#if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
                (sizeof _ISR_Vector_table)                +
#endif

/*messageimpl.h*/ (sizeof _Message_queue_Information)     +

/*modes.h*/     0                                         +

#if defined(RTEMS_MULTIPROCESSING)
/*mp.h*/        0                                         +
#endif

#if defined(RTEMS_MULTIPROCESSING)
/*mpciimpl.h*/  (sizeof _MPCI_Remote_blocked_threads)     +
                (sizeof _MPCI_Semaphore)                  +
                (sizeof _MPCI_table)                      +
                (sizeof _MPCI_Receive_server_tcb)         +
                (sizeof _MPCI_Packet_processors)          +
#endif

#if defined(RTEMS_MULTIPROCESSING)
/*mppkt.h*/     0                                         +
#endif

#if defined(RTEMS_MULTIPROCESSING)
/*mptables.h*/  0                                         +
#endif

#if defined(RTEMS_MULTIPROCESSING)
/*msgmp.h*/     0                                         +
#endif

/*object.h*/    (sizeof _Objects_Local_node)              +
                (sizeof _Objects_Maximum_nodes)           +
                (sizeof _Objects_Information_table)       +

#if defined(RTEMS_MULTIPROCESSING)
/*objectmp.h*/  (sizeof _Objects_MP_Maximum_global_objects) +
                (sizeof _Objects_MP_Inactive_global_objects) +
#endif

/*options.h*/   0                                         +

/*partimpl.h*/  (sizeof _Partition_Information)           +

#if defined(RTEMS_MULTIPROCESSING)
/*partmp.h*/    0                                         +
#endif

/*percpu.h*/    (_SMP_Get_processor_count() * sizeof(Per_CPU_Control))  +

/*ratemonimpl.h*/ (sizeof _Rate_monotonic_Information)    +

/*regionimpl.h*/ (sizeof _Region_Information)             +

#if defined(RTEMS_MULTIPROCESSING)
/*regionmp.h*/  0                                         +
#endif

/*rtems.h*/     /* Not applicable */

/*semimpl.h*/   (sizeof _Semaphore_Information)           +

#if defined(RTEMS_MULTIPROCESSING)
/*semmp.h*/     0                                         +
#endif

/*signal.h*/    0                                         +

/*signalmp.h*/  0                                         +

/*stack.h*/     0                                         +

/*states.h*/    0                                         +

/*status.h*/    0                                         +

/*sysstate.h*/  (sizeof _System_state_Current)            +
#if defined(RTEMS_MULTIPROCESSING)
                (sizeof _System_state_Is_multiprocessing) +
#endif

#if defined(RTEMS_MULTIPROCESSING)
/*taskmp.h*/    0                                         +
#endif

/*tasksimpl.h*/ (sizeof _RTEMS_tasks_Information)         +

/*thread.h*/    (sizeof _Thread_Dispatch_disable_level)   +
                (sizeof _Thread_Executing)                +
                (sizeof _Thread_Heir)                     +
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
                (sizeof _Thread_Allocated_fp)             +
#endif
                (sizeof _Thread_Internal_information)     +

#if defined(RTEMS_MULTIPROCESSING)
/*threadmp.h*/  (sizeof _Thread_MP_Active_proxies)        +
                (sizeof _Thread_MP_Inactive_proxies)      +
#endif

/*threadq.h*/

/*timerimpl.h*/ (sizeof _Timer_Information)               +

/*tod.h*/       (sizeof _TOD.now)                         +
                (sizeof _TOD.uptime)                      +

/*tqdata.h*/    0                                         +

/*types.h*/     0                                         +

/*userext.h*/   (sizeof _User_extensions_List)            +

/*watchdog.h*/  (sizeof _Watchdog_Sync_level)             +
                (sizeof _Watchdog_Sync_count)             +
                (sizeof _Watchdog_Ticks_since_boot)       +
                (sizeof _Watchdog_Ticks_header)           +
                (sizeof _Watchdog_Seconds_header)         +

/*wkspace.h*/   (sizeof _Workspace_Area);

#ifndef unix  /* make sure this is not a native compile */

#ifdef __i386__

/* cpu.h */
uninitialized += (sizeof _CPU_Null_fp_context);

#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE)
uninitialized += (sizeof _CPU_Interrupt_stack_low) +
                 (sizeof _CPU_Interrupt_stack_high);
#endif

#endif

#ifdef __mc68000__

/* cpu.h */
uninitialized += (sizeof _CPU_Interrupt_stack_low) +
                 (sizeof _CPU_Interrupt_stack_high);

#endif

#ifdef __sparc__

/* cpu.h */
uninitialized += (sizeof _CPU_Interrupt_stack_low) +
                 (sizeof _CPU_Interrupt_stack_high) +
                 (sizeof _CPU_Null_fp_context);

#endif


#ifdef no_cpu

/* cpu.h */
uninitialized += (sizeof _CPU_Null_fp_context) +
                 (sizeof _CPU_Interrupt_stack_low) +
                 (sizeof _CPU_Interrupt_stack_high) +
                 (sizeof _CPU_Thread_dispatch_pointer);

#endif

#ifdef __PPC__

/* cpu.h */
uninitialized += (sizeof _CPU_Interrupt_stack_low) +
                 (sizeof _CPU_Interrupt_stack_high);

#endif
#endif /* !unix */

initialized +=
/*copyrt.h*/    (strlen(_Copyright_Notice)+1)             +

#if defined(RTEMS_MULTIPROCESSING)
/*sptables.h*/  (sizeof _Initialization_Default_multiprocessing_table)  +
#endif
                (strlen(_RTEMS_version)+1);



#ifndef unix /* make sure this is not native */
#ifdef __sparc__

initialized +=  (sizeof _CPU_Trap_slot_template);

#endif
#endif /* !unix */

puts( "" );

  if ( mode == 0 ) help_size();
  else             print_formula();

printf( "\n" );
printf( "RTEMS uninitialized data consumes %d bytes\n", uninitialized );
printf( "RTEMS initialized data consumes %d bytes\n", initialized );

}
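An invocation sketch for size_rtems (the call sites are illustrative assumptions; the mode values follow the if/else near the end of the function):

size_rtems( 0 );   /* mode 0: print help via help_size() before the totals */
size_rtems( 1 );   /* any other mode: print the formula via print_formula() */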