Example #1
uint32_t _Thread_Dispatch_increment_disable_level( void )
{
  Giant_Control *giant = &_Giant;
  ISR_Level isr_level;
  uint32_t self_cpu_index;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable( isr_level );

  /*
   * We must obtain the processor ID after interrupts are disabled to prevent
   * thread migration.
   */
  self_cpu_index = _SMP_Get_current_processor();

  _Giant_Do_acquire( self_cpu_index );

  self_cpu = _Per_CPU_Get_by_index( self_cpu_index );
  disable_level = self_cpu->thread_dispatch_disable_level;
  ++disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  _ISR_Enable( isr_level );

  return disable_level;
}
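Note on the comment above: with interrupts enabled, the executing thread could be migrated to another processor between reading the processor index and using it, so the increment would hit the wrong per-CPU disable level. The snippet below is only a user-space analogy of that hazard, not RTEMS code; it assumes a Linux/glibc host where sched_getcpu() is available and shows that two reads of the current CPU are not guaranteed to agree unless migration is excluded.

/* Hypothetical host-side demo: without pinning or a preemption-off
 * section, the "current CPU" may change between two consecutive reads,
 * which is exactly what _ISR_Disable() prevents in the code above. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
  int before = sched_getcpu();      /* CPU we happen to run on now         */

  usleep(10 * 1000);                /* window for the scheduler to move us */

  int after = sched_getcpu();       /* may differ from 'before'            */

  printf("before=%d after=%d%s\n", before, after,
         before == after ? "" : "  <- migrated");
  return 0;
}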
Example #2
void _SMP_Multicast_actions_process( void )
{
  SMP_lock_Context      lock_context;
  uint32_t              cpu_self_index;
  SMP_Multicast_action *node;
  SMP_Multicast_action *next;

  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
  cpu_self_index = _SMP_Get_current_processor();
  node = (SMP_Multicast_action *) _Chain_First( &_SMP_Multicast.Actions );

  while ( !_Chain_Is_tail( &_SMP_Multicast.Actions, &node->Node ) ) {
    next = (SMP_Multicast_action *) _Chain_Next( &node->Node );

    if ( _Processor_mask_Is_set( &node->targets, cpu_self_index ) ) {
      _Processor_mask_Clear( &node->targets, cpu_self_index );

      ( *node->handler )( node->arg );

      if ( _Processor_mask_Is_zero( &node->targets ) ) {
        _Chain_Extract_unprotected( &node->Node );
        _Atomic_Store_ulong( &node->done, 1, ATOMIC_ORDER_RELEASE );
      }
    }

    node = next;
  }

  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );
}
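The loop above is a "last target publishes completion" protocol: every addressed processor clears its own bit in the action's target mask, and the processor that clears the final bit extracts the node and stores done with release semantics, so the requester's matching acquire load sees all handler side effects. The following is a minimal pthread/C11-atomics sketch of that pattern, not the RTEMS implementation; the worker count, results array, and bit layout are made up for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NWORKERS 4u

static atomic_uint pending = (1u << NWORKERS) - 1u; /* one bit per worker        */
static atomic_int  done;                            /* completion flag           */
static int         results[NWORKERS];               /* per-worker "handler" data */

static void *worker(void *arg)
{
  unsigned index = (unsigned) (uintptr_t) arg;
  unsigned bit = 1u << index;
  unsigned old;

  results[index] = (int) index + 100;   /* the "handler" runs here */

  /* Clear our bit; acq_rel chains the other workers' writes to the last one. */
  old = atomic_fetch_and_explicit(&pending, ~bit, memory_order_acq_rel);

  if (old == bit) {
    /* We cleared the last remaining bit: publish completion with release. */
    atomic_store_explicit(&done, 1, memory_order_release);
  }

  return NULL;
}

int main(void)
{
  pthread_t threads[NWORKERS];
  unsigned  i;

  for (i = 0; i < NWORKERS; ++i)
    pthread_create(&threads[i], NULL, worker, (void *) (uintptr_t) i);

  /* Requester side: the acquire load pairs with the release store above. */
  while (atomic_load_explicit(&done, memory_order_acquire) == 0)
    ;   /* spin, as the issuer of a multicast action would */

  for (i = 0; i < NWORKERS; ++i)
    printf("results[%u] = %d\n", i, results[i]);

  for (i = 0; i < NWORKERS; ++i)
    pthread_join(threads[i], NULL);

  return 0;
}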
Example #3
void _Assert_Owner_of_giant( void )
{
  Giant_Control *giant = &_Giant;

  _Assert(
    giant->owner_cpu == _SMP_Get_current_processor()
      || !_System_state_Is_up( _System_state_Get() )
  );
}
Example #4
void _Giant_Acquire( void )
{
  ISR_Level isr_level;

  _ISR_Disable( isr_level );
  _Assert( _Thread_Dispatch_disable_level != 0 );
  _Giant_Do_acquire( _SMP_Get_current_processor() );
  _ISR_Enable( isr_level );
}
Example #5
void _Assert_Thread_dispatching_repressed( void )
{
  bool dispatch_is_disabled;
  ISR_Level level;
  Per_CPU_Control *per_cpu;

  _ISR_Disable( level );
  per_cpu = _Per_CPU_Get_by_index( _SMP_Get_current_processor() );
  dispatch_is_disabled = per_cpu->thread_dispatch_disable_level != 0;
  _ISR_Enable( level );

  _Assert( dispatch_is_disabled || _ISR_Get_level() != 0 );
}
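The assertion above accepts either of the two ways thread dispatching can be "repressed" on the executing processor: the per-CPU thread dispatch disable level is non-zero, or the code runs with a raised ISR level, in which case the dispatcher cannot be invoked until the interrupt level drops back to zero.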
Example #6
File: smp.c Project: gedare/rtems
static void _SMP_Start_processors( uint32_t cpu_count )
{
  uint32_t cpu_index_self;
  uint32_t cpu_index;

  cpu_index_self = _SMP_Get_current_processor();

  for ( cpu_index = 0 ; cpu_index < cpu_count; ++cpu_index ) {
    const Scheduler_Assignment *assignment;
    Per_CPU_Control            *cpu;
    bool                        started;

    assignment = _Scheduler_Get_initial_assignment( cpu_index );
    cpu = _Per_CPU_Get_by_index( cpu_index );

    if ( cpu_index != cpu_index_self ) {
      if ( _Scheduler_Should_start_processor( assignment ) ) {
        started = _CPU_SMP_Start_processor( cpu_index );

        if ( !started && _Scheduler_Is_mandatory_processor( assignment ) ) {
          _SMP_Fatal( SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED );
        }
      } else {
        started = false;
      }
    } else {
      started = true;

      cpu->boot = true;

      if ( !_Scheduler_Should_start_processor( assignment ) ) {
        _SMP_Fatal( SMP_FATAL_BOOT_PROCESSOR_NOT_ASSIGNED_TO_SCHEDULER );
      }
    }

    cpu->online = started;

    if ( started ) {
      const Scheduler_Control *scheduler;
      Scheduler_Context       *context;

      scheduler = assignment->scheduler;
      context = _Scheduler_Get_context( scheduler );

      _Processor_mask_Set( &_SMP_Online_processors, cpu_index );
      _Processor_mask_Set( &context->Processors, cpu_index );
      cpu->Scheduler.control = scheduler;
      cpu->Scheduler.context = context;
    }
  }
}
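For each configured processor the loop above produces one of three outcomes: the boot processor marks itself as started (and it is a fatal error if no scheduler is assigned to it); a secondary processor with a scheduler assignment is started via _CPU_SMP_Start_processor(), where a start failure is fatal only if the assignment is mandatory; and a secondary processor that should not be started is simply left offline. Only processors that actually started are added to _SMP_Online_processors and to their scheduler context's processor set.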
Example #7
void rpi_ipi_initialize(void)
{
  uint32_t cpu_index_self = _SMP_Get_current_processor();

  /*
   * Only the mailbox 3 interrupt is supported for now.  Support for the
   * remaining interrupt sources still has to be added and integrated with
   * the existing Raspberry Pi interrupt support.
   */

  /* reset mailbox 3 contents to zero */
  BCM2835_REG(BCM2836_MAILBOX_3_READ_CLEAR_BASE + 0x10 * cpu_index_self) = 0xffffffff;

  BCM2835_REG(BCM2836_MAILBOX_IRQ_CTRL(cpu_index_self)) =
    BCM2836_MAILBOX_IRQ_CTRL_MBOX3_IRQ;
}
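The BCM2836 gives each core its own bank of mailbox registers spaced 0x10 bytes apart, which is why cpu_index_self scales the register offset above. Writing 0xffffffff to the read-clear register drops any stale bits before the mailbox 3 interrupt is enabled; raising an inter-processor interrupt then presumably amounts to writing to the target core's corresponding write-set register, the same register Example #13 below uses to hand the start address to a secondary core.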
Example #8
File: smp.c Project: gedare/rtems
void _SMP_Send_message_broadcast( unsigned long message )
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index_self = _SMP_Get_current_processor();
  uint32_t cpu_index;

  _Assert( _Debug_Is_thread_dispatching_allowed() );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    if (
      cpu_index != cpu_index_self
        && _Processor_mask_Is_set( &_SMP_Online_processors, cpu_index )
    ) {
      _SMP_Send_message( cpu_index, message );
    }
  }
}
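Only processors that are online and different from the sending processor are messaged: the sender can act on its own behalf directly, and an offline processor would never service the interrupt. The assertion documents that this broadcast is expected to be issued from a context in which thread dispatching is still allowed.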
Example #9
void _SMP_Request_other_cores_to_shutdown( void )
{
  uint32_t self = _SMP_Get_current_processor();
  uint32_t ncpus = _SMP_Get_processor_count();
  uint32_t cpu;

  _SMP_Broadcast_message( RTEMS_BSP_SMP_SHUTDOWN );

  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
    if ( cpu != self ) {
      _Per_CPU_Wait_for_state(
        _Per_CPU_Get_by_index( cpu ),
        PER_CPU_STATE_SHUTDOWN
      );
    }
  }
}
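The shutdown request is a two-phase handshake: the broadcast merely sets the RTEMS_BSP_SMP_SHUTDOWN message bit on the other processors and triggers an inter-processor interrupt, and the loop then blocks until every other processor has acknowledged by advancing its per-CPU state to PER_CPU_STATE_SHUTDOWN, so the other cores are known to be quiescent when this function returns.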
Example #10
void _SMP_Request_other_cores_to_perform_first_context_switch( void )
{
  uint32_t self = _SMP_Get_current_processor();
  uint32_t ncpus = _SMP_Get_processor_count();
  uint32_t cpu;

  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );

    if ( cpu != self ) {
      _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );
    } else {
      _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_UP );
    }
  }
}
Example #11
File: irq.c Project: AoLaD/rtems
/*
 * Determine the source of the interrupt and dispatch the correct handler.
 */
void bsp_interrupt_dispatch(void)
{
  unsigned int pend;
  unsigned int pend_bit;

  rtems_vector_number vector = 255;

#ifdef RTEMS_SMP
  uint32_t cpu_index_self = _SMP_Get_current_processor();
  uint32_t local_source = BCM2835_REG(BCM2836_IRQ_SOURCE_REG(cpu_index_self));

  if ( local_source & BCM2836_IRQ_SOURCE_MBOX3 ) {
    /* reset mailbox 3 contents to zero */
    BCM2835_REG(BCM2836_MAILBOX_3_READ_CLEAR_BASE + 0x10 * cpu_index_self) = 0xffffffff;
    _SMP_Inter_processor_interrupt_handler();
  }
  if ( cpu_index_self != 0 )
    return;
#endif /* RTEMS_SMP */

  pend = BCM2835_REG(BCM2835_IRQ_BASIC);
  if ( pend & BCM2835_IRQ_BASIC_SPEEDUP_USED_BITS ) {
    pend_bit = ffs(pend) - 1;
    vector = bcm2835_irq_speedup_table[pend_bit];
  } else {
    pend = BCM2835_REG(BCM2835_IRQ_PENDING1);
    if ( pend != 0 ) {
      pend_bit = ffs(pend) - 1;
      vector = pend_bit;
    } else {
      pend = BCM2835_REG(BCM2835_IRQ_PENDING2);
      if ( pend != 0 ) {
        pend_bit = ffs(pend) - 1;
        vector = pend_bit + 32;
      }
    }
  }

  if ( vector < 255 ) {
    bsp_interrupt_handler_dispatch(vector);
  }
}
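The decode above relies on ffs(), which returns the one-based position of the least significant set bit (or 0 if no bit is set), so ffs(pend) - 1 yields the lowest pending interrupt number; bits found in the second pending register are offset by 32. The following host-side sketch is independent of the BSP registers above and only illustrates that mapping with made-up pending masks.

#include <stdio.h>
#include <strings.h>   /* ffs() */

int main(void)
{
  unsigned int pending1 = 0x00000808;   /* pretend IRQs 3 and 11 are pending      */
  unsigned int pending2 = 0x00000002;   /* pretend IRQ 33 (bit 1 + 32) is pending */
  int vector = -1;

  /* Lowest pending bit wins, exactly as in bsp_interrupt_dispatch() above. */
  if (pending1 != 0)
    vector = ffs(pending1) - 1;         /* -> 3  */
  else if (pending2 != 0)
    vector = ffs(pending2) - 1 + 32;    /* -> 33 */

  printf("dispatching vector %d\n", vector);
  return 0;
}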
Example #12
void _SMP_Broadcast_message( uint32_t message )
{
  uint32_t self = _SMP_Get_current_processor();
  uint32_t ncpus = _SMP_Get_processor_count();
  uint32_t cpu;

  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
    if ( cpu != self ) {
      Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
      ISR_Level level;

      _Per_CPU_Lock_acquire( per_cpu, level );
      per_cpu->message |= message;
      _Per_CPU_Lock_release( per_cpu, level );
    }
  }

  bsp_smp_broadcast_interrupt();
}
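Messages are accumulated as bits in each target's per-CPU message word under that processor's lock, and a single hardware broadcast interrupt then wakes the targets; each receiver fetches and clears its word and handles every bit that was set. The sketch below only illustrates that accumulate-then-drain idea with C11 atomics and hypothetical message bits; it is not the RTEMS message handling code.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical message bits, not the RTEMS definitions. */
#define MSG_SHUTDOWN  0x1ul
#define MSG_TEST      0x2ul

static atomic_ulong cpu_message;   /* one such word would exist per processor */

/* Sender side: OR the bit in, then raise the IPI (omitted here). */
static void send_message(unsigned long message)
{
  atomic_fetch_or_explicit(&cpu_message, message, memory_order_release);
}

/* Receiver side, as an IPI handler would: grab and clear all bits at once. */
static void ipi_handler(void)
{
  unsigned long message =
    atomic_exchange_explicit(&cpu_message, 0, memory_order_acquire);

  if (message & MSG_SHUTDOWN)
    printf("would shut this processor down\n");
  if (message & MSG_TEST)
    printf("test message received\n");
}

int main(void)
{
  send_message(MSG_TEST);
  send_message(MSG_SHUTDOWN);
  ipi_handler();                   /* both bits are seen in a single pass */
  return 0;
}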
Example #13
File: bspsmp.c Project: AoLaD/rtems
bool _CPU_SMP_Start_processor( uint32_t cpu_index )
{
  bool started;
  uint32_t cpu_index_self = _SMP_Get_current_processor();

  if (cpu_index != cpu_index_self) {

    BCM2835_REG(BCM2836_MAILBOX_3_WRITE_SET_BASE + 0x10 * cpu_index) = (uint32_t)_start;

    /*
     * Wait for secondary processor to complete its basic initialization so
     * that we can enable the unified L2 cache.
     */
    started = _Per_CPU_State_wait_for_non_initial_state(cpu_index, 0);
  } else {
    started = false;
  }

  return started;
}
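Starting a secondary core here relies on the earlier boot code parking cores 1..3 on their mailbox 3 registers: writing the address of _start into the target core's write-set register (again with the 0x10-byte per-core stride) releases it into the RTEMS start code, and the boot processor then only waits until the released core has left its initial per-CPU state. A request to "start" the executing processor itself is rejected by returning false.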
Example #14
void bsp_start_on_secondary_processor(void)
{
  uint32_t cpu_index_self = _SMP_Get_current_processor();
  const Per_CPU_Control *cpu_self = _Per_CPU_Get_by_index(cpu_index_self);

  ppc_exc_initialize_with_vector_base(
    (uintptr_t) cpu_self->interrupt_stack_low,
    rtems_configuration_get_interrupt_stack_size(),
    bsp_exc_vector_base
  );

  /* Now it is possible to make the code execute only */
  qoriq_mmu_change_perm(
    FSL_EIS_MAS3_SR | FSL_EIS_MAS3_SX,
    FSL_EIS_MAS3_SX,
    FSL_EIS_MAS3_SR
  );

  bsp_interrupt_facility_initialize();

  start_thread_if_necessary(cpu_index_self);

  _SMP_Start_multitasking_on_secondary_processor();
}
Example #15
rtems_status_code rtems_task_create(
  rtems_name           name,
  rtems_task_priority  initial_priority,
  size_t               stack_size,
  rtems_mode           initial_modes,
  rtems_attribute      attribute_set,
  rtems_id            *id
)
{
  Thread_Control          *the_thread;
  bool                     is_fp;
#if defined(RTEMS_MULTIPROCESSING)
  Objects_MP_Control      *the_global_object = NULL;
  bool                     is_global;
#endif
  bool                     status;
  rtems_attribute          the_attribute_set;
  Priority_Control         core_priority;
  RTEMS_API_Control       *api;
  ASR_Information         *asr;


  if ( !id )
   return RTEMS_INVALID_ADDRESS;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  /*
   *  Core Thread Initialize ensures we get the minimum amount of
   *  stack space.
   */

  /*
   *  Fix the attribute set to match the attributes which
   *  this processor (1) requires and (2) is able to support.
   *  First add in the required flags for attribute_set.
   *  Typically this might include FP if the platform
   *  or application requires all tasks to be FP aware.
   *  Then turn off the requested bits which are not supported.
   */

  the_attribute_set = _Attributes_Set( attribute_set, ATTRIBUTES_REQUIRED );
  the_attribute_set =
    _Attributes_Clear( the_attribute_set, ATTRIBUTES_NOT_SUPPORTED );

  if ( _Attributes_Is_floating_point( the_attribute_set ) )
    is_fp = true;
  else
    is_fp = false;

  /*
   *  Validate the RTEMS API priority and convert it to the core priority range.
   */

  if ( !_Attributes_Is_system_task( the_attribute_set ) ) {
    if ( !_RTEMS_tasks_Priority_is_valid( initial_priority ) )
      return RTEMS_INVALID_PRIORITY;
  }

  core_priority = _RTEMS_tasks_Priority_to_Core( initial_priority );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( the_attribute_set ) ) {

    is_global = true;

    if ( !_System_state_Is_multiprocessing )
      return RTEMS_MP_NOT_CONFIGURED;

  } else
    is_global = false;
#endif

  /*
   *  Make sure system is MP if this task is global
   */

  /*
   *  Allocate the thread control block and -- if the task is global --
   *  allocate a global object control block.
   *
   *  NOTE:  This routine does not use the combined allocate and open
   *         global object routine because this results in a lack of
   *         control over when memory is allocated and can be freed in
   *         the event of an error.
   */

  the_thread = _RTEMS_tasks_Allocate();

  if ( !the_thread ) {
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( is_global ) {
    the_global_object = _Objects_MP_Allocate_global_object();

    if ( _Objects_MP_Is_null_global_object( the_global_object ) ) {
      _RTEMS_tasks_Free( the_thread );
      _Objects_Allocator_unlock();
      return RTEMS_TOO_MANY;
    }
  }
#endif

  /*
   *  Initialize the core thread for this task.
   */

  status = _Thread_Initialize(
    &_RTEMS_tasks_Information,
    the_thread,
    _Scheduler_Get_by_CPU_index( _SMP_Get_current_processor() ),
    NULL,
    stack_size,
    is_fp,
    core_priority,
    _Modes_Is_preempt(initial_modes)   ? true : false,
    _Modes_Is_timeslice(initial_modes) ?
      THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE :
      THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,        /* no budget algorithm callout */
    _Modes_Get_interrupt_level(initial_modes),
    (Objects_Name) name
  );

  if ( !status ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( is_global )
      _Objects_MP_Free_global_object( the_global_object );
#endif
    _RTEMS_tasks_Free( the_thread );
    _Objects_Allocator_unlock();
    return RTEMS_UNSATISFIED;
  }

  api = the_thread->API_Extensions[ THREAD_API_RTEMS ];
  asr = &api->Signal;

  asr->is_enabled = _Modes_Is_asr_disabled(initial_modes) ? false : true;

  *id = the_thread->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  the_thread->is_global = is_global;
  if ( is_global ) {

    _Objects_MP_Open(
      &_RTEMS_tasks_Information.Objects,
      the_global_object,
      name,
      the_thread->Object.id
    );

    _RTEMS_tasks_MP_Send_process_packet(
      RTEMS_TASKS_MP_ANNOUNCE_CREATE,
      the_thread->Object.id,
      name
    );

   }
#endif

  _Objects_Allocator_unlock();
  return RTEMS_SUCCESSFUL;
}
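For contrast with the internal _Thread_Initialize() call above, which selects the scheduler of the executing processor via _SMP_Get_current_processor(), this is roughly what an application-level use of the directive looks like. The task name, priority, and entry function are placeholders, and the snippet needs an RTEMS application configuration (confdefs.h) to build and run.

#include <rtems.h>
#include <stdio.h>

static rtems_task worker_task(rtems_task_argument argument)
{
  printf("worker task running, argument %lu\n", (unsigned long) argument);
  (void) rtems_task_delete(RTEMS_SELF);   /* a classic task must not return */
}

static void create_and_start_worker(void)
{
  rtems_id          id;
  rtems_status_code sc;

  sc = rtems_task_create(
    rtems_build_name('W', 'O', 'R', 'K'),   /* placeholder name     */
    10,                                      /* placeholder priority */
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &id
  );
  if (sc != RTEMS_SUCCESSFUL) {
    printf("rtems_task_create failed: %d\n", (int) sc);
    return;
  }

  sc = rtems_task_start(id, worker_task, 0);
  if (sc != RTEMS_SUCCESSFUL)
    printf("rtems_task_start failed: %d\n", (int) sc);
}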