Example #1
File: mutex.c Project: ketul93/rtems
static void _Mutex_Release_slow(
  Mutex_Control      *mutex,
  Thread_Control     *executing,
  Thread_queue_Heads *heads,
  bool                keep_priority,
  ISR_lock_Context   *lock_context
)
{
  if (heads != NULL) {
    const Thread_queue_Operations *operations;
    Thread_Control *first;

    operations = MUTEX_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    mutex->owner = first;
    _Thread_queue_Extract_critical(
      &mutex->Queue.Queue,
      operations,
      first,
      lock_context
    );
  } else {
    _Mutex_Queue_release( mutex, lock_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }
}
Example #2
File: init.c Project: greenmeent/rtems
rtems_task Init(
  rtems_task_argument argument
)
{
  rtems_status_code  sc;
  rtems_id           mutex;
  Per_CPU_Control   *cpu_self;

  TEST_BEGIN();

  sc = rtems_semaphore_create(
    rtems_build_name('M', 'U', 'T', 'X'),
    0,
    RTEMS_BINARY_SEMAPHORE,
    0,
    &mutex
  );
  directive_failed(sc, "rtems_semaphore_create");

  /*
   *  Call semaphore obtain with dispatching disabled.  Reenable
   *  dispatching before checking the status returned since
   *  directive_failed() checks for dispatching being enabled.
   */
  puts( "rtems_semaphore_obtain - with dispatching disabled" );
  cpu_self = _Thread_Dispatch_disable();
    sc = rtems_semaphore_obtain(mutex, RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT);
  _Thread_Dispatch_enable(cpu_self);
  directive_failed(sc, "rtems_semaphore_obtain");

  TEST_END();
  rtems_test_exit(0);
}
Example #3
rtems_status_code rtems_task_wake_after(
  rtems_interval ticks
)
{
  /*
   * It is critical to obtain the executing thread after thread dispatching is
   * disabled on SMP configurations.
   */
  Thread_Control  *executing;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
    executing = _Thread_Executing;

    if ( ticks == 0 ) {
      _Thread_Yield( executing );
    } else {
      _Thread_Set_state( executing, STATES_DELAYING );
      _Thread_Wait_flags_set( executing, THREAD_WAIT_STATE_BLOCKED );
      _Watchdog_Initialize(
        &executing->Timer,
        _Thread_Timeout,
        0,
        executing
      );
      _Watchdog_Insert_ticks( &executing->Timer, ticks );
    }
  _Thread_Dispatch_enable( cpu_self );
  return RTEMS_SUCCESSFUL;
}
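The comment above is the central SMP rule running through these examples: read the executing thread only after thread dispatching is disabled, otherwise the thread may be dispatched or migrate to another processor between the read and the use. A minimal sketch of that pattern in isolation, assuming the score headers <rtems/score/threaddispatch.h> and <rtems/score/percpu.h>; the helper name is hypothetical and not part of RTEMS:

#include <rtems/score/threaddispatch.h>
#include <rtems/score/percpu.h>

/* Hypothetical helper illustrating the pattern, not an RTEMS API. */
static void visit_executing_thread( void )
{
  Per_CPU_Control *cpu_self;
  Thread_Control  *executing;

  cpu_self = _Thread_Dispatch_disable();
    /* No thread dispatch can happen here, so the executing thread of
     * this processor cannot change while we use it.
     */
    executing = _Thread_Executing;
    /* ... operate on executing ... */
  _Thread_Dispatch_enable( cpu_self );
}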
Example #4
File: task1.c Project: greenmeent/rtems
rtems_task Middle_task(
  rtems_task_argument argument
)
{
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );

  thread_dispatch_no_fp_time = benchmark_timer_read();

  _Thread_Set_state( _Thread_Get_executing(), STATES_SUSPENDED );

  Middle_tcb   = _Thread_Get_executing();

  set_thread_executing(
    (Thread_Control *) _Chain_First(&scheduler_context->Ready[LOW_PRIORITY])
  );

  /* do not force context switch */

  set_thread_dispatch_necessary( false );

  _Thread_Dispatch_disable();

  benchmark_timer_initialize();
    _Context_Switch(
      &Middle_tcb->Registers,
      &_Thread_Get_executing()->Registers
    );

  benchmark_timer_initialize();
    _Context_Switch(&Middle_tcb->Registers, &Low_tcb->Registers);
}
Example #5
static Per_CPU_Control *_Thread_Wait_for_join(
  Thread_Control  *executing,
  Per_CPU_Control *cpu_self
)
{
#if defined(RTEMS_POSIX_API)
  ISR_lock_Context lock_context;

  _Thread_State_acquire( executing, &lock_context );

  if (
    _Thread_Is_joinable( executing )
      && _Thread_queue_Is_empty( &executing->Join_queue.Queue )
  ) {
    _Thread_Set_state_locked( executing, STATES_WAITING_FOR_JOIN_AT_EXIT );
    _Thread_State_release( executing, &lock_context );
    _Thread_Dispatch_enable( cpu_self );

    /* Let other threads run */

    cpu_self = _Thread_Dispatch_disable();
  } else {
    _Thread_State_release( executing, &lock_context );
  }
#endif

  return cpu_self;
}
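Note why _Thread_Wait_for_join() returns a Per_CPU_Control instead of void: while dispatching is briefly re-enabled to let the joining threads run, the executing thread may be dispatched and, on SMP configurations, resume on a different processor. Consistent with the SMP comments elsewhere in this listing, the per-CPU control obtained by the final _Thread_Dispatch_disable() is therefore the only one the caller may keep using.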
Example #6
File: __times.c Project: fsmd/RTEMS
/**
 *  POSIX 1003.1b 4.5.2 - Get Process Times
 */
clock_t _times(
   struct tms  *ptms
)
{
  rtems_interval ticks, us_per_tick;
  Thread_Control *executing;

  if ( !ptms )
    rtems_set_errno_and_return_minus_one( EFAULT );

  /*
   *  This call does not depend on TOD being initialized and can't fail.
   */

  ticks = rtems_clock_get_ticks_since_boot();
  us_per_tick = rtems_configuration_get_microseconds_per_tick();

  /*
   *  RTEMS technically has no notion of system versus user time
   *  since there is no separation of OS from application tasks.
   *  But we can at least make a distinction between the number
   *  of ticks since boot and the number of ticks executed by this
   *  thread.
   */
  {
    Timestamp_Control  per_tick;
    uint32_t           ticks_of_executing;
    uint32_t           fractional_ticks;
    Per_CPU_Control   *cpu_self;

    _Timestamp_Set(
      &per_tick,
      rtems_configuration_get_microseconds_per_tick() /
        TOD_MICROSECONDS_PER_SECOND,
      (rtems_configuration_get_nanoseconds_per_tick() %
        TOD_NANOSECONDS_PER_SECOND)
    );

    cpu_self = _Thread_Dispatch_disable();
    executing = _Thread_Executing;
    _Thread_Update_cpu_time_used(
      executing,
      &_Thread_Time_of_last_context_switch
    );
    _Timestamp_Divide(
      &executing->cpu_time_used,
      &per_tick,
      &ticks_of_executing,
      &fractional_ticks
    );
    _Thread_Dispatch_enable( cpu_self );
    ptms->tms_utime = ticks_of_executing * us_per_tick;
  }
  ptms->tms_stime  = ticks * us_per_tick;
  ptms->tms_cutime = 0;
  ptms->tms_cstime = 0;

  return ticks * us_per_tick;
}
Example #7
int sched_yield( void )
{
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
    _Thread_Yield( _Per_CPU_Get_executing( cpu_self ) );
  _Thread_Dispatch_direct( cpu_self );
  return 0;
}
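The exit path here is the one asymmetry in this listing: sched_yield() leaves the critical section with _Thread_Dispatch_direct() rather than _Thread_Dispatch_enable(). As its placement right after _Thread_Yield() suggests, the direct variant carries out the now-pending thread dispatch immediately, which is exactly what a yield is supposed to do, whereas the enable variant dispatches only if one is necessary.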
Example #8
File: task1.c Project: greenmeent/rtems
rtems_task High_task(
  rtems_task_argument argument
)
{
  rtems_interrupt_level level;

  _Thread_Dispatch_disable();

  benchmark_timer_initialize();
    rtems_interrupt_local_disable( level );
  isr_disable_time = benchmark_timer_read();

  benchmark_timer_initialize();
#if defined(RTEMS_SMP)
    rtems_interrupt_local_enable( level );
    rtems_interrupt_local_disable( level );
#else
    rtems_interrupt_flash( level );
#endif
  isr_flash_time = benchmark_timer_read();

  benchmark_timer_initialize();
    rtems_interrupt_local_enable( level );
  isr_enable_time = benchmark_timer_read();

  _Thread_Dispatch_enable( _Per_CPU_Get() );

  benchmark_timer_initialize();
    _Thread_Dispatch_disable();
  thread_disable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Dispatch_enable( _Per_CPU_Get() );
  thread_enable_dispatch_time = benchmark_timer_read();

  benchmark_timer_initialize();
    _Thread_Set_state( _Thread_Get_executing(), STATES_SUSPENDED );
  thread_set_state_time = benchmark_timer_read();

  set_thread_dispatch_necessary( true );

  benchmark_timer_initialize();
    _Thread_Dispatch();           /* dispatches Middle_task */
}
Example #9
File: task1.c Project: greenmeent/rtems
rtems_task Floating_point_task_1(
  rtems_task_argument argument
)
{
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );
  Thread_Control             *executing;
  FP_DECLARE;

  context_switch_restore_1st_fp_time = benchmark_timer_read();

  executing = _Thread_Get_executing();

  set_thread_executing(
    (Thread_Control *) _Chain_First(&scheduler_context->Ready[FP2_PRIORITY])
  );

  /* do not force context switch */

  set_thread_dispatch_necessary( false );

  _Thread_Dispatch_disable();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Get_executing()->fp_context );
#endif
    _Context_Switch(
      &executing->Registers,
      &_Thread_Get_executing()->Registers
    );
  /* switch to Floating_point_task_2 */

  context_switch_save_idle_restore_initted_time = benchmark_timer_read();

  FP_LOAD( 1.0 );

  executing = _Thread_Get_executing();

  set_thread_executing(
    (Thread_Control *) _Chain_First(&scheduler_context->Ready[FP2_PRIORITY])
  );

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Get_executing()->fp_context );
#endif
    _Context_Switch(
      &executing->Registers,
      &_Thread_Get_executing()->Registers
    );
  /* switch to Floating_point_task_2 */
}
Example #10
Status_Control _CORE_mutex_Surrender_slow(
  CORE_mutex_Control   *the_mutex,
  Thread_Control       *executing,
  Thread_queue_Heads   *heads,
  bool                  keep_priority,
  Thread_queue_Context *queue_context
)
{
  if ( heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *new_owner;
    bool                           unblock;

    operations = CORE_MUTEX_TQ_OPERATIONS;
    new_owner = ( *operations->first )( heads );

    _CORE_mutex_Set_owner( the_mutex, new_owner );

    unblock = _Thread_queue_Extract_locked(
      &the_mutex->Wait_queue.Queue,
      operations,
      new_owner,
      queue_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( _Objects_Is_local_id( new_owner->Object.id ) )
#endif
    {
      ++new_owner->resource_count;
      _Thread_queue_Boost_priority( &the_mutex->Wait_queue.Queue, new_owner );
    }

    _Thread_queue_Unblock_critical(
      unblock,
      &the_mutex->Wait_queue.Queue,
      new_owner,
      &queue_context->Lock_context
    );
  } else {
    _CORE_mutex_Release( the_mutex, queue_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }

  return STATUS_SUCCESSFUL;
}
Example #11
File: pthreadexit.c Project: AoLaD/rtems
void pthread_exit( void *value_ptr )
{
  Thread_Control  *executing;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
  executing = _Per_CPU_Get_executing( cpu_self );

  _Thread_Exit( executing, THREAD_LIFE_TERMINATING, value_ptr );

  _Thread_Dispatch_enable( cpu_self );
  RTEMS_UNREACHABLE();
}
Example #12
File: init.c Project: greenmeent/rtems
static void test_rtems_heap_allocate_aligned_with_boundary(void)
{
  void *p = NULL;

  p = rtems_heap_allocate_aligned_with_boundary(1, 1, 1);
  rtems_test_assert( p != NULL );
  free(p);

  _Thread_Dispatch_disable();
  p = rtems_heap_allocate_aligned_with_boundary(1, 1, 1);
  _Thread_Dispatch_enable( _Per_CPU_Get() );
  rtems_test_assert( p == NULL );
}
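The second assertion encodes a real constraint: with thread dispatching disabled, the heap allocator refuses to run, presumably since obtaining the lock that protects the C program heap could block, so rtems_heap_allocate_aligned_with_boundary() returns NULL. A minimal sketch of a guard with the same policy, using _Thread_Dispatch_is_enabled() as seen in Example #14; the wrapper name is hypothetical:

#include <stddef.h>
#include <rtems/malloc.h>
#include <rtems/score/threaddispatch.h>

/* Hypothetical wrapper, mirroring the behavior the test asserts. */
static void *alloc_if_dispatch_enabled( size_t size )
{
  if ( !_Thread_Dispatch_is_enabled() ) {
    return NULL;  /* allocating here could block with dispatch disabled */
  }

  /* Alignment 0 and boundary 0 request a plain allocation. */
  return rtems_heap_allocate_aligned_with_boundary( size, 0, 0 );
}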
Example #13
void _CORE_mutex_Seize_interrupt_blocking(
  CORE_mutex_Control  *the_mutex,
  Thread_Control      *executing,
  Watchdog_Interval    timeout,
  ISR_lock_Context    *lock_context
)
{
#if !defined(RTEMS_SMP)
  /*
   * We must disable thread dispatching here since we enable the interrupts for
   * priority inheritance mutexes.
   */
  _Thread_Dispatch_disable();
#endif

  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ) {
    Thread_Control *holder = the_mutex->holder;

#if !defined(RTEMS_SMP)
    /*
     * To enable interrupts here works only since exactly one executing thread
     * exists and only threads are allowed to seize and surrender mutexes with
     * the priority inheritance protocol.  On SMP configurations more than one
     * executing thread may exist, so here we must not release the lock, since
     * otherwise the current holder may be no longer the holder of the mutex
     * once we released the lock.
     */
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
#endif

    _Thread_Inherit_priority( holder, executing );

#if !defined(RTEMS_SMP)
    _Thread_queue_Acquire( &the_mutex->Wait_queue, lock_context );
#endif
  }

  _Thread_queue_Enqueue_critical(
    &the_mutex->Wait_queue.Queue,
    the_mutex->operations,
    executing,
    STATES_WAITING_FOR_MUTEX,
    timeout,
    CORE_MUTEX_TIMEOUT,
    lock_context
  );

#if !defined(RTEMS_SMP)
  _Thread_Dispatch_enable( _Per_CPU_Get() );
#endif
}
Example #14
File: cpu_asm.c Project: AoLaD/rtems
void __ISR_Handler( uint32_t vector )
{
  ISR_Level level;

  _ISR_Local_disable( level );

   _Thread_Dispatch_disable();

#if (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  if ( _ISR_Nest_level == 0 )
    {
      /* Install irq stack */
      _old_stack_ptr = stack_ptr;
      stack_ptr = _CPU_Interrupt_stack_high;
    }

#endif

  _ISR_Nest_level++;

  _ISR_Local_enable( level );

  /* call isp */
  if ( _ISR_Vector_table[ vector ] )
    (*_ISR_Vector_table[ vector ])( vector );

  _ISR_Local_disable( level );

  _Thread_Dispatch_enable( _Per_CPU_Get() );

  _ISR_Nest_level--;

#if (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
  if ( _ISR_Nest_level == 0 )
    /* restore old stack pointer */
    stack_ptr = _old_stack_ptr;
#endif

  _ISR_Local_enable( level );

  if ( _ISR_Nest_level )
    return;

  if ( !_Thread_Dispatch_is_enabled() ) {
    return;
  }

  if ( _Thread_Dispatch_necessary ) {
    _Thread_Dispatch();
  }
}
Example #15
File: error.c Project: AoLaD/rtems
int rtems_verror(
  rtems_error_code_t  error_flag,
  const char         *printf_format,
  va_list             arglist
)
{
  int               local_errno = 0;
  int               chars_written = 0;
  rtems_status_code status;

  if (error_flag & RTEMS_ERROR_PANIC) {
    if (rtems_panic_in_progress++)
      _Thread_Dispatch_disable();       /* disable task switches */

    /* don't aggravate things */
    if (rtems_panic_in_progress > 2)
      return 0;
  }

  (void) fflush(stdout);            /* in case stdout/stderr same */

  status = error_flag & ~RTEMS_ERROR_MASK;
  if (error_flag & RTEMS_ERROR_ERRNO)     /* include errno? */
    local_errno = errno;

  #if defined(RTEMS_MULTIPROCESSING)
    if (_System_state_Is_multiprocessing)
      fprintf(stderr, "[%" PRIu32 "] ", _Configuration_MP_table->node);
  #endif

  chars_written += vfprintf(stderr, printf_format, arglist);

  if (status)
    chars_written +=
      fprintf(stderr, " (status: %s)", rtems_status_text(status));

  if (local_errno) {
    if ((local_errno > 0) && *strerror(local_errno))
      chars_written += fprintf(stderr, " (errno: %s)", strerror(local_errno));
    else
      chars_written += fprintf(stderr, " (unknown errno=%d)", local_errno);
  }

  chars_written += fprintf(stderr, "\n");

  (void) fflush(stderr);

  return chars_written;
}
Example #16
File: init.c Project: greenmeent/rtems
static bool test_body(void *arg)
{
  test_context *ctx = arg;
  int busy;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();

  rtems_test_assert(
    _Thread_Wait_get_status( ctx->semaphore_task_tcb ) == STATUS_SUCCESSFUL
  );

  /*
   * Spend some time to make it more likely that we hit the test condition
   * below.
   */
  for (busy = 0; busy < 1000; ++busy) {
    __asm__ volatile ("");
  }

  if (ctx->semaphore_task_tcb->Wait.queue == NULL) {
    ctx->thread_queue_was_null = true;
  }

  _Thread_Timeout(&ctx->semaphore_task_tcb->Timer.Watchdog);

  switch (_Thread_Wait_get_status(ctx->semaphore_task_tcb)) {
    case STATUS_SUCCESSFUL:
      ctx->status_was_successful = true;
      break;
    case STATUS_TIMEOUT:
      ctx->status_was_timeout = true;
      break;
    default:
      rtems_test_assert(0);
      break;
  }

  _Thread_Dispatch_enable(cpu_self);

  return ctx->thread_queue_was_null
    && ctx->status_was_successful
    && ctx->status_was_timeout;
}
Example #17
File: cleanuppush.c Project: AoLaD/rtems
void _pthread_cleanup_pop(
  struct _pthread_cleanup_context *context,
  int                              execute
)
{
  Per_CPU_Control *cpu_self;
  Thread_Control  *executing;

  if ( execute != 0 ) {
    ( *context->_routine )( context->_arg );
  }

  cpu_self = _Thread_Dispatch_disable();

  executing = _Per_CPU_Get_executing( cpu_self );
  executing->last_cleanup_context = context->_previous;

  _Thread_Dispatch_enable( cpu_self );
}
Example #18
File: task1.c Project: greenmeent/rtems
rtems_task Low_task(
  rtems_task_argument argument
)
{
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );
  Thread_Control             *executing;

  context_switch_no_fp_time = benchmark_timer_read();

  executing    = _Thread_Get_executing();

  Low_tcb = executing;

  benchmark_timer_initialize();
    _Context_Switch( &executing->Registers, &executing->Registers );

  context_switch_self_time = benchmark_timer_read();

  _Context_Switch(&executing->Registers, &Middle_tcb->Registers);

  context_switch_another_task_time = benchmark_timer_read();

  set_thread_executing(
    (Thread_Control *) _Chain_First(&scheduler_context->Ready[FP1_PRIORITY])
  );

  /* do not force context switch */

  set_thread_dispatch_necessary( false );

  _Thread_Dispatch_disable();

  benchmark_timer_initialize();
    _Context_Switch(
      &executing->Registers,
      &_Thread_Get_executing()->Registers
    );
}
Example #19
File: cleanuppush.c Project: AoLaD/rtems
void _pthread_cleanup_push(
  struct _pthread_cleanup_context   *context,
  void                            ( *routine )( void * ),
  void                              *arg
)
{
  Per_CPU_Control *cpu_self;
  Thread_Control  *executing;

  context->_routine = routine;
  context->_arg = arg;

  /* This value is unused, just provide a deterministic value */
  context->_canceltype = -1;

  cpu_self = _Thread_Dispatch_disable();

  executing = _Per_CPU_Get_executing( cpu_self );
  context->_previous = executing->last_cleanup_context;
  executing->last_cleanup_context = context;

  _Thread_Dispatch_enable( cpu_self );
}
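Examples #17 and #19 are the two halves of the machinery behind the POSIX cleanup-handler API. For context, a short self-contained usage sketch of that public API, plain POSIX with no RTEMS internals; the function names are illustrative only:

#include <pthread.h>

static void unlock_mutex( void *arg )
{
  pthread_mutex_unlock( arg );
}

static void *worker( void *arg )
{
  pthread_mutex_t *mutex = arg;

  pthread_mutex_lock( mutex );
  pthread_cleanup_push( unlock_mutex, mutex );

  /* ... work that may contain a cancellation point ... */

  pthread_cleanup_pop( 1 );  /* non-zero: also run the handler now */
  return NULL;
}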
Example #20
CORE_mutex_Status _CORE_mutex_Initialize(
  CORE_mutex_Control           *the_mutex,
  Thread_Control               *executing,
  const CORE_mutex_Attributes  *the_mutex_attributes,
  bool                          initially_locked
)
{

/* Add this to the RTEMS environment later ?????????
  rtems_assert( initial_lock == CORE_MUTEX_LOCKED ||
                initial_lock == CORE_MUTEX_UNLOCKED );
 */

  the_mutex->Attributes    = *the_mutex_attributes;

  if ( initially_locked ) {
    bool is_priority_ceiling =
      _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes );

    the_mutex->nest_count = 1;
    the_mutex->holder     = executing;

    if (  is_priority_ceiling ||
         _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ) {
      Priority_Control ceiling = the_mutex->Attributes.priority_ceiling;
      Per_CPU_Control *cpu_self;

      /* The mutex initialization is only protected by the allocator lock */
      cpu_self = _Thread_Dispatch_disable();

      /*
       * The test to check for a ceiling violation is a bit arbitrary.  In case
       * this thread is the owner of a priority inheritance mutex, then it may
       * get a higher priority later or anytime on SMP configurations.
       */
      if ( is_priority_ceiling && executing->current_priority < ceiling ) {
        /*
         * There is no need to undo the previous work since this error aborts
         * the object creation.
         */
        _Thread_Dispatch_enable( cpu_self );
        return CORE_MUTEX_STATUS_CEILING_VIOLATED;
      }

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
       _Chain_Prepend_unprotected( &executing->lock_mutex,
                                   &the_mutex->queue.lock_queue );
       the_mutex->queue.priority_before = executing->current_priority;
#endif

      executing->resource_count++;

      if ( is_priority_ceiling ) {
        _Thread_Raise_priority( executing, ceiling );
      }

      _Thread_Dispatch_enable( cpu_self );
    }
  } else {
    the_mutex->nest_count = 0;
    the_mutex->holder     = NULL;
  }

  _Thread_queue_Initialize( &the_mutex->Wait_queue );

  if ( _CORE_mutex_Is_priority( the_mutex_attributes ) ) {
    the_mutex->operations = &_Thread_queue_Operations_priority;
  } else {
    the_mutex->operations = &_Thread_queue_Operations_FIFO;
  }

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
Example #21
File: init.c Project: greenmeent/rtems
/*
 *  A simple test of realloc
 */
static void test_realloc(void)
{
  void *p1, *p2, *p3, *p4;
  size_t i;
  int sc;
  bool malloc_walk_ok;

  /* Test growing reallocation "in place" */
  p1 = malloc(1);
  for (i=2 ; i<2048 ; i++) {
    p2 = realloc(p1, i);
    if (p2 != p1)
      printf( "realloc - failed grow in place: "
              "%p != realloc(%p,%zu)\n", p1, p2, i);
    p1 = p2;
  }
  free(p1);

  /* Test shrinking reallocation "in place" */
  p1 = malloc(2048);
  for (i=2047 ; i>=1; i--)  {
    p2 = realloc(p1, i);
    if (p2 != p1)
      printf( "realloc - failed shrink in place: "
              "%p != realloc(%p,%zu)\n", p1, p2, i);
    p1 = p2;
  }
  free(p1);

  /* Test realloc that should fail "in place", i.e.,
   * fallback to free()-- malloc()
   */
  p1 = malloc(32);
  p2 = malloc(32);
  p3 = realloc(p1, 64);
  if (p3 == p1 || p3 == NULL)
    printf(
      "realloc - failed non-in place: realloc(%p,%d) = %p\n", p1, 64, p3);
  free(p3);
  free(p2);

  /*
   *  Yet another case
   */
  p1 = malloc(8);
  p2 = malloc(8);
  free(p1);
  sc = posix_memalign(&p1, 16, 32);
  if (!sc)
    free(p1);

  /*
   *  Allocate with default alignment coverage
   */
  sc = rtems_memalign( &p4, 0, 8 );
  if ( !sc && p4 )
    free( p4 );

  /*
   * Walk the C Program Heap
   */
  puts( "malloc_walk - normal path" );
  malloc_walk_ok = malloc_walk( 1234, false );
  rtems_test_assert( malloc_walk_ok );

  puts( "malloc_walk - in critical section path" );
  _Thread_Dispatch_disable();
  malloc_walk_ok = malloc_walk( 1234, false );
  rtems_test_assert( malloc_walk_ok );
  _Thread_Dispatch_enable( _Per_CPU_Get() );

  /*
   *  Realloc with a bad pointer to force an error
   */
  p4 = realloc( test_realloc, 32 );

  p4 = _realloc_r( NULL, NULL, 1 );
}
Example #22
File: taskmode.c Project: Dipupo/rtems
rtems_status_code rtems_task_mode(
  rtems_mode  mode_set,
  rtems_mode  mask,
  rtems_mode *previous_mode_set
)
{
  Thread_Control     *executing;
  RTEMS_API_Control  *api;
  ASR_Information    *asr;
  bool                preempt_enabled;
  bool                needs_asr_dispatching;
  rtems_mode          old_mode;

  if ( !previous_mode_set )
    return RTEMS_INVALID_ADDRESS;

  executing     = _Thread_Get_executing();
  api = executing->API_Extensions[ THREAD_API_RTEMS ];
  asr = &api->Signal;

  old_mode  = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;

  if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
    old_mode |= RTEMS_NO_TIMESLICE;
  else
    old_mode |= RTEMS_TIMESLICE;

  old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
  old_mode |= _ISR_Get_level();

  *previous_mode_set = old_mode;

  /*
   *  These are generic thread scheduling characteristics.
   */
  preempt_enabled = false;
  if ( mask & RTEMS_PREEMPT_MASK ) {
#if defined( RTEMS_SMP )
    if ( rtems_configuration_is_smp_enabled() &&
         !_Modes_Is_preempt( mode_set ) ) {
      return RTEMS_NOT_IMPLEMENTED;
    }
#endif
    bool is_preempt_enabled = _Modes_Is_preempt( mode_set );

    preempt_enabled = !executing->is_preemptible && is_preempt_enabled;
    executing->is_preemptible = is_preempt_enabled;
  }

  if ( mask & RTEMS_TIMESLICE_MASK ) {
    if ( _Modes_Is_timeslice(mode_set) ) {
      executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
      executing->cpu_time_budget =
        rtems_configuration_get_ticks_per_timeslice();
    } else
      executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
  }

  /*
   *  Set the new interrupt level
   */
  if ( mask & RTEMS_INTERRUPT_MASK )
    _Modes_Set_interrupt_level( mode_set );

  /*
   *  This is specific to the RTEMS API
   */
  needs_asr_dispatching = false;
  if ( mask & RTEMS_ASR_MASK ) {
    bool is_asr_enabled = !_Modes_Is_asr_disabled( mode_set );

    if ( is_asr_enabled != asr->is_enabled ) {
      asr->is_enabled = is_asr_enabled;
      _ASR_Swap_signals( asr );
      if ( _ASR_Are_signals_pending( asr ) ) {
        needs_asr_dispatching = true;
        _Thread_Add_post_switch_action(
          executing,
          &api->Signal_action,
          _Signal_Action_handler
        );
      }
    }
  }

  if ( preempt_enabled || needs_asr_dispatching ) {
    Per_CPU_Control  *cpu_self;
    ISR_lock_Context  lock_context;

    cpu_self = _Thread_Dispatch_disable();
    _Scheduler_Acquire( executing, &lock_context );
    _Scheduler_Schedule( executing );
    _Scheduler_Release( executing, &lock_context );
    _Thread_Dispatch_enable( cpu_self );
  }

  return RTEMS_SUCCESSFUL;
}
Example #23
CORE_mutex_Status _CORE_mutex_Surrender(
  CORE_mutex_Control                *the_mutex,
#if defined(RTEMS_MULTIPROCESSING)
  Objects_Id                         id,
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support,
#else
  Objects_Id                         id __attribute__((unused)),
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support __attribute__((unused)),
#endif
  ISR_lock_Context                  *lock_context
)
{
  Thread_Control *the_thread;
  Thread_Control *holder;

  holder = the_mutex->holder;

  /*
   *  The following code allows a thread (or ISR) other than the thread
   *  which acquired the mutex to release that mutex.  This is only
   *  allowed when the mutex in question is FIFO or simple Priority
   *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
   *  must be released by the thread which acquired them.
   */

  if ( the_mutex->Attributes.only_owner_release ) {
    if ( !_Thread_Is_executing( holder ) ) {
      _ISR_lock_ISR_enable( lock_context );
      return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
    }
  }

  _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context );

  /* XXX already unlocked -- not right status */

  if ( !the_mutex->nest_count ) {
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
    return CORE_MUTEX_STATUS_SUCCESSFUL;
  }

  the_mutex->nest_count--;

  if ( the_mutex->nest_count != 0 ) {
    /*
     *  All error checking is on the locking side, so if the lock was
     *  allowed to be acquired multiple times, then we should just deal with
     *  that.  The RTEMS_DEBUG is just a validation.
     */
    #if defined(RTEMS_DEBUG)
      switch ( the_mutex->Attributes.lock_nesting_behavior ) {
        case CORE_MUTEX_NESTING_ACQUIRES:
          _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
          return CORE_MUTEX_STATUS_SUCCESSFUL;
        #if defined(RTEMS_POSIX_API)
          case CORE_MUTEX_NESTING_IS_ERROR:
            /* should never occur */
            _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
            return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
        #endif
        case CORE_MUTEX_NESTING_BLOCKS:
          /* Currently no API exercises this behavior. */
          break;
      }
    #else
      _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
      /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
      return CORE_MUTEX_STATUS_SUCCESSFUL;
    #endif
  }

  /*
   *  Formally release the mutex before possibly transferring it to a
   *  blocked thread.
   */
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
    CORE_mutex_Status pop_status =
      _CORE_mutex_Pop_priority( the_mutex, holder );

    if ( pop_status != CORE_MUTEX_STATUS_SUCCESSFUL ) {
      _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
      return pop_status;
    }

    holder->resource_count--;
  }
  the_mutex->holder = NULL;

  /*
   *  Now we check if another thread was waiting for this mutex.  If so,
   *  transfer the mutex to that thread.
   */
  if ( ( the_thread = _Thread_queue_First_locked( &the_mutex->Wait_queue ) ) ) {
    /*
     * We must extract the thread now since this will restore its default
     * thread lock.  This is necessary to avoid a deadlock in the
     * _Thread_Change_priority() below due to a recursive thread queue lock
     * acquire.
     */
    _Thread_queue_Extract_locked( &the_mutex->Wait_queue, the_thread );

#if defined(RTEMS_MULTIPROCESSING)
    _Thread_Dispatch_disable();

    if ( _Objects_Is_local_id( the_thread->Object.id ) )
#endif
    {
      the_mutex->holder     = the_thread;
      the_mutex->nest_count = 1;

      switch ( the_mutex->Attributes.discipline ) {
        case CORE_MUTEX_DISCIPLINES_FIFO:
        case CORE_MUTEX_DISCIPLINES_PRIORITY:
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          _Thread_Raise_priority(
            the_thread,
            the_mutex->Attributes.priority_ceiling
          );
          break;
      }
    }

    _Thread_queue_Unblock_critical(
      &the_mutex->Wait_queue,
      the_thread,
      lock_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {

      the_mutex->holder     = NULL;
      the_mutex->nest_count = 1;

      ( *api_mutex_mp_support)( the_thread, id );

    }

    _Thread_Dispatch_enable( _Per_CPU_Get() );
#endif
  } else {
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
  }

  /*
   *  Whether or not someone is waiting for the mutex, an
   *  inherited priority must be lowered if this is the last
   *  mutex (i.e. resource) this task has.
   */
  if ( !_Thread_Owns_resources( holder ) ) {
    /*
     * Ensure that the holder resource count is visible to all other processors
     * and that we read the latest priority restore hint.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

    if ( holder->priority_restore_hint ) {
      Per_CPU_Control *cpu_self;

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Restore_priority( holder );
      _Thread_Dispatch_enable( cpu_self );
    }
  }

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
Example #24
void _Thread_Life_action_handler(
  Thread_Control   *executing,
  Thread_Action    *action,
  ISR_lock_Context *lock_context
)
{
  Thread_Life_state  previous_life_state;
  Per_CPU_Control   *cpu_self;

  (void) action;

  previous_life_state = executing->Life.state;
  executing->Life.state = previous_life_state | THREAD_LIFE_PROTECTED;

  _Thread_State_release( executing, lock_context );

  if ( _Thread_Is_life_terminating( previous_life_state ) ) {
    _User_extensions_Thread_terminate( executing );
  } else {
    _Assert( _Thread_Is_life_restarting( previous_life_state ) );

    _User_extensions_Thread_restart( executing );
  }

  cpu_self = _Thread_Dispatch_disable();

  if ( _Thread_Is_life_terminating( previous_life_state ) ) {
    cpu_self = _Thread_Wait_for_join( executing, cpu_self );

    _Thread_Make_zombie( executing );

    /* FIXME: Workaround for https://devel.rtems.org/ticket/2751 */
    cpu_self->dispatch_necessary = true;

    _Assert( cpu_self->heir != executing );
    _Thread_Dispatch_enable( cpu_self );
    RTEMS_UNREACHABLE();
  }

  _Assert( _Thread_Is_life_restarting( previous_life_state ) );

  _Thread_State_acquire( executing, lock_context );

  _Thread_Change_life_locked(
    executing,
    THREAD_LIFE_PROTECTED | THREAD_LIFE_RESTARTING,
    0,
    0
  );

  _Thread_State_release( executing, lock_context );

  _Assert(
    _Watchdog_Get_state( &executing->Timer.Watchdog ) == WATCHDOG_INACTIVE
  );
  _Assert(
    executing->current_state == STATES_READY
      || executing->current_state == STATES_SUSPENDED
  );

  _User_extensions_Destroy_iterators( executing );
  _Thread_Load_environment( executing );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( executing->fp_context != NULL ) {
    _Context_Restore_fp( &executing->fp_context );
  }
#endif

  _Context_Restart_self( &executing->Registers );
  RTEMS_UNREACHABLE();
}