Usage examples showing how the RTEMS sources call _Workspace_Free to release memory allocated from the RTEMS workspace.

Example #1
static void _Thread_Free( Thread_Control *the_thread )
{
  _User_extensions_Thread_delete( the_thread );

  /*
   * Free the per-thread scheduling information.
   */
  _Scheduler_Node_destroy( _Scheduler_Get( the_thread ), the_thread );

  /*
   *  The thread might have been FP.  So deal with that.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif

  _Workspace_Free( the_thread->Start.fp_context );
#endif

  /*
   *  Free the rest of the memory associated with this task
   *  and set the associated pointers to NULL for safety.
   */
  _Thread_Stack_Free( the_thread );

  _Workspace_Free( the_thread->Start.tls_area );

  _Objects_Free(
    _Objects_Get_information_id( the_thread->Object.id ),
    &the_thread->Object
  );
}
Example #2
static void _Thread_Free( Thread_Control *the_thread )
{
  Thread_Information *information = (Thread_Information *)
    _Objects_Get_information_id( the_thread->Object.id );

  _User_extensions_Thread_delete( the_thread );
  _User_extensions_Destroy_iterators( the_thread );
  _ISR_lock_Destroy( &the_thread->Keys.Lock );
  _Scheduler_Node_destroy(
    _Thread_Scheduler_get_home( the_thread ),
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _ISR_lock_Destroy( &the_thread->Timer.Lock );

  /*
   *  The thread might have been FP.  So deal with that.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif

  _Workspace_Free( the_thread->Start.fp_context );
#endif

  _Freechain_Put(
    &information->Free_thread_queue_heads,
    the_thread->Wait.spare_heads
  );

  /*
   *  Free the rest of the memory associated with this task
   *  and set the associated pointers to NULL for safety.
   */
  _Thread_Stack_Free( the_thread );

  _Workspace_Free( the_thread->Start.tls_area );

#if defined(RTEMS_SMP)
  _ISR_lock_Destroy( &the_thread->Scheduler.Lock );
  _ISR_lock_Destroy( &the_thread->Wait.Lock.Default );
  _SMP_lock_Stats_destroy( &the_thread->Potpourri_stats );
#endif

  _Thread_queue_Destroy( &the_thread->Join_queue );

  _Objects_Free( &information->Objects, &the_thread->Object );
}
Example #3
static void _RTEMS_tasks_Delete_extension(
  Thread_Control *executing,
  Thread_Control *deleted
)
{
  rtems_task_variable_t *tvp, *next;

  /*
   *  Free per task variable memory
   */

  tvp = deleted->task_variables;
  deleted->task_variables = NULL;
  while (tvp) {
    next = (rtems_task_variable_t *)tvp->next;
    _RTEMS_Tasks_Invoke_task_variable_dtor( deleted, tvp );
    tvp = next;
  }

  /*
   *  Free API specific memory
   */

  (void) _Workspace_Free( deleted->API_Extensions[ THREAD_API_RTEMS ] );
  deleted->API_Extensions[ THREAD_API_RTEMS ] = NULL;
}
Example #4
void _CORE_message_queue_Close(
  CORE_message_queue_Control *the_message_queue,
  Thread_queue_Flush_callout  remote_extract_callout,
  uint32_t                    status
)
{

  /*
   *  This will flush blocked threads whether they were blocked on
   *  a send or receive.
   */

  _Thread_queue_Flush(
    &the_message_queue->Wait_queue,
    remote_extract_callout,
    status
  );

  /*
   *  This removes all messages from the pending message queue.  Since
   *  we just flushed all waiting threads, we don't have to worry about
   *  the flush satisfying any blocked senders as a side-effect.
   */

  if ( the_message_queue->number_of_pending_messages != 0 )
    (void) _CORE_message_queue_Flush_support( the_message_queue );

  (void) _Workspace_Free( the_message_queue->message_buffers );

}
Example #5
void _POSIX_Threads_cancel_run(
  Thread_Control *the_thread
)
{
  POSIX_Cancel_Handler_control      *handler;
  Chain_Control                     *handler_stack;
  POSIX_API_Control                 *thread_support;
  ISR_Level                          level;

  thread_support = the_thread->API_Extensions[ THREAD_API_POSIX ];

  handler_stack = &thread_support->Cancellation_Handlers;

  thread_support->cancelability_state = PTHREAD_CANCEL_DISABLE;

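  /*
   *  Run and free every handler left on the cancellation handler stack,
   *  most recently pushed first.
   */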
  while ( !_Chain_Is_empty( handler_stack ) ) {
    _ISR_Disable( level );
      handler = (POSIX_Cancel_Handler_control *)
           _Chain_Tail( handler_stack )->previous;
      _Chain_Extract_unprotected( &handler->Node );
    _ISR_Enable( level );

    (*handler->routine)( handler->arg );

    _Workspace_Free( handler );
  }
}
Example #6
void _POSIX_Message_queue_Delete(
  POSIX_Message_queue_Control *the_mq
)
{
  if ( !the_mq->linked && !the_mq->open_count ) {
      Objects_Control *the_object = &the_mq->Object;

      #if defined(RTEMS_DEBUG)
        /*
         *  the name memory will have been freed by unlink.
         */
        if ( the_object->name.name_p ) {
          printk(
            "POSIX MQ name (%p) not freed by unlink\n",
            (void *)the_object->name.name_p
          );
          _Workspace_Free( (void *)the_object->name.name_p );
        }
      #endif

      _Objects_Close( &_POSIX_Message_queue_Information, the_object );

      _CORE_message_queue_Close(
        &the_mq->Message_queue,
        NULL,        /* no MP support */
        CORE_MESSAGE_QUEUE_STATUS_WAS_DELETED
      );

    _POSIX_Message_queue_Free( the_mq );

  }
}
Example #7
void bsp_stack_free(void *stack)
{
  bool ok = _Heap_Free(&bsp_stack_heap, stack);

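  /* Not from the BSP stack heap, so assume the stack came from the RTEMS
     workspace and free it there. */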
  if (!ok) {
    _Workspace_Free(stack);
  }
}
Example #8
void _Objects_Shrink_information(
  Objects_Information *information
)
{
  Objects_Control  *the_object;
  Objects_Control  *extract_me;
  uint32_t          block_count;
  uint32_t          block;
  uint32_t          index_base;
  uint32_t          index;

  /*
   * Search the list to find block or chunk with all objects inactive.
   */

  index_base = _Objects_Get_index( information->minimum_id );
  block_count = (information->maximum - index_base) /
                 information->allocation_size;

  for ( block = 0; block < block_count; block++ ) {
    if ( information->inactive_per_block[ block ] ==
         information->allocation_size ) {

      /*
       *  Assume the Inactive chain is never empty at this point
       */
      the_object = (Objects_Control *) information->Inactive.first;

      do {
         index = _Objects_Get_index( the_object->id );
         /*
          *  Get the next node before the node is extracted
          */
         extract_me = the_object;
         the_object = (Objects_Control *) the_object->Node.next;
         if ((index >= index_base) &&
             (index < (index_base + information->allocation_size))) {
           _Chain_Extract( &extract_me->Node );
         }
       }
       while ( the_object );
      /*
       *  Free the memory and reset the structures in the object's information
       */

      _Workspace_Free( information->object_blocks[ block ] );
      information->object_blocks[ block ] = NULL;
      information->inactive_per_block[ block ] = 0;

      information->inactive -= information->allocation_size;

      return;
    }

    index_base += information->allocation_size;
  }
}
Example #9
void _POSIX_Keys_Free_memory(
  POSIX_Keys_Control *the_key
)
{
  uint32_t            the_api;

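  /* Free the value table allocated for each object API; the loop starts
     at 1 because API index 0 is unused. */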
  for ( the_api = 1; the_api <= OBJECTS_APIS_LAST; the_api++ )
    _Workspace_Free( the_key->Values[ the_api ] );
}
Example #10
static inline POSIX_Shm_Control *shm_allocate(
  const char *name_arg,
  size_t name_len,
  int oflag,
  mode_t mode,
  int *error
)
{
  POSIX_Shm_Control *shm;
  char *name;
  struct timeval tv;

  /* Reject any name without a leading slash. */
  if ( name_arg[0] != '/' ) {
    *error = EINVAL;
    return NULL;
  }

  /* Only create the object if requested. */
  if ( ( oflag & O_CREAT ) != O_CREAT ) {
    *error = ENOENT;
    return NULL;
  }

  name = _Workspace_String_duplicate( name_arg, name_len );
  if ( name == NULL ) {
    *error = ENOSPC;
    return NULL;
  }

  shm = _POSIX_Shm_Allocate_unprotected();
  if ( shm == NULL ) {
    _Workspace_Free( name );
    *error = ENFILE;
    return NULL;
  }

  gettimeofday( &tv, 0 );

  shm->reference_count = 1;
  shm->shm_object.handle = NULL;
  shm->shm_object.size = 0;
  shm->shm_object.ops = &_POSIX_Shm_Object_operations;
  shm->mode = mode & ~rtems_filesystem_umask;
  shm->oflag = oflag;
  shm->uid = geteuid();
  shm->gid = getegid();
  shm->atime = (time_t) tv.tv_sec;
  shm->mtime = (time_t) tv.tv_sec;
  shm->ctime = (time_t) tv.tv_sec;

  _Objects_Open_string( &_POSIX_Shm_Information, &shm->Object, name );

  return shm;
}
Example #11
int _Scheduler_CBS_Cleanup (void)
{
  unsigned int i;

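  /* Destroy any servers that are still allocated, then release the server
     list itself. */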
  for ( i = 0; i<_Scheduler_CBS_Maximum_servers; i++ ) {
    if ( _Scheduler_CBS_Server_list[ i ] )
      _Scheduler_CBS_Destroy_server( i );
  }
  _Workspace_Free( _Scheduler_CBS_Server_list );
  return SCHEDULER_CBS_OK;
}
Example #12
static void _RTEMS_tasks_Delete_extension(
  Thread_Control *executing,
  Thread_Control *deleted
)
{
  /*
   *  Free API specific memory
   */

  (void) _Workspace_Free( deleted->API_Extensions[ THREAD_API_RTEMS ] );
}
Example #13
void _Objects_Shrink_information(
  Objects_Information *information
)
{
  uint32_t          block_count;
  uint32_t          block;
  uint32_t          index_base;

  /*
   * Search the list to find block or chunk with all objects inactive.
   */

  index_base = _Objects_Get_index( information->minimum_id );
  block_count = (information->maximum - index_base) /
                 information->allocation_size;

  for ( block = 0; block < block_count; block++ ) {
    if ( information->inactive_per_block[ block ] ==
         information->allocation_size ) {
      Chain_Node       *node = _Chain_First( &information->Inactive );
      const Chain_Node *tail = _Chain_Immutable_tail( &information->Inactive );
      uint32_t          index_end = index_base + information->allocation_size;

      while ( node != tail ) {
        Objects_Control *object = (Objects_Control *) node;
        uint32_t         index = _Objects_Get_index( object->id );

        /*
         *  Get the next node before the node is extracted
         */
        node = _Chain_Next( node );

        if ( index >= index_base && index < index_end ) {
          _Chain_Extract( &object->Node );
        }
      }

      /*
       *  Free the memory and reset the structures in the object's information
       */

      _Workspace_Free( information->object_blocks[ block ] );
      information->object_blocks[ block ] = NULL;
      information->inactive_per_block[ block ] = 0;

      information->inactive -= information->allocation_size;

      return;
    }

    index_base += information->allocation_size;
  }
}
Example #14
User_extensions_routine _ITRON_Task_Delete_extension(
  Thread_Control *executing,
  Thread_Control *deleted
)
{
  /*
   *  Until we actually put data in this structure, do not even
   *  allocate it.
   */
#if 0
  (void) _Workspace_Free( deleted->API_Extensions[ THREAD_API_ITRON ] );

  deleted->API_Extensions[ THREAD_API_ITRON ] = NULL;
#endif
}
Example #15
void pthread_cleanup_pop(
    int    execute
)
{
    POSIX_Cancel_Handler_control      *handler;
    POSIX_Cancel_Handler_control      tmp_handler;
    Chain_Control                     *handler_stack;
    POSIX_API_Control                 *thread_support;
    ISR_Level                          level;

    thread_support = _Thread_Executing->API_Extensions[ THREAD_API_POSIX ];

    handler_stack = &thread_support->Cancellation_Handlers;

    /*
     * We need interrupts disabled to safely check the chain and pull
     * the last element off.  But we also need dispatching disabled to
     * ensure that we do not get preempted and deleted while we are holding
     * memory that needs to be freed.
     */

    _Thread_Disable_dispatch();
    _ISR_Disable( level );

    if ( _Chain_Is_empty( handler_stack ) ) {
        _Thread_Enable_dispatch();
        _ISR_Enable( level );
        return;
    }

    handler = (POSIX_Cancel_Handler_control *)
              _Chain_Tail( handler_stack )->previous;
    _Chain_Extract_unprotected( &handler->Node );

    _ISR_Enable( level );

    tmp_handler = *handler;

    _Workspace_Free( handler );

    _Thread_Enable_dispatch();

    if ( execute )
        (*tmp_handler.routine)( tmp_handler.arg );
}
Example #16
bool _Objects_Set_name(
  Objects_Information *information,
  Objects_Control     *the_object,
  const char          *name
)
{
  size_t                 length;
  const char            *s;

  s      = name;
  length = strnlen( name, information->name_length );

#if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES)
  if ( information->is_string ) {
    char *d;

    d = _Workspace_Allocate( length + 1 );
    if ( !d )
      return false;

    if ( the_object->name.name_p ) {
      _Workspace_Free( (void *)the_object->name.name_p );
      the_object->name.name_p = NULL;
    }

    strncpy( d, name, length );
    d[length] = '\0';
    the_object->name.name_p = d;
  } else
#endif
  {
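    /* Pack the first four characters of the name into the 32-bit name,
       padding with blanks. */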
    the_object->name.name_u32 = _Objects_Build_name(
      ((0 < length) ? s[ 0 ] : ' '),
      ((1 < length) ? s[ 1 ] : ' '),
      ((2 < length) ? s[ 2 ] : ' '),
      ((3 < length) ? s[ 3 ] : ' ')
    );

  }

  return true;
}
Example #17
int _Scheduler_CBS_Destroy_server (
  Scheduler_CBS_Server_id server_id
)
{
  int ret = SCHEDULER_CBS_OK;
  rtems_id tid;

  if ( server_id >= _Scheduler_CBS_Maximum_servers )
    return SCHEDULER_CBS_ERROR_INVALID_PARAMETER;

  if ( !_Scheduler_CBS_Server_list[server_id] )
    return SCHEDULER_CBS_ERROR_NOSERVER;

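  /* Detach the thread still attached to this server, if any, before the
     server memory is released. */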
  if ( (tid = _Scheduler_CBS_Server_list[server_id]->task_id) != -1 )
    ret = _Scheduler_CBS_Detach_thread ( server_id, tid );

  _Workspace_Free( _Scheduler_CBS_Server_list[server_id] );
  _Scheduler_CBS_Server_list[server_id] = NULL;
  return ret;
}
Example #18
void _RTEMS_Tasks_Invoke_task_variable_dtor(
  Thread_Control        *the_thread,
  rtems_task_variable_t *tvp
)
{
  void (*dtor)(void *);
  void *value;

  dtor = tvp->dtor;
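  /* For the executing thread the task-specific value is live in *tvp->ptr,
     so capture it and restore the global value; for any other thread use
     the saved task value. */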
  if (_Thread_Is_executing(the_thread)) {
    value = *tvp->ptr;
    *tvp->ptr = tvp->gval;
  } else {
    value = tvp->tval;
  }

  if ( dtor )
    (*dtor)(value);

  _Workspace_Free(tvp);
}
Example #19
void _Objects_Namespace_remove(
  Objects_Information  *information,
  Objects_Control      *the_object
)
{
  #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES)
    /*
     *  If this is a string format name, then free the memory.
     */
    if ( information->is_string )
       _Workspace_Free( (void *)the_object->name.name_p );
  #endif

  /*
   * Clear out either format.
   */
  #if defined(RTEMS_SCORE_OBJECT_ENABLE_STRING_NAMES)
    the_object->name.name_p   = NULL;
  #endif
  the_object->name.name_u32 = 0;
}
Example #20
void _CORE_message_queue_Close(
  CORE_message_queue_Control *the_message_queue,
  Thread_queue_Context       *queue_context
)
{

  /*
   *  This will flush blocked threads whether they were blocked on
   *  a send or receive.
   */

  _Thread_queue_Flush_critical(
    &the_message_queue->Wait_queue.Queue,
    the_message_queue->operations,
    _CORE_message_queue_Was_deleted,
    queue_context
  );

  (void) _Workspace_Free( the_message_queue->message_buffers );

  _Thread_queue_Destroy( &the_message_queue->Wait_queue );
}
Example #21
int pthread_key_delete(
  pthread_key_t  key
)
{
  register POSIX_Keys_Control *the_key;
  Objects_Locations            location;
  uint32_t                     the_api;

  the_key = _POSIX_Keys_Get( key, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:
      _Objects_Close( &_POSIX_Keys_Information, &the_key->Object );

      for ( the_api = 1; the_api <= OBJECTS_APIS_LAST; the_api++ )
        if ( the_key->Values[ the_api ] )
          _Workspace_Free( the_key->Values[ the_api ] );

      /*
       *  NOTE:  The destructor is not called and it is the responsibility
       *         of the application to free the memory.
       */

      _POSIX_Keys_Free( the_key );
      _Thread_Enable_dispatch();
      return 0;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:   /* should never happen */
#endif
    case OBJECTS_ERROR:
      break;
  }

  return EINVAL;
}
Example #22
bool _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  const Scheduler_Control              *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
)
{
  uintptr_t                tls_size = _TLS_Get_size();
  size_t                   actual_stack_size = 0;
  void                    *stack = NULL;
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    void                  *fp_area = NULL;
  #endif
  bool                     extension_status;
  size_t                   i;
  bool                     scheduler_node_initialized = false;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Initialize the Ada self pointer
   */
  #if __RTEMS_ADA__
    the_thread->rtems_ada_self = NULL;
  #endif

  the_thread->Start.tls_area = NULL;

  /*
   *  Allocate and Initialize the stack for this thread.
   */
  #if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
  #else
    if ( !stack_area ) {
      actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
      if ( !actual_stack_size || actual_stack_size < stack_size )
        return false;                     /* stack allocation failed */

      stack = the_thread->Start.stack;
      the_thread->Start.core_allocated_stack = true;
    } else {
      stack = stack_area;
      actual_stack_size = stack_size;
      the_thread->Start.core_allocated_stack = false;
    }
  #endif

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    if ( is_fp ) {
      fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
      if ( !fp_area )
        goto failed;
      fp_area = _Context_Fp_start( fp_area, 0 );
    }
    the_thread->fp_context       = fp_area;
    the_thread->Start.fp_context = fp_area;
  #endif

  /*
   *  Initialize the thread timer
   */
  _Watchdog_Initialize( &the_thread->Timer, NULL, 0, NULL );

  #ifdef __RTEMS_STRICT_ORDER_MUTEX__
    /* Initialize the head of chain of held mutexes */
    _Chain_Initialize_empty(&the_thread->lock_mutex);
  #endif

  /*
   * Clear the extensions area so extension users can determine
   * if they are linked to the thread. An extension user may
   * create the extension long after tasks have been created
   * so they cannot rely on the thread create user extension
   * call.  The object index starts with one, so the first extension context is
   * unused.
   */
  for ( i = 1 ; i <= rtems_configuration_get_maximum_extensions() ; ++i )
    the_thread->extensions[ i ] = NULL;

  /*
   *  General initialization
   */

  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
    #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
      case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
        the_thread->cpu_time_budget =
          rtems_configuration_get_ticks_per_timeslice();
        break;
    #endif
    #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
      case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
        break;
    #endif
  }

#if defined(RTEMS_SMP)
  the_thread->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  _CPU_Context_Set_is_executing( &the_thread->Registers, false );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  the_thread->current_state           = STATES_DORMANT;
  the_thread->Wait.queue              = NULL;
  the_thread->resource_count          = 0;
  the_thread->real_priority           = priority;
  the_thread->Start.initial_priority  = priority;

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Thread_Set_priority( the_thread, priority );

  /*
   *  Initialize the CPU usage statistics
   */
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Timestamp_Set_to_zero( &the_thread->cpu_time_used );
  #else
    the_thread->cpu_time_used = 0;
  #endif

  /*
   * Initialize the thread's key value node chain
   */
  _Chain_Initialize_empty( &the_thread->Key_Chain );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  _Thread_Action_initialize(
    &the_thread->Life.Action,
    _Thread_Life_action_handler
  );
  the_thread->Life.state = THREAD_LIFE_NORMAL;
  the_thread->Life.terminator = NULL;

  /*
   *  Open the object
   */
  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    _Workspace_Free( fp_area );
  #endif

   _Thread_Stack_Free( the_thread );
  return false;
}
Example #23
bool _Thread_Initialize(
  Thread_Information                   *information,
  Thread_Control                       *the_thread,
  const Scheduler_Control              *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
)
{
  uintptr_t                tls_size = _TLS_Get_size();
  size_t                   actual_stack_size = 0;
  void                    *stack = NULL;
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    void                  *fp_area = NULL;
  #endif
  bool                     extension_status;
  size_t                   i;
  bool                     scheduler_node_initialized = false;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  memset(
    &the_thread->current_state,
    0,
    information->Objects.size - offsetof( Thread_Control, current_state )
  );

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Allocate and Initialize the stack for this thread.
   */
  #if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
  #else
    if ( !stack_area ) {
      actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
      if ( !actual_stack_size || actual_stack_size < stack_size )
        return false;                     /* stack allocation failed */

      stack = the_thread->Start.stack;
      the_thread->Start.core_allocated_stack = true;
    } else {
      stack = stack_area;
      actual_stack_size = stack_size;
      the_thread->Start.core_allocated_stack = false;
    }
  #endif

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    if ( is_fp ) {
      fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
      if ( !fp_area )
        goto failed;
      fp_area = _Context_Fp_start( fp_area, 0 );
    }
    the_thread->fp_context       = fp_area;
    the_thread->Start.fp_context = fp_area;
  #endif

  /*
   *  Get thread queue heads
   */
  the_thread->Wait.spare_heads = _Freechain_Get(
    &information->Free_thread_queue_heads,
    _Workspace_Allocate,
    _Objects_Extend_size( &information->Objects ),
    THREAD_QUEUE_HEADS_SIZE( _Scheduler_Count )
  );
  if ( the_thread->Wait.spare_heads == NULL ) {
    goto failed;
  }
  _Thread_queue_Heads_initialize( the_thread->Wait.spare_heads );

  /*
   *  General initialization
   */

  the_thread->is_fp                  = is_fp;
  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  _Thread_Timer_initialize( &the_thread->Timer, cpu );

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
    #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
      case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
        the_thread->cpu_time_budget =
          rtems_configuration_get_ticks_per_timeslice();
        break;
    #endif
    #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
      case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
        break;
    #endif
  }

#if defined(RTEMS_SMP)
  RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  the_thread->Lock.current = &the_thread->Lock.Default;
  _SMP_ticket_lock_Initialize( &the_thread->Lock.Default );
  _SMP_lock_Stats_initialize( &the_thread->Lock.Stats, "Thread Lock" );
  _SMP_lock_Stats_initialize( &the_thread->Potpourri_stats, "Thread Potpourri" );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  _Thread_queue_Initialize( &the_thread->Join_queue );

  the_thread->current_state           = STATES_DORMANT;
  the_thread->Wait.operations         = &_Thread_queue_Operations_default;
  the_thread->current_priority        = priority;
  the_thread->real_priority           = priority;
  the_thread->Start.initial_priority  = priority;

  RTEMS_STATIC_ASSERT( THREAD_WAIT_FLAGS_INITIAL == 0, Wait_flags );

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Scheduler_Update_priority( the_thread, priority );

  /* POSIX Keys */
  _RBTree_Initialize_empty( &the_thread->Keys.Key_value_pairs );
  _ISR_lock_Initialize( &the_thread->Keys.Lock, "POSIX Key Value Pairs" );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  RTEMS_STATIC_ASSERT( THREAD_LIFE_NORMAL == 0, Life_state );

  /*
   *  Open the object
   */
  _Objects_Open( &information->Objects, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

  _Freechain_Put(
    &information->Free_thread_queue_heads,
    the_thread->Wait.spare_heads
  );

  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    _Workspace_Free( fp_area );
  #endif

   _Thread_Stack_Free( the_thread );
  return false;
}
Example #24
void _Objects_Extend_information(
  Objects_Information *information
)
{
  Objects_Control  *the_object;
  Chain_Control     Inactive;
  uint32_t          block_count;
  uint32_t          block;
  uint32_t          index_base;
  uint32_t          minimum_index;
  uint32_t          index;
  uint32_t          maximum;
  size_t            block_size;
  void             *new_object_block;
  bool              do_extend;

  /*
   *  Search for a free block of indexes. If we do NOT need to allocate or
   *  extend the block table, then we will change do_extend.
   */
  do_extend     = true;
  minimum_index = _Objects_Get_index( information->minimum_id );
  index_base    = minimum_index;
  block         = 0;

  /* if ( information->maximum < minimum_index ) */
  if ( information->object_blocks == NULL )
    block_count = 0;
  else {
    block_count = information->maximum / information->allocation_size;

    for ( ; block < block_count; block++ ) {
      if ( information->object_blocks[ block ] == NULL ) {
        do_extend = false;
        break;
      } else
        index_base += information->allocation_size;
    }
  }

  maximum = (uint32_t) information->maximum + information->allocation_size;

  /*
   *  We need to limit the number of objects to the maximum number
   *  representable in the index portion of the object Id.  In the
   *  case of 16-bit Ids, this is only 256 object instances.
   */
  if ( maximum > OBJECTS_ID_FINAL_INDEX ) {
    return;
  }

  /*
   * Allocate the name table, and the objects and if it fails either return or
   * generate a fatal error depending on auto-extending being active.
   */
  block_size = information->allocation_size * information->size;
  if ( information->auto_extend ) {
    new_object_block = _Workspace_Allocate( block_size );
    if ( !new_object_block )
      return;
  } else {
    new_object_block = _Workspace_Allocate_or_fatal_error( block_size );
  }

  /*
   *  Do we need to grow the tables?
   */
  if ( do_extend ) {
    ISR_Level         level;
    void            **object_blocks;
    uint32_t         *inactive_per_block;
    Objects_Control **local_table;
    void             *old_tables;
    size_t            block_size;

    /*
     *  Growing the tables means allocating a new area, doing a copy and
     *  updating the information table.
     *
     *  If the maximum is minimum we do not have a table to copy. First
     *  time through.
     *
     *  The allocation has :
     *
     *      void            *objects[block_count];
     *      uint32_t         inactive_count[block_count];
     *      Objects_Control *local_table[maximum];
     *
     *  This is the order in memory. Watch changing the order. See the memcpy
     *  below.
     */

    /*
     *  Up the block count and maximum
     */
    block_count++;

    /*
     *  Allocate the tables and break it up.
     */
    block_size = block_count *
           (sizeof(void *) + sizeof(uint32_t) + sizeof(Objects_Name *)) +
          ((maximum + minimum_index) * sizeof(Objects_Control *));
    object_blocks = (void**) _Workspace_Allocate( block_size );

    if ( !object_blocks ) {
      _Workspace_Free( new_object_block );
      return;
    }

    /*
     *  Break the block into the various sections.
     */
    inactive_per_block = (uint32_t *) _Addresses_Add_offset(
        object_blocks, block_count * sizeof(void*) );
    local_table = (Objects_Control **) _Addresses_Add_offset(
        inactive_per_block, block_count * sizeof(uint32_t) );

    /*
     *  Take the block count down. Saves all the (block_count - 1)
     *  in the copies.
     */
    block_count--;

    if ( information->maximum > minimum_index ) {

      /*
       *  Copy each section of the table over. This has to be performed as
       *  separate parts as size of each block has changed.
       */

      memcpy( object_blocks,
              information->object_blocks,
              block_count * sizeof(void*) );
      memcpy( inactive_per_block,
              information->inactive_per_block,
              block_count * sizeof(uint32_t) );
      memcpy( local_table,
              information->local_table,
              (information->maximum + minimum_index) * sizeof(Objects_Control *) );
    } else {

      /*
       *  Deal with the special case of the 0 to minimum_index
       */
      for ( index = 0; index < minimum_index; index++ ) {
        local_table[ index ] = NULL;
      }
    }

    /*
     *  Initialise the new entries in the table.
     */
    object_blocks[block_count] = NULL;
    inactive_per_block[block_count] = 0;

    for ( index=index_base ;
          index < ( information->allocation_size + index_base );
          index++ ) {
      local_table[ index ] = NULL;
    }

    _ISR_Disable( level );

    old_tables = information->object_blocks;

    information->object_blocks = object_blocks;
    information->inactive_per_block = inactive_per_block;
    information->local_table = local_table;
    information->maximum = (Objects_Maximum) maximum;
    information->maximum_id = _Objects_Build_id(
        information->the_api,
        information->the_class,
        _Objects_Local_node,
        information->maximum
      );

    _ISR_Enable( level );

    _Workspace_Free( old_tables );

    block_count++;
  }

  /*
   *  Assign the new object block to the object block table.
   */
  information->object_blocks[ block ] = new_object_block;

  /*
   *  Initialize objects .. add to a local chain first.
   */
  _Chain_Initialize(
    &Inactive,
    information->object_blocks[ block ],
    information->allocation_size,
    information->size
  );

  /*
   *  Move from the local chain, initialise, then append to the inactive chain
   */
  index = index_base;

  while ((the_object = (Objects_Control *) _Chain_Get( &Inactive )) != NULL ) {

    the_object->id = _Objects_Build_id(
        information->the_api,
        information->the_class,
        _Objects_Local_node,
        index
      );

    _Chain_Append( &information->Inactive, &the_object->Node );

    index++;
  }

  information->inactive_per_block[ block ] = information->allocation_size;
  information->inactive =
    (Objects_Maximum)(information->inactive + information->allocation_size);
}
Example #25
static mqd_t _POSIX_Message_queue_Create(
  const char           *name_arg,
  size_t                name_len,
  int                   oflag,
  const struct mq_attr *attr
)
{
  POSIX_Message_queue_Control  *the_mq;
  char                         *name;

  /* length of name has already been validated */

  if ( attr->mq_maxmsg <= 0 ){
    rtems_set_errno_and_return_value( EINVAL, MQ_OPEN_FAILED );
  }

  if ( attr->mq_msgsize <= 0 ){
    rtems_set_errno_and_return_value( EINVAL, MQ_OPEN_FAILED );
  }

  the_mq = _POSIX_Message_queue_Allocate_unprotected();
  if ( !the_mq ) {
    rtems_set_errno_and_return_value( ENFILE, MQ_OPEN_FAILED );
  }

  /*
   * Make a copy of the user's string for name just in case it was
   * dynamically constructed.
   */
  name = _Workspace_String_duplicate( name_arg, name_len );
  if ( !name ) {
    _POSIX_Message_queue_Free( the_mq );
    rtems_set_errno_and_return_value( ENOMEM, MQ_OPEN_FAILED );
  }

  the_mq->open_count = 1;
  the_mq->linked = true;
  the_mq->oflag = oflag;

  /*
   *  NOTE: That thread blocking discipline should be based on the
   *  current scheduling policy.
   *
   *  Joel: Cite POSIX or OpenGroup on above statement so we can determine
   *        if it is a real requirement.
   */
  if (
    !_CORE_message_queue_Initialize(
      &the_mq->Message_queue,
      CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO,
      attr->mq_maxmsg,
      attr->mq_msgsize
    )
  ) {
    _POSIX_Message_queue_Free( the_mq );
    _Workspace_Free( name );
    rtems_set_errno_and_return_value( ENOSPC, MQ_OPEN_FAILED );
  }

  _Objects_Open_string(
    &_POSIX_Message_queue_Information,
    &the_mq->Object,
    name
  );
  return the_mq->Object.id;
}
Example #26
/*
 *  _POSIX_Semaphore_Create_support
 *
 *  This routine does the actual creation and initialization of
 *  a POSIX semaphore.  It is a support routine for sem_init and
 *  sem_open.
 */
int _POSIX_Semaphore_Create_support(
  const char                *name_arg,
  size_t                     name_len,
  int                        pshared,
  unsigned int               value,
  POSIX_Semaphore_Control  **the_sem
)
{
  POSIX_Semaphore_Control *the_semaphore;
  char                    *name;

  /* Sharing semaphores among processes is not currently supported */
  if (pshared != 0)
    rtems_set_errno_and_return_minus_one( ENOSYS );

  /*
   * Make a copy of the user's string for name just in case it was
   * dynamically constructed.
   */
  if ( name_arg != NULL ) {
    name = _Workspace_String_duplicate( name_arg, name_len );
    if ( !name ) {
      rtems_set_errno_and_return_minus_one( ENOMEM );
    }
  } else {
    name = NULL;
  }

  the_semaphore = _POSIX_Semaphore_Allocate_unprotected();
  if ( !the_semaphore ) {
    _Workspace_Free( name );
    rtems_set_errno_and_return_minus_one( ENOSPC );
  }

  the_semaphore->process_shared  = pshared;

  if ( name ) {
    the_semaphore->named = true;
    the_semaphore->open_count = 1;
    the_semaphore->linked = true;
  } else {
    the_semaphore->named = false;
    the_semaphore->open_count = 0;
    the_semaphore->linked = false;
  }

  /*
   *  POSIX does not appear to specify what the discipline for
   *  blocking tasks on this semaphore should be.  It could somehow
   *  be derived from the current scheduling policy.  One
   *  thing is certain, no matter what we decide, it won't be
   *  the same as  all other POSIX implementations. :)
   */
  _CORE_semaphore_Initialize( &the_semaphore->Semaphore, value );

  /*
   *  Make the semaphore available for use.
   */
  _Objects_Open_string(
    &_POSIX_Semaphore_Information,
    &the_semaphore->Object,
    name
  );

  *the_sem = the_semaphore;

  return 0;
}
Example #27
int _POSIX_Message_queue_Create_support(
  const char                    *name_arg,
  size_t                         name_len,
  int                            pshared,
  struct mq_attr                *attr_ptr,
  POSIX_Message_queue_Control  **message_queue
)
{
  POSIX_Message_queue_Control   *the_mq;
  CORE_message_queue_Attributes *the_mq_attr;
  struct mq_attr                 attr;
  char                          *name;

  /* length of name has already been validated */

  _Thread_Disable_dispatch();

  /*
   *  There is no real basis for the default values.  They will work
   *  but were not compared against any existing implementation for
   *  compatibility.  See README.mqueue for an example program we
   *  think will print out the defaults.  Report anything you find with it.
   */
  if ( attr_ptr == NULL ) {
    attr.mq_maxmsg  = 10;
    attr.mq_msgsize = 16;
  } else {
    if ( attr_ptr->mq_maxmsg <= 0 ){
      _Thread_Enable_dispatch();
      rtems_set_errno_and_return_minus_one( EINVAL );
    }

    if ( attr_ptr->mq_msgsize <= 0 ){
      _Thread_Enable_dispatch();
      rtems_set_errno_and_return_minus_one( EINVAL );
    }

    attr = *attr_ptr;
  }

  the_mq = _POSIX_Message_queue_Allocate();
  if ( !the_mq ) {
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( ENFILE );
  }

  /*
   * Make a copy of the user's string for name just in case it was
   * dynamically constructed.
   */
  name = _Workspace_String_duplicate( name_arg, name_len );
  if ( !name ) {
    _POSIX_Message_queue_Free( the_mq );
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( ENOMEM );
  }

  the_mq->process_shared  = pshared;
  the_mq->named = true;
  the_mq->open_count = 1;
  the_mq->linked = true;

  /*
   *  NOTE: That thread blocking discipline should be based on the
   *  current scheduling policy.
   *
   *  Joel: Cite POSIX or OpenGroup on above statement so we can determine
   *        if it is a real requirement.
   */
  the_mq_attr = &the_mq->Message_queue.Attributes;
  the_mq_attr->discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO;

  if ( !_CORE_message_queue_Initialize(
           &the_mq->Message_queue,
           the_mq_attr,
           attr.mq_maxmsg,
           attr.mq_msgsize
      ) ) {

    _POSIX_Message_queue_Free( the_mq );
    _Workspace_Free(name);
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( ENOSPC );
  }

  _Objects_Open_string(
    &_POSIX_Message_queue_Information,
    &the_mq->Object,
    name
  );

  *message_queue = the_mq;

  _Thread_Enable_dispatch();
  return 0;
}
Example #28
bool _Thread_Initialize(
    Objects_Information                  *information,
    Thread_Control                       *the_thread,
    void                                 *stack_area,
    size_t                                stack_size,
    bool                                  is_fp,
    Priority_Control                      priority,
    bool                                  is_preemptible,
    Thread_CPU_budget_algorithms          budget_algorithm,
    Thread_CPU_budget_algorithm_callout   budget_callout,
    uint32_t                              isr_level,
    Objects_Name                          name
)
{
    size_t               actual_stack_size = 0;
    void                *stack = NULL;
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    void              *fp_area;
#endif
    void                *sched = NULL;
    void                *extensions_area;
    bool                 extension_status;
    int                  i;

#if defined( RTEMS_SMP )
    if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
        return false;
    }
#endif

    /*
     *  Initialize the Ada self pointer
     */
#if __RTEMS_ADA__
    the_thread->rtems_ada_self = NULL;
#endif

    /*
     *  Zero out all the allocated memory fields
     */
    for ( i=0 ; i <= THREAD_API_LAST ; i++ )
        the_thread->API_Extensions[i] = NULL;

    extensions_area = NULL;
    the_thread->libc_reent = NULL;

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    fp_area = NULL;
#endif

    /*
     *  Allocate and Initialize the stack for this thread.
     */
#if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
        return false;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
#else
    if ( !stack_area ) {
        actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
        if ( !actual_stack_size || actual_stack_size < stack_size )
            return false;                     /* stack allocation failed */

        stack = the_thread->Start.stack;
        the_thread->Start.core_allocated_stack = true;
    } else {
        stack = stack_area;
        actual_stack_size = stack_size;
        the_thread->Start.core_allocated_stack = false;
    }
#endif

    _Stack_Initialize(
        &the_thread->Start.Initial_stack,
        stack,
        actual_stack_size
    );

    /*
     *  Allocate the floating point area for this thread
     */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    if ( is_fp ) {
        fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
        if ( !fp_area )
            goto failed;
        fp_area = _Context_Fp_start( fp_area, 0 );
    }
    the_thread->fp_context       = fp_area;
    the_thread->Start.fp_context = fp_area;
#endif

    /*
     *  Initialize the thread timer
     */
    _Watchdog_Initialize( &the_thread->Timer, NULL, 0, NULL );

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
    /* Initialize the head of chain of held mutexes */
    _Chain_Initialize_empty(&the_thread->lock_mutex);
#endif

    /*
     *  Allocate the extensions area for this thread
     */
    if ( _Thread_Maximum_extensions ) {
        extensions_area = _Workspace_Allocate(
                              (_Thread_Maximum_extensions + 1) * sizeof( void * )
                          );
        if ( !extensions_area )
            goto failed;
    }
    the_thread->extensions = (void **) extensions_area;

    /*
     * Clear the extensions area so extension users can determine
     * if they are linked to the thread. An extension user may
     * create the extension long after tasks have been created
     * so they cannot rely on the thread create user extension
     * call.
     */
    if ( the_thread->extensions ) {
        for ( i = 0; i <= _Thread_Maximum_extensions ; i++ )
            the_thread->extensions[i] = NULL;
    }

    /*
     *  General initialization
     */

    the_thread->Start.is_preemptible   = is_preemptible;
    the_thread->Start.budget_algorithm = budget_algorithm;
    the_thread->Start.budget_callout   = budget_callout;

    switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
        break;
#if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
        the_thread->cpu_time_budget = _Thread_Ticks_per_timeslice;
        break;
#endif
#if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
        break;
#endif
    }

    the_thread->Start.isr_level         = isr_level;

#if defined(RTEMS_SMP)
    the_thread->is_scheduled            = false;
    the_thread->is_executing            = false;

    /* Initialize the cpu field for the non-SMP schedulers */
    the_thread->cpu                     = _Per_CPU_Get_by_index( 0 );
#endif

    the_thread->current_state           = STATES_DORMANT;
    the_thread->Wait.queue              = NULL;
    the_thread->resource_count          = 0;
    the_thread->real_priority           = priority;
    the_thread->Start.initial_priority  = priority;
    sched = _Scheduler_Allocate( the_thread );
    if ( !sched )
        goto failed;
    _Thread_Set_priority( the_thread, priority );

    /*
     *  Initialize the CPU usage statistics
     */
#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Timestamp_Set_to_zero( &the_thread->cpu_time_used );
#else
    the_thread->cpu_time_used = 0;
#endif

    /*
     *  Open the object
     */
    _Objects_Open( information, &the_thread->Object, name );

    /*
     *  We assume the Allocator Mutex is locked and dispatching is
     *  enabled when we get here.  We want to be able to run the
     *  user extensions with dispatching enabled.  The Allocator
     *  Mutex provides sufficient protection to let the user extensions
     *  run safely.
     */
    extension_status = _User_extensions_Thread_create( the_thread );
    if ( extension_status )
        return true;

failed:
    _Workspace_Free( the_thread->libc_reent );

    for ( i=0 ; i <= THREAD_API_LAST ; i++ )
        _Workspace_Free( the_thread->API_Extensions[i] );

    _Workspace_Free( extensions_area );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    _Workspace_Free( fp_area );
#endif

    _Workspace_Free( sched );

    _Thread_Stack_Free( the_thread );
    return false;
}
Example #29
void _Thread_Close(
  Objects_Information  *information,
  Thread_Control       *the_thread
)
{
  /*
   *  Now we are in a dispatching critical section again and we
   *  can take the thread OUT of the published set.  It is invalid
   *  to use this thread's Id after this call.  This will prevent
   *  any other task from attempting to initiate a call on this task.
   */
  _Objects_Invalidate_Id( information, &the_thread->Object );

  /*
   *  We assume the Allocator Mutex is locked when we get here.
   *  This provides sufficient protection to let the user extensions
   *  run but as soon as we get back, we will make the thread
   *  disappear and set a transient state on it.  So we temporarily
   *  unnest dispatching.
   */
  _Thread_Unnest_dispatch();

  _User_extensions_Thread_delete( the_thread );

  _Thread_Disable_dispatch();

  /*
   *  Now we are in a dispatching critical section again and we
   *  can take the thread OUT of the published set.  It is invalid
   *  to use this thread's Id OR name after this call.
   */
  _Objects_Close( information, &the_thread->Object );

  /*
   *  By setting the dormant state, the thread will not be considered
   *  for scheduling when we remove any blocking states.
   */
  _Thread_Set_state( the_thread, STATES_DORMANT );

  if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
    if ( _Watchdog_Is_active( &the_thread->Timer ) )
      (void) _Watchdog_Remove( &the_thread->Timer );
  }

  /*
   * Free the per-thread scheduling information.
   */
  _Scheduler_Free( the_thread );

  /*
   *  The thread might have been FP.  So deal with that.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif
  the_thread->fp_context = NULL;

  _Workspace_Free( the_thread->Start.fp_context );
#endif

  /*
   *  Free the rest of the memory associated with this task
   *  and set the associated pointers to NULL for safety.
   */
  _Thread_Stack_Free( the_thread );
  the_thread->Start.stack = NULL;

  _Workspace_Free( the_thread->extensions );
  the_thread->extensions = NULL;

  _Workspace_Free( the_thread->Start.tls_area );
}