static void rtems_malloc_boundary_at_malloc(
  void     *pointer,
  size_t    size
)
{
  struct mallocNode *mp = (struct mallocNode *)pointer;
  intptr_t *fp, *nfp;
  int i;

  _RTEMS_Lock_allocator();
    mp->memory = mp + 1;
    mp->size = size - (sizeof(struct mallocNode) + SENTINELSIZE);
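    /*
     * Walk the saved frame pointers to record the allocation's call chain
     * (this assumes a frame-pointer ABI: fp[0] holds the caller's frame
     * pointer and fp[1] the return address).
     */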
    fp = (intptr_t *)&size - 2;
    for (i = 0 ; i < CALLCHAINSIZE ; i++) {
      mp->callChain[i] = fp[1];
      nfp = (intptr_t *)(fp[0]);
      if ((nfp <= fp) || (nfp > (intptr_t *)(INT32_C(0x1000000) /* 1 << 24 */)))
        break;
      fp = nfp;
    }
    while (i < CALLCHAINSIZE)
      mp->callChain[i++] = 0;
    memcpy((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE);
    mp->forw = mallocNodeHead.forw;
    mp->back = &mallocNodeHead;
    mallocNodeHead.forw->back = mp;
    mallocNodeHead.forw = mp;
  _RTEMS_Unlock_allocator();
}
Example #2
static void rtems_malloc_boundary_at_free(
  void     *pointer
)
{
  struct mallocNode *mp = (struct mallocNode *)pointer - 1;
  struct mallocNode *mp1;

  _RTEMS_Lock_allocator();
    if ((mp->memory != (mp + 1)) ||
        (memcmp((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE) != 0))
      reportMallocError("Freeing with inconsistent pointer/sentinel", mp);
    mp1 = mallocNodeHead.forw;
    while (mp1 != &mallocNodeHead) {
      if (mp1 == mp)
        break;
      mp1 = mp1->forw;
    }
    if (mp1 != mp)
      reportMallocError("Freeing, but not on allocated list", mp);
    mp->forw->back = mp->back;
    mp->back->forw = mp->forw;
    mp->back = mp->forw = NULL;
  _RTEMS_Unlock_allocator();
}
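Both hooks above rely on a bookkeeping node placed directly in front of each user area, with a sentinel pattern appended behind it. The following is a minimal sketch of the layout the code implies; the constant values are placeholder assumptions, not the actual definitions from malloc_boundary.c.

/*
 * Layout implied by the two hooks above (hedged sketch):
 *
 *   [ struct mallocNode | user memory (mp->size bytes) | SENTINEL ]
 */
#define CALLCHAINSIZE 6        /* assumed call-chain depth */
#define SENTINELSIZE  4        /* assumed sentinel length */

struct mallocNode {
  struct mallocNode *forw;     /* doubly linked list of live allocations */
  struct mallocNode *back;
  intptr_t           callChain[CALLCHAINSIZE]; /* return addresses at malloc time */
  size_t             size;     /* user-visible size of the block */
  void              *memory;   /* == mp + 1: start of the user area */
};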
Example #3
void *rtems_cache_coherent_allocate(
  size_t size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  void *ptr;
  Heap_Control *heap;

  _RTEMS_Lock_allocator();

  heap = cache_coherent_heap;
  if ( heap == NULL ) {
    heap = RTEMS_Malloc_Heap;
  }

  ptr = _Heap_Allocate_aligned_with_boundary(
    heap,
    size,
    alignment,
    boundary
  );

  _RTEMS_Unlock_allocator();

  return ptr;
}
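rtems_cache_coherent_allocate() is typically used for buffers shared with DMA masters. Below is a hedged usage sketch; the 64-byte alignment is an illustrative assumption, not an API requirement.

#include <rtems/malloc.h>

/* Sketch: allocate a DMA buffer, 64-byte aligned (illustrative value),
 * with no boundary restriction, and release it again. */
static void dma_buffer_round_trip( size_t size )
{
  void *buf = rtems_cache_coherent_allocate( size, 64, 0 );

  if ( buf != NULL ) {
    /* ... hand buf to the device ... */
    rtems_cache_coherent_free( buf );
  }
}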
Example #4
rtems_status_code rtems_region_return_segment(
  rtems_id  id,
  void     *segment
)
{
  Objects_Locations        location;
  rtems_status_code        return_status;
#ifdef RTEMS_REGION_FREE_SHRED_PATTERN
  uint32_t                 size;
#endif
  int                      status;
  register Region_Control *the_region;

  _RTEMS_Lock_allocator();

    the_region = _Region_Get( id, &location );
    switch ( location ) {

      case OBJECTS_LOCAL:

        _Region_Debug_Walk( the_region, 3 );

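        /*
         * When a shred pattern is configured, overwrite the segment's
         * contents before the memory is returned to the heap.
         */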
#ifdef RTEMS_REGION_FREE_SHRED_PATTERN
        if ( !_Heap_Size_of_alloc_area( &the_region->Memory, segment, &size ) )
          return_status = RTEMS_INVALID_ADDRESS;
        else {
          memset( segment, (RTEMS_REGION_FREE_SHRED_PATTERN & 0xFF), size );
#endif
          status = _Region_Free_segment( the_region, segment );

          _Region_Debug_Walk( the_region, 4 );

          if ( !status )
            return_status = RTEMS_INVALID_ADDRESS;
          else {
            the_region->number_of_used_blocks -= 1;

            _Region_Process_queue(the_region); /* unlocks allocator */

            return RTEMS_SUCCESSFUL;
          }
#ifdef RTEMS_REGION_FREE_SHRED_PATTERN
        }
#endif
        break;

#if defined(RTEMS_MULTIPROCESSING)
      case OBJECTS_REMOTE:        /* this error cannot be returned */
        break;
#endif

      case OBJECTS_ERROR:
      default:
        return_status = RTEMS_INVALID_ID;
        break;
    }

  _RTEMS_Unlock_allocator();
  return return_status;
}
Example #5
rtems_status_code rtems_task_delete(
  rtems_id id
)
{
  register Thread_Control *the_thread;
  Objects_Locations        location;
  Objects_Information     *the_information;

  _RTEMS_Lock_allocator();

  the_thread = _Thread_Get( id, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:
      the_information = _Objects_Get_information_id( the_thread->Object.id );

      #if defined(RTEMS_DEBUG)
        if ( !the_information ) {
          _RTEMS_Unlock_allocator();
          _Thread_Enable_dispatch();
          return RTEMS_INVALID_ID;
          /* This should never happen if _Thread_Get() works right */
        }
      #endif

      #if defined(RTEMS_MULTIPROCESSING)
        if ( the_thread->is_global ) {
          _Objects_MP_Close( &_RTEMS_tasks_Information, the_thread->Object.id );
          _RTEMS_tasks_MP_Send_process_packet(
            RTEMS_TASKS_MP_ANNOUNCE_DELETE,
            the_thread->Object.id,
            0                                /* Not used */
          );
        }
      #endif

      _Thread_Close( the_information, the_thread );

      _RTEMS_tasks_Free( the_thread );

      _RTEMS_Unlock_allocator();
      _Thread_Enable_dispatch();
      return RTEMS_SUCCESSFUL;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
      _RTEMS_Unlock_allocator();
      _Thread_Dispatch();
      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
#endif

    case OBJECTS_ERROR:
      break;
  }

  _RTEMS_Unlock_allocator();
  return RTEMS_INVALID_ID;
}
Example #6
void _Protected_heap_Iterate(
  Heap_Control *heap,
  Heap_Block_visitor visitor,
  void *visitor_arg
)
{
  _RTEMS_Lock_allocator();
  _Heap_Iterate( heap, visitor, visitor_arg );
  _RTEMS_Unlock_allocator();
}
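A visitor passed to _Protected_heap_Iterate is invoked once per heap block while the allocator mutex is held, so it must not allocate or free memory itself. A minimal counting visitor might look like this; the Heap_Block_visitor parameter list is taken from <rtems/score/heap.h>, so treat the exact signature as an assumption if your RTEMS version differs.

#include <rtems/score/heap.h>

typedef struct {
  size_t used;
  size_t free;
} block_counts;

/* Counts used and free blocks; returning false keeps the iteration going. */
static bool count_blocks(
  const Heap_Block *block,
  uintptr_t         block_size,
  bool              block_is_used,
  void             *visitor_arg
)
{
  block_counts *counts = visitor_arg;

  if ( block_is_used )
    ++counts->used;
  else
    ++counts->free;

  return false;   /* false == continue iterating */
}

/* Usage:
 *   block_counts counts = { 0, 0 };
 *   _Protected_heap_Iterate( heap, count_blocks, &counts );
 */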
Example #7
bool _Protected_heap_Free(
  Heap_Control *the_heap,
  void         *start_address
)
{
  bool    status;

  _RTEMS_Lock_allocator();
    status = _Heap_Free( the_heap, start_address );
  _RTEMS_Unlock_allocator();
  return status;
}
Example #8
rtems_status_code rtems_region_extend(
  rtems_id   id,
  void      *starting_address,
  uintptr_t  length
)
{
  uintptr_t           amount_extended;
  Objects_Locations   location;
  rtems_status_code   return_status;
  Region_Control     *the_region;

  if ( !starting_address )
    return RTEMS_INVALID_ADDRESS;

  _RTEMS_Lock_allocator(); /* to prevent deletion */

    the_region = _Region_Get( id, &location );
    switch ( location ) {

      case OBJECTS_LOCAL:

        amount_extended = _Heap_Extend(
          &the_region->Memory,
          starting_address,
          length,
          0
        );

        if ( amount_extended > 0 ) {
          the_region->length                += amount_extended;
          the_region->maximum_segment_size  += amount_extended;
          return_status = RTEMS_SUCCESSFUL;
        } else {
          return_status = RTEMS_INVALID_ADDRESS;
        }
        break;

#if defined(RTEMS_MULTIPROCESSING)
      case OBJECTS_REMOTE:        /* this error cannot be returned */
        break;
#endif

      case OBJECTS_ERROR:
      default:
        return_status = RTEMS_INVALID_ID;
        break;
    }

  _RTEMS_Unlock_allocator();

  return return_status;
}
Example #9
void *_Protected_heap_Allocate_aligned(
  Heap_Control *the_heap,
  size_t        size,
  uint32_t      alignment
)
{
  void *p;

  _RTEMS_Lock_allocator();
    p = _Heap_Allocate_aligned( the_heap, size, alignment );
  _RTEMS_Unlock_allocator();
  return p;
}
Example #10
bool _Protected_heap_Get_block_size(
  Heap_Control        *the_heap,
  void                *starting_address,
  uintptr_t           *size
)
{
  bool status;

  _RTEMS_Lock_allocator();
    status = _Heap_Size_of_alloc_area( the_heap, starting_address, size );
  _RTEMS_Unlock_allocator();
  return status;
}
Example #11
void *rtems_heap_greedy_allocate(
  const uintptr_t *block_sizes,
  size_t block_count
)
{
  void *opaque;

  _RTEMS_Lock_allocator();
  opaque = _Heap_Greedy_allocate( RTEMS_Malloc_Heap, block_sizes, block_count );
  _RTEMS_Unlock_allocator();

  return opaque;
}
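The greedy allocation calls are test helpers: they consume the heap so that later allocations fail deterministically, and the returned opaque handle lets you give the memory back. A sketch, assuming the companion rtems_heap_greedy_free() declared alongside it in <rtems/malloc.h>:

#include <rtems/malloc.h>
#include <stdlib.h>

static void exercise_allocation_failure( void )
{
  /* After this call, only single blocks of 32 and 64 bytes remain
   * allocatable from the malloc heap. */
  static const uintptr_t sizes[] = { 32, 64 };
  void *opaque = rtems_heap_greedy_allocate( sizes, 2 );

  void *p = malloc( 1024 );   /* expected to fail now */
  /* ... a test would assert p == NULL here ... */
  free( p );

  rtems_heap_greedy_free( opaque );   /* return the hoarded memory */
}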
Example #12
static void checkMallocArena(void)
{
  struct mallocNode *mp;

  _RTEMS_Lock_allocator();
    for ( mp = mallocNodeHead.forw; mp != &mallocNodeHead ; mp = mp->forw ) {
      if ((mp->forw->back != mp) || (mp->back->forw != mp))
        reportMallocError("Pointers mangled", mp);
      if ((mp->memory != (mp + 1)) ||
          (memcmp((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE) != 0))
        reportMallocError("Inconsistent pointer/sentinel", mp);
    }
  _RTEMS_Unlock_allocator();
}
Example #13
bool _Protected_heap_Extend(
  Heap_Control *the_heap,
  void         *starting_address,
  uintptr_t     size
)
{
  bool      extend_ok;
  uintptr_t amount_extended;

  _RTEMS_Lock_allocator();
    extend_ok = _Heap_Extend(the_heap, starting_address, size, &amount_extended);
  _RTEMS_Unlock_allocator();
  return extend_ok;
}
Example #14
boolean _Protected_heap_Extend(
  Heap_Control *the_heap,
  void         *starting_address,
  size_t        size
)
{
  Heap_Extend_status status;
  uint32_t           amount_extended;

  _RTEMS_Lock_allocator();
    status = _Heap_Extend(the_heap, starting_address, size, &amount_extended);
  _RTEMS_Unlock_allocator();
  return (status == HEAP_EXTEND_SUCCESSFUL);
}
Example #15
void rtems_cache_coherent_add_area(
  void *area_begin,
  uintptr_t area_size
)
{
  if ( _System_state_Is_up( _System_state_Get()) ) {
    _RTEMS_Lock_allocator();

    add_area( area_begin, area_size );

    _RTEMS_Unlock_allocator();
  } else {
    add_area( area_begin, area_size );
  }
}
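The area is typically registered once during BSP or driver startup, before the first rtems_cache_coherent_allocate() call. A sketch; the linker symbols below are hypothetical placeholders:

/* Hypothetical linker-provided bounds of an uncached RAM window. */
extern char bsp_nocache_begin[];
extern char bsp_nocache_size[];

static void setup_cache_coherent_area( void )
{
  rtems_cache_coherent_add_area(
    bsp_nocache_begin,
    (uintptr_t) bsp_nocache_size
  );
}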
Example #16
void
rtems_bsd_chunk_free(rtems_bsd_chunk_control *self,
    void *some_addr_in_chunk)
{
	rtems_bsd_chunk_info *info = rtems_bsd_chunk_get_info(self,
	    some_addr_in_chunk);

	_RTEMS_Lock_allocator();
	rtems_rbtree_extract(&self->chunks, &info->node);
	_RTEMS_Unlock_allocator();

	(*self->info_dtor)(self, info);

	free(info, M_RTEMS_HEAP);
}
Example #17
void *rtems_heap_greedy_allocate_all_except_largest(
  uintptr_t *allocatable_size
)
{
  void *opaque;

  _RTEMS_Lock_allocator();
  opaque = _Heap_Greedy_allocate_all_except_largest(
    RTEMS_Malloc_Heap,
    allocatable_size
  );
  _RTEMS_Unlock_allocator();

  return opaque;
}
Example #18
boolean _Protected_heap_Resize_block(
  Heap_Control *the_heap,
  void         *starting_address,
  size_t        size
)
{
  Heap_Resize_status status;
  uint32_t           old_mem_size;
  uint32_t           avail_mem_size;

  _RTEMS_Lock_allocator();
    status = _Heap_Resize_block( 
      the_heap, starting_address, size, &old_mem_size, &avail_mem_size );
  _RTEMS_Unlock_allocator();
  return (status == HEAP_RESIZE_SUCCESSFUL);
}
Example #19
bool _Protected_heap_Get_information(
  Heap_Control            *the_heap,
  Heap_Information_block  *the_info
)
{
  if ( !the_heap )
    return false;

  if ( !the_info )
    return false;

  _RTEMS_Lock_allocator();
    _Heap_Get_information( the_heap, the_info );
  _RTEMS_Unlock_allocator();

  return true;
}
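Because the wrapper fills the whole Heap_Information_block while holding the allocator mutex, the caller gets a consistent snapshot of both the Free and Used statistics. A usage sketch against the malloc heap; the header names are assumed from the usual RTEMS layout:

#include <rtems/malloc.h>               /* RTEMS_Malloc_Heap */
#include <rtems/score/protectedheap.h>
#include <stdio.h>

static void print_malloc_heap_stats( void )
{
  Heap_Information_block info;

  if ( _Protected_heap_Get_information( RTEMS_Malloc_Heap, &info ) ) {
    printf(
      "free blocks: %lu, largest free block: %lu\n",
      (unsigned long) info.Free.number,
      (unsigned long) info.Free.largest
    );
  }
}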
Example #20
rtems_status_code rtems_region_get_free_information(
  rtems_id                id,
  Heap_Information_block *the_info
)
{
  Objects_Locations        location;
  rtems_status_code        return_status;
  Region_Control          *the_region;

  if ( !the_info )
    return RTEMS_INVALID_ADDRESS;

  _RTEMS_Lock_allocator();

    the_region = _Region_Get( id, &location );
    switch ( location ) {

      case OBJECTS_LOCAL:

        the_info->Used.number   = 0;
        the_info->Used.total    = 0;
        the_info->Used.largest  = 0;

        _Heap_Get_free_information( &the_region->Memory, &the_info->Free );

        return_status = RTEMS_SUCCESSFUL;
        break;

#if defined(RTEMS_MULTIPROCESSING)
      case OBJECTS_REMOTE:        /* this error cannot be returned */
        break;
#endif

      case OBJECTS_ERROR:
      default:
        return_status = RTEMS_INVALID_ID;
        break;
    }

  _RTEMS_Unlock_allocator();
  return return_status;
}
Example #21
void *_Protected_heap_Allocate_aligned_with_boundary(
    Heap_Control *heap,
    uintptr_t     size,
    uintptr_t     alignment,
    uintptr_t     boundary
)
{
    void *p;

    _RTEMS_Lock_allocator();
    p = _Heap_Allocate_aligned_with_boundary(
            heap,
            size,
            alignment,
            boundary
        );
    _RTEMS_Unlock_allocator();

    return p;
}
Example #22
rtems_status_code rtems_region_get_segment_size(
  rtems_id   id,
  void      *segment,
  uintptr_t *size
)
{
  Objects_Locations        location;
  rtems_status_code        return_status = RTEMS_SUCCESSFUL;
  register Region_Control *the_region;

  if ( !segment )
    return RTEMS_INVALID_ADDRESS;

  if ( !size )
    return RTEMS_INVALID_ADDRESS;

  _RTEMS_Lock_allocator();

    the_region = _Region_Get( id, &location );
    switch ( location ) {

      case OBJECTS_LOCAL:
        if ( !_Heap_Size_of_alloc_area( &the_region->Memory, segment, size ) )
          return_status = RTEMS_INVALID_ADDRESS;
        break;

#if defined(RTEMS_MULTIPROCESSING)
      case OBJECTS_REMOTE:        /* this error cannot be returned */
        break;
#endif

      case OBJECTS_ERROR:
        return_status = RTEMS_INVALID_ID;
        break;
    }

  _RTEMS_Unlock_allocator();
  return return_status;
}
Example #23
void *
rtems_bsd_chunk_alloc(rtems_bsd_chunk_control *self, uintptr_t chunk_size)
{
	char *p = rtems_cache_aligned_malloc(chunk_size + self->info_size);

	if (p != NULL) {
		rtems_bsd_chunk_info *info = (rtems_bsd_chunk_info *) p;

		p += self->info_size;

		info->begin = (uintptr_t) p;
		info->end = (uintptr_t) p + chunk_size;

		(*self->info_ctor)(self, info);

		_RTEMS_Lock_allocator();
		rtems_rbtree_insert(&self->chunks, &info->node, chunk_compare, true);
		_RTEMS_Unlock_allocator();
	}

	return p;
}
Example #24
rtems_status_code rtems_region_delete(
  rtems_id id
)
{
  Objects_Locations   location;
  rtems_status_code   return_status;
  Region_Control     *the_region;

  _RTEMS_Lock_allocator();

    the_region = _Region_Get( id, &location );
    switch ( location ) {

      case OBJECTS_LOCAL:
        _Region_Debug_Walk( the_region, 5 );
        if ( the_region->number_of_used_blocks != 0 )
          return_status = RTEMS_RESOURCE_IN_USE;
        else {
          _Objects_Close( &_Region_Information, &the_region->Object );
          _Region_Free( the_region );
          return_status = RTEMS_SUCCESSFUL;
        }
        break;

#if defined(RTEMS_MULTIPROCESSING)
      case OBJECTS_REMOTE:        /* this error cannot be returned */
        break;
#endif

      case OBJECTS_ERROR:
      default:
        return_status = RTEMS_INVALID_ID;
        break;
    }

  _RTEMS_Unlock_allocator();
  return return_status;
}
Example #25
void rtems_cache_coherent_free( void *ptr )
{
  Heap_Control *heap;

  _RTEMS_Lock_allocator();

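  /*
   * Try the cache-coherent heap first; if the pointer did not come from
   * it, _Heap_Free() fails there and the block is freed from the general
   * malloc heap instead.
   */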
  heap = cache_coherent_heap;
  if ( heap != NULL ) {
    if ( _Heap_Free( heap, ptr ) ) {
      heap = NULL;
    } else {
      heap = RTEMS_Malloc_Heap;
    }
  } else {
    heap = RTEMS_Malloc_Heap;
  }

  if ( heap != NULL ) {
    _Heap_Free( heap, ptr );
  }

  _RTEMS_Unlock_allocator();
}
Example #26
void rtems_resource_snapshot_take(rtems_resource_snapshot *snapshot)
{
  uint32_t *active = &snapshot->active_posix_keys;
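  /* active_posix_keys is treated as the first of several consecutive
   * uint32_t counters in the snapshot, indexed in objects_info_table
   * order. */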
  size_t i;

  memset(snapshot, 0, sizeof(*snapshot));

  _RTEMS_Lock_allocator();

  _Thread_Kill_zombies();

  get_heap_info(RTEMS_Malloc_Heap, &snapshot->heap_info);
  get_heap_info(&_Workspace_Area, &snapshot->workspace_info);

  for (i = 0; i < RTEMS_ARRAY_SIZE(objects_info_table); ++i) {
    active [i] = _Objects_Active_count(objects_info_table[i]);
  }

  _RTEMS_Unlock_allocator();

  snapshot->active_posix_key_value_pairs = get_active_posix_key_value_pairs();
  snapshot->open_files = open_files();
}
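A leak check in a test typically brackets the code under test with a snapshot and a check; a sketch, assuming the companion rtems_resource_snapshot_check() from the same libcsupport API:

#include <rtems/libcsupport.h>
#include <assert.h>

static void leak_check_example( void )
{
  rtems_resource_snapshot before;

  rtems_resource_snapshot_take( &before );

  /* ... code under test: create and delete objects, malloc/free ... */

  /* true if the current resource usage matches the snapshot */
  assert( rtems_resource_snapshot_check( &before ) );
}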
Example #27
bool _Protected_heap_Walk(
  Heap_Control *the_heap,
  int           source,
  bool          do_dump
)
{
  bool    status;

  /*
   * If we are called from within a dispatching critical section,
   * then it is forbidden to lock a mutex.  But since we are inside
   * a critical section, it should be safe to walk it unlocked.
   *
   * NOTE: Dispatching is also disabled during initialization.
   */
  if ( !_Thread_Dispatch_disable_level ) {
    _RTEMS_Lock_allocator();
      status = _Heap_Walk( the_heap, source, do_dump );
    _RTEMS_Unlock_allocator();
  } else {
    status = _Heap_Walk( the_heap, source, do_dump );
  }
  return status;
}
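A typical call site simply passes the heap, a numeric source tag, and whether to dump; a short sketch (the tag value is an arbitrary illustration):

/* Sketch: consistency check from a debug/monitor context. */
if ( !_Protected_heap_Walk( RTEMS_Malloc_Heap, 99, false ) ) {
  /* heap inconsistency detected; handle or report it here */
}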
Example #28
rtems_status_code rtems_region_get_segment(
  rtems_id           id,
  uintptr_t          size,
  rtems_option       option_set,
  rtems_interval     timeout,
  void              **segment
)
{
  Thread_Control     *executing;
  Objects_Locations   location;
  rtems_status_code   return_status;
  Region_Control     *the_region;
  void               *the_segment;

  if ( !segment )
    return RTEMS_INVALID_ADDRESS;

  *segment = NULL;

  if ( size == 0 )
    return RTEMS_INVALID_SIZE;

  _RTEMS_Lock_allocator();

    executing  = _Thread_Get_executing();
    the_region = _Region_Get( id, &location );
    switch ( location ) {

      case OBJECTS_LOCAL:
        if ( size > the_region->maximum_segment_size )
          return_status = RTEMS_INVALID_SIZE;
        else {
          _Region_Debug_Walk( the_region, 1 );

          the_segment = _Region_Allocate_segment( the_region, size );

          _Region_Debug_Walk( the_region, 2 );

          if ( the_segment ) {
            the_region->number_of_used_blocks += 1;
            *segment = the_segment;
            return_status = RTEMS_SUCCESSFUL;
          } else if ( _Options_Is_no_wait( option_set ) ) {
            return_status = RTEMS_UNSATISFIED;
          } else {
            /*
             *  Switch from using the memory allocation mutex to using a
             *  dispatching disabled critical section.  We have to do this
             *  because this thread is going to block.
             */
            /* FIXME: Lock order reversal */
            _Thread_Disable_dispatch();
            _RTEMS_Unlock_allocator();

            executing->Wait.queue           = &the_region->Wait_queue;
            executing->Wait.id              = id;
            executing->Wait.count           = size;
            executing->Wait.return_argument = segment;

            _Thread_queue_Enter_critical_section( &the_region->Wait_queue );

            _Thread_queue_Enqueue(
              &the_region->Wait_queue,
              executing,
              timeout
            );

            _Objects_Put( &the_region->Object );

            return (rtems_status_code) executing->Wait.return_code;
          }
        }
        break;

#if defined(RTEMS_MULTIPROCESSING)
      case OBJECTS_REMOTE:        /* this error cannot be returned */
        break;
#endif

      case OBJECTS_ERROR:
      default:
        return_status = RTEMS_INVALID_ID;
        break;
    }

  _RTEMS_Unlock_allocator();

  return return_status;
}
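The region directives above combine as follows. This sketch creates a region over a static arena, takes and returns a segment, then deletes the region; the name, sizes, and page size are illustrative, and status checks are omitted for brevity:

#include <rtems.h>

static char region_area[ 8192 ];   /* hypothetical backing store */

static void region_round_trip( void )
{
  rtems_id   region_id;
  void      *segment;
  uintptr_t  segment_size;

  rtems_region_create(
    rtems_build_name( 'R', 'E', 'G', '1' ),
    region_area,
    sizeof( region_area ),
    16,                              /* page size */
    RTEMS_DEFAULT_ATTRIBUTES,
    &region_id
  );

  rtems_region_get_segment(
    region_id, 128, RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT, &segment
  );

  rtems_region_get_segment_size( region_id, segment, &segment_size );

  rtems_region_return_segment( region_id, segment );
  rtems_region_delete( region_id );
}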
Example #29
int pthread_create(
  pthread_t              *thread,
  const pthread_attr_t   *attr,
  void                 *(*start_routine)( void * ),
  void                   *arg
)
{
  const pthread_attr_t               *the_attr;
  Priority_Control                    core_priority;
  Thread_CPU_budget_algorithms        budget_algorithm;
  Thread_CPU_budget_algorithm_callout budget_callout;
  bool                                is_fp;
  bool                                status;
  Thread_Control                     *the_thread;
  POSIX_API_Control                  *api;
  int                                 schedpolicy = SCHED_RR;
  struct sched_param                  schedparam;
  Objects_Name                        name;
  int                                 rc;

  if ( !start_routine )
    return EFAULT;

  the_attr = (attr) ? attr : &_POSIX_Threads_Default_attributes;

  if ( !the_attr->is_initialized )
    return EINVAL;

  /*
   *  Core Thread Initialize ensures we get the minimum amount of
   *  stack space if it is allowed to allocate it itself.
   *
   *  NOTE: If the user provides the stack we will let it drop below
   *        twice the minimum.
   */
  if ( the_attr->stackaddr && !_Stack_Is_enough(the_attr->stacksize) )
    return EINVAL;

  #if 0
    int  cputime_clock_allowed;  /* see time.h */
    rtems_set_errno_and_return_minus_one( ENOSYS );
  #endif

  /*
   *  P1003.1c/Draft 10, p. 121.
   *
   *  If inheritsched is set to PTHREAD_INHERIT_SCHED, then this thread
   *  inherits scheduling attributes from the creating thread.   If it is
   *  PTHREAD_EXPLICIT_SCHED, then scheduling parameters come from the
   *  attributes structure.
   */
  switch ( the_attr->inheritsched ) {
    case PTHREAD_INHERIT_SCHED:
      api = _Thread_Executing->API_Extensions[ THREAD_API_POSIX ];
      schedpolicy = api->schedpolicy;
      schedparam  = api->schedparam;
      break;

    case PTHREAD_EXPLICIT_SCHED:
      schedpolicy = the_attr->schedpolicy;
      schedparam  = the_attr->schedparam;
      break;

    default:
      return EINVAL;
  }

  /*
   *  Check the contentionscope since rtems only supports PROCESS wide
   *  contention (i.e. no system wide contention).
   */
  if ( the_attr->contentionscope != PTHREAD_SCOPE_PROCESS )
    return ENOTSUP;

  /*
   *  Interpret the scheduling parameters.
   */
  if ( !_POSIX_Priority_Is_valid( schedparam.sched_priority ) )
    return EINVAL;

  core_priority = _POSIX_Priority_To_core( schedparam.sched_priority );

  /*
   *  Set the core scheduling policy information.
   */
  rc = _POSIX_Thread_Translate_sched_param(
    schedpolicy,
    &schedparam,
    &budget_algorithm,
    &budget_callout
  );
  if ( rc )
    return rc;

  /*
   *  Currently all POSIX threads are floating point if the hardware
   *  supports it.
   */
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    is_fp = true;
  #else
    is_fp = false;
  #endif

  /*
   *  Lock the allocator mutex for protection
   */
  _RTEMS_Lock_allocator();

  /*
   *  Allocate the thread control block.
   *
   *  NOTE:  Global threads are not currently supported.
   */
  the_thread = _POSIX_Threads_Allocate();
  if ( !the_thread ) {
    _RTEMS_Unlock_allocator();
    return EAGAIN;
  }

  /*
   *  Initialize the core thread for this task.
   */
  name.name_p = NULL;   /* posix threads don't have a name by default */
  status = _Thread_Initialize(
    &_POSIX_Threads_Information,
    the_thread,
    the_attr->stackaddr,
    _POSIX_Threads_Ensure_minimum_stack(the_attr->stacksize),
    is_fp,
    core_priority,
    true,                 /* preemptible */
    budget_algorithm,
    budget_callout,
    0,                    /* isr level */
    name                  /* posix threads don't have a name */
  );

  if ( !status ) {
    _POSIX_Threads_Free( the_thread );
    _RTEMS_Unlock_allocator();
    return EAGAIN;
  }

  /*
   *  finish initializing the per API structure
   */
  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  api->Attributes  = *the_attr;
  api->detachstate = the_attr->detachstate;
  api->schedpolicy = schedpolicy;
  api->schedparam  = schedparam;

  /*
   *  This insures we evaluate the process-wide signals pending when we
   *  first run.
   *
   *  NOTE:  Since the thread starts with all unblocked, this is necessary.
   */
  the_thread->do_post_task_switch_extension = true;

  /*
   *  POSIX threads are allocated and started in one operation.
   */
  status = _Thread_Start(
    the_thread,
    THREAD_START_POINTER,
    start_routine,
    arg,
    0                     /* unused */
  );

  #if defined(RTEMS_DEBUG)
    /*
     *  _Thread_Start only fails if the thread was in the incorrect state
     *
     *  NOTE: This can only happen if someone slips in and touches the
     *        thread while we are creating it.
     */
    if ( !status ) {
      _POSIX_Threads_Free( the_thread );
      _RTEMS_Unlock_allocator();
      return EINVAL;
    }
  #endif

  if ( schedpolicy == SCHED_SPORADIC ) {
    _Watchdog_Insert_ticks(
      &api->Sporadic_timer,
      _Timespec_To_ticks( &api->schedparam.sched_ss_repl_period )
    );
  }

  /*
   *  Return the id and indicate we successfully created the thread
   */
  *thread = the_thread->Object.id;

  _RTEMS_Unlock_allocator();
  return 0;
}
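On the caller's side this directive behaves like any other pthread_create(); a minimal sketch with default attributes:

#include <pthread.h>
#include <stdio.h>

static void *worker( void *arg )
{
  printf( "worker started with arg %p\n", arg );
  return NULL;
}

static int spawn_worker( void )
{
  pthread_t tid;
  int       rc = pthread_create( &tid, NULL, worker, NULL );

  if ( rc != 0 )
    return rc;   /* EAGAIN, EINVAL, ... as in the code above */

  return pthread_join( tid, NULL );
}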
Example #30
void _POSIX_Thread_Exit(
  Thread_Control *the_thread,
  void           *value_ptr
)
{
  Objects_Information  *the_information;
  Thread_Control       *unblocked;
  POSIX_API_Control    *api;

  the_information = _Objects_Get_information_id( the_thread->Object.id );

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  /*
   * The_information has to be non-NULL.  Otherwise, we couldn't be
   * running in a thread of this API and class.
   *
   * NOTE: Lock and unlock in different order so we do not throw a
   *       fatal error when locking the allocator mutex.  And after
   *       we unlock, we want to defer the context switch until we
   *       are ready to be switched out.  Otherwise, an ISR could
   *       occur and preempt us out while we still hold the
   *       allocator mutex.
   */

  _RTEMS_Lock_allocator();
    _Thread_Disable_dispatch();

      the_thread->Wait.return_argument = value_ptr;

      /*
       * Process join
       */
      if ( api->detachstate == PTHREAD_CREATE_JOINABLE ) {
        unblocked = _Thread_queue_Dequeue( &api->Join_List );
        if ( unblocked ) {
          do {
            *(void **)unblocked->Wait.return_argument = value_ptr;
          } while ( (unblocked = _Thread_queue_Dequeue( &api->Join_List )) );
        } else {
          _Thread_Set_state(
            the_thread,
            STATES_WAITING_FOR_JOIN_AT_EXIT | STATES_TRANSIENT
          );
          _RTEMS_Unlock_allocator();
          _Thread_Enable_dispatch();
          /* now waiting for thread to arrive */
          _RTEMS_Lock_allocator();
          _Thread_Disable_dispatch();
        }
      }

      /*
       *  Now shut down the thread
       */
      _Thread_Close( the_information, the_thread );

      _POSIX_Threads_Free( the_thread );

    _RTEMS_Unlock_allocator();
  _Thread_Enable_dispatch();
}