Example #1
int _POSIX_Condition_variables_Signal_support(
    pthread_cond_t            *cond,
    bool                       is_broadcast
)
{
    register POSIX_Condition_variables_Control *the_cond;
    Objects_Locations                           location;
    Thread_Control                             *the_thread;

    the_cond = _POSIX_Condition_variables_Get( cond, &location );
    switch ( location ) {

    case OBJECTS_LOCAL:
        do {
            the_thread = _Thread_queue_Dequeue( &the_cond->Wait_queue );
            if ( !the_thread )
                the_cond->Mutex = POSIX_CONDITION_VARIABLES_NO_MUTEX;
        } while ( is_broadcast && the_thread );

        _Thread_Enable_dispatch();

        return 0;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
#endif
    case OBJECTS_ERROR:
        break;
    }

    return EINVAL;
}
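
For context, the public pthread entry points are thin wrappers that only select is_broadcast. A minimal sketch of that layering, assuming the support routine's prototype is visible through the matching RTEMS private headers (wrapper bodies are illustrative):

/* Sketch: how the pthread calls would select is_broadcast. */
int pthread_cond_signal( pthread_cond_t *cond )
{
  /* wake at most one waiter */
  return _POSIX_Condition_variables_Signal_support( cond, false );
}

int pthread_cond_broadcast( pthread_cond_t *cond )
{
  /* drain the wait queue completely */
  return _POSIX_Condition_variables_Signal_support( cond, true );
}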
Example #2
void _POSIX_Thread_Exit(
  Thread_Control *the_thread,
  void           *value_ptr
)
{
  Thread_Control    *unblocked;
  POSIX_API_Control *api;
  bool               previous_life_protection;

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  _Assert( _Debug_Is_thread_dispatching_allowed() );

  previous_life_protection = _Thread_Set_life_protection( true );
  _Thread_Disable_dispatch();

  the_thread->Wait.return_argument = value_ptr;

  /*
   * Process join
   */
  if ( api->detachstate == PTHREAD_CREATE_JOINABLE ) {
    unblocked = _Thread_queue_Dequeue( &api->Join_List );
    if ( unblocked ) {
      do {
        *(void **)unblocked->Wait.return_argument = value_ptr;
      } while ( (unblocked = _Thread_queue_Dequeue( &api->Join_List )) );
    } else {
      _Thread_Set_state( the_thread, STATES_WAITING_FOR_JOIN_AT_EXIT );
      _Thread_Enable_dispatch();
      /* now waiting for thread to arrive */
      _Thread_Disable_dispatch();
    }
  }

  /*
   *  Now shut down the thread
   */
  _Thread_Close( the_thread, _Thread_Executing );

  _Thread_Enable_dispatch();
  _Thread_Set_life_protection( previous_life_protection );
}
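
The usual caller of this routine is the terminating thread itself. A minimal sketch of that entry point, assuming _Thread_Executing names the running thread as elsewhere in these examples (cancellation cleanup that a full pthread_exit() would perform is omitted):

/* Sketch: self-termination path reaching _POSIX_Thread_Exit(). */
void pthread_exit( void *value_ptr )
{
  _POSIX_Thread_Exit( _Thread_Executing, value_ptr );
}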
Example #3
void _Thread_queue_Flush(
  Thread_queue_Control       *the_thread_queue,
#if defined(RTEMS_MULTIPROCESSING)
  Thread_queue_Flush_callout  remote_extract_callout,
#else
  Thread_queue_Flush_callout  remote_extract_callout __attribute__((unused)),
#endif
  uint32_t                    status
)
{
  Thread_Control *the_thread;

  while ( (the_thread = _Thread_queue_Dequeue( the_thread_queue )) ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) )
      ( *remote_extract_callout )( the_thread );
    else
#endif
      the_thread->Wait.return_code = status;
  }
}
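
Object classes typically wrap this primitive rather than call it directly. A sketch of such a wrapper, using core semaphore naming as an assumed example (the wrapper itself is illustrative):

/* Sketch: a typical object-level flush built on _Thread_queue_Flush(). */
void _CORE_semaphore_Flush(
  CORE_semaphore_Control     *the_semaphore,
  Thread_queue_Flush_callout  remote_extract_callout,
  uint32_t                    status
)
{
  /* wake every waiter with the given status, routing remote
     (multiprocessing) waiters through the supplied callout */
  _Thread_queue_Flush(
    &the_semaphore->Wait_queue,
    remote_extract_callout,
    status
  );
}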
Example #4
void _POSIX_Thread_Exit(
  Thread_Control *the_thread,
  void           *value_ptr
)
{
  Objects_Information  *the_information;
  Thread_Control       *unblocked;
  POSIX_API_Control    *api;

  the_information = _Objects_Get_information_id( the_thread->Object.id );

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  /*
   * The_information has to be non-NULL.  Otherwise, we couldn't be
   * running in a thread of this API and class.
   *
   * NOTE: Lock and unlock in different order so we do not throw a
   *       fatal error when locking the allocator mutex.  And after
   *       we unlock, we want to defer the context switch until we
   *       are ready to be switched out.  Otherwise, an ISR could
   *       occur and preempt us out while we still hold the
   *       allocator mutex.
   */

  _RTEMS_Lock_allocator();
    _Thread_Disable_dispatch();

      the_thread->Wait.return_argument = value_ptr;

      /*
       * Process join
       */
      if ( api->detachstate == PTHREAD_CREATE_JOINABLE ) {
        unblocked = _Thread_queue_Dequeue( &api->Join_List );
        if ( unblocked ) {
          do {
            *(void **)unblocked->Wait.return_argument = value_ptr;
          } while ( (unblocked = _Thread_queue_Dequeue( &api->Join_List )) );
        } else {
          _Thread_Set_state(
            the_thread,
            STATES_WAITING_FOR_JOIN_AT_EXIT | STATES_TRANSIENT
          );
          _RTEMS_Unlock_allocator();
          _Thread_Enable_dispatch();
          /* now waiting for thread to arrive */
          _RTEMS_Lock_allocator();
          _Thread_Disable_dispatch();
        }
      }

      /*
       *  Now shut down the thread
       */
      _Thread_Close( the_information, the_thread );

      _POSIX_Threads_Free( the_thread );

    _RTEMS_Unlock_allocator();
  _Thread_Enable_dispatch();
}
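
The dequeue loop above only works because the joining thread published, before blocking, the address where the exit value should be stored. A simplified sketch of that joiner-side handshake; the helper name is hypothetical, and the enqueue call follows the older two-argument form used elsewhere in these examples:

/* Sketch: joiner-side half of the handshake (hypothetical helper, not
 * the full pthread_join).  The joiner records where the exit value
 * should land, then parks itself on the target's Join_List; the loop
 * in _POSIX_Thread_Exit() later writes value_ptr through that address. */
static void *_POSIX_Join_wait( POSIX_API_Control *api )
{
  void *return_pointer = NULL;

  _Thread_Disable_dispatch();
    _Thread_Executing->Wait.return_argument = &return_pointer;
    _Thread_queue_Enter_critical_section( &api->Join_List );
    _Thread_queue_Enqueue( &api->Join_List, WATCHDOG_NO_TIMEOUT );
  _Thread_Enable_dispatch();  /* blocks here until the exiting thread wakes us */

  return return_pointer;
}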
Example #5
CORE_mutex_Status _CORE_mutex_Surrender(
  CORE_mutex_Control                *the_mutex,
#if defined(RTEMS_MULTIPROCESSING)
  Objects_Id                         id,
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support
#else
  Objects_Id                         id __attribute__((unused)),
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support __attribute__((unused))
#endif
)
{
  Thread_Control *the_thread;
  Thread_Control *holder;

  holder = the_mutex->holder;

  /*
   *  The following code allows a thread (or ISR) other than the thread
   *  which acquired the mutex to release that mutex.  This is only
   *  allowed when the mutex in question is FIFO or simple Priority
   *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
   *  must be released by the thread which acquired them.
   */

  if ( the_mutex->Attributes.only_owner_release ) {
    if ( !_Thread_Is_executing( holder ) )
      return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
  }

  /* XXX already unlocked -- not right status */

  if ( !the_mutex->nest_count )
    return CORE_MUTEX_STATUS_SUCCESSFUL;

  the_mutex->nest_count--;

  if ( the_mutex->nest_count != 0 ) {
    /*
     *  All error checking is on the locking side, so if the lock was
     *  allowed to be acquired multiple times, then we should just deal with
     *  that.  The RTEMS_DEBUG is just a validation.
     */
    #if defined(RTEMS_DEBUG)
      switch ( the_mutex->Attributes.lock_nesting_behavior ) {
        case CORE_MUTEX_NESTING_ACQUIRES:
          return CORE_MUTEX_STATUS_SUCCESSFUL;
        #if defined(RTEMS_POSIX_API)
          case CORE_MUTEX_NESTING_IS_ERROR:
            /* should never occur */
            return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
        #endif
        case CORE_MUTEX_NESTING_BLOCKS:
          /* Currently no API exercises this behavior. */
          break;
      }
    #else
      /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
      return CORE_MUTEX_STATUS_SUCCESSFUL;
    #endif
  }

  /*
   *  Formally release the mutex before possibly transferring it to a
   *  blocked thread.
   */
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
    CORE_mutex_Status pop_status =
      _CORE_mutex_Pop_priority( the_mutex, holder );

    if ( pop_status != CORE_MUTEX_STATUS_SUCCESSFUL )
      return pop_status;

    holder->resource_count--;

    /*
     *  Whether or not someone is waiting for the mutex, an
     *  inherited priority must be lowered if this is the last
     *  mutex (i.e. resource) this task has.
     */
    if ( holder->resource_count == 0 &&
         holder->real_priority != holder->current_priority ) {
      _Thread_Change_priority( holder, holder->real_priority, true );
    }
  }
  the_mutex->holder    = NULL;
  the_mutex->holder_id = 0;

  /*
   *  Now we check if another thread was waiting for this mutex.  If so,
   *  transfer the mutex to that thread.
   */
  if ( ( the_thread = _Thread_queue_Dequeue( &the_mutex->Wait_queue ) ) ) {

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {

      the_mutex->holder     = NULL;
      the_mutex->holder_id  = the_thread->Object.id;
      the_mutex->nest_count = 1;

      ( *api_mutex_mp_support)( the_thread, id );

    } else
#endif
    {

      the_mutex->holder     = the_thread;
      the_mutex->holder_id  = the_thread->Object.id;
      the_mutex->nest_count = 1;

      switch ( the_mutex->Attributes.discipline ) {
        case CORE_MUTEX_DISCIPLINES_FIFO:
        case CORE_MUTEX_DISCIPLINES_PRIORITY:
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          if (the_mutex->Attributes.priority_ceiling <
              the_thread->current_priority){
              _Thread_Change_priority(
                the_thread,
                the_mutex->Attributes.priority_ceiling,
                false
              );
          }
          break;
      }
    }
  } else
    the_mutex->lock = CORE_MUTEX_UNLOCKED;

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
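
API-level unlock paths funnel into this routine. A sketch of a local-only POSIX unlock path; the wrapper name, the NULL MP callout and the errno mapping are all simplified and illustrative, standing in for the real status translation:

/* Sketch: how pthread_mutex_unlock() would typically reach
 * _CORE_mutex_Surrender() on a single-processor build. */
int example_mutex_unlock( pthread_mutex_t *mutex )   /* hypothetical wrapper */
{
  register POSIX_Mutex_Control *the_mutex;
  Objects_Locations             location;
  CORE_mutex_Status             status;

  the_mutex = _POSIX_Mutex_Get( mutex, &location );
  if ( location != OBJECTS_LOCAL )
    return EINVAL;

  status = _CORE_mutex_Surrender(
    &the_mutex->Mutex,
    the_mutex->Object.id,
    NULL                 /* no MP support callout on a local-only build */
  );
  _Thread_Enable_dispatch();

  return ( status == CORE_MUTEX_STATUS_SUCCESSFUL ) ? 0 : EPERM;
}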
Example #6
void _CORE_message_queue_Seize(
  CORE_message_queue_Control      *the_message_queue,
  Thread_Control                  *executing,
  Objects_Id                       id,
  void                            *buffer,
  size_t                          *size_p,
  bool                             wait,
  Watchdog_Interval                timeout
)
{
  ISR_Level                          level;
  CORE_message_queue_Buffer_control *the_message;

  executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
  _ISR_Disable( level );
  the_message = _CORE_message_queue_Get_pending_message( the_message_queue );
  if ( the_message != NULL ) {
    the_message_queue->number_of_pending_messages -= 1;
    _ISR_Enable( level );

    *size_p = the_message->Contents.size;
    executing->Wait.count =
      _CORE_message_queue_Get_message_priority( the_message );
    _CORE_message_queue_Copy_buffer(
      the_message->Contents.buffer,
      buffer,
      *size_p
    );

    #if !defined(RTEMS_SCORE_COREMSG_ENABLE_BLOCKING_SEND)
      /*
       *  There is not an API with blocking sends enabled.
       *  So return immediately.
       */
      _CORE_message_queue_Free_message_buffer(the_message_queue, the_message);
      return;
    #else
    {
      Thread_Control   *the_thread;

      /*
       *  There could be a thread waiting to send a message.  If there
       *  is not, then we can go ahead and free the buffer.
       *
       *  NOTE: If we note that the queue was not full before this receive,
       *  then we can avoid this dequeue.
       */
      the_thread = _Thread_queue_Dequeue( &the_message_queue->Wait_queue );
      if ( !the_thread ) {
        _CORE_message_queue_Free_message_buffer(
          the_message_queue,
          the_message
        );
        return;
      }

      /*
       *  There was a thread waiting to send a message.  This code
       *  puts the messages in the message queue on behalf of the
       *  waiting task.
       */
      _CORE_message_queue_Set_message_priority(
        the_message,
        the_thread->Wait.count
      );
      the_message->Contents.size = (size_t) the_thread->Wait.option;
      _CORE_message_queue_Copy_buffer(
        the_thread->Wait.return_argument_second.immutable_object,
        the_message->Contents.buffer,
        the_message->Contents.size
      );

      _CORE_message_queue_Insert_message(
         the_message_queue,
         the_message,
         _CORE_message_queue_Get_message_priority( the_message )
      );
      return;
    }
    #endif
  }

  if ( !wait ) {
    _ISR_Enable( level );
    executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT;
    return;
  }

  _Thread_queue_Enter_critical_section( &the_message_queue->Wait_queue );
  executing->Wait.queue = &the_message_queue->Wait_queue;
  executing->Wait.id = id;
  executing->Wait.return_argument_second.mutable_object = buffer;
  executing->Wait.return_argument = size_p;
  /* Wait.count will be filled in with the message priority */
  _ISR_Enable( level );

  _Thread_queue_Enqueue(
    &the_message_queue->Wait_queue,
    executing,
    STATES_WAITING_FOR_MESSAGE,
    timeout
  );
}
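
A receive-style directive drives this routine with dispatching disabled and reads the outcome back from the executing thread, since Seize has no return value. A sketch of that calling convention; the wrapper name and status mapping are illustrative:

/* Sketch: calling convention for _CORE_message_queue_Seize() as seen
 * from a hypothetical receive wrapper.  The outcome is reported via
 * executing->Wait.return_code, not a return value. */
static rtems_status_code example_receive(
  CORE_message_queue_Control *mq,
  Objects_Id                  id,
  void                       *buffer,
  size_t                     *size,
  bool                        wait,
  Watchdog_Interval           timeout
)
{
  Thread_Control *executing = _Thread_Executing;

  _Thread_Disable_dispatch();
  _CORE_message_queue_Seize( mq, executing, id, buffer, size, wait, timeout );
  _Thread_Enable_dispatch();

  return ( executing->Wait.return_code == CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL )
    ? RTEMS_SUCCESSFUL
    : RTEMS_UNSATISFIED;
}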
Example #7
CORE_message_queue_Status _CORE_message_queue_Broadcast(
  CORE_message_queue_Control                *the_message_queue,
  const void                                *buffer,
  size_t                                     size,
  #if defined(RTEMS_MULTIPROCESSING)
    Objects_Id                                 id,
    CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
  #else
    Objects_Id                                 id __attribute__((unused)),
    CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support __attribute__((unused)),
  #endif
  uint32_t                                  *count
)
{
  Thread_Control          *the_thread;
  uint32_t                 number_broadcasted;
  Thread_Wait_information *waitp;

  if ( size > the_message_queue->maximum_message_size ) {
    return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
  }

  /*
   *  If there are pending messages, then there can't be threads
   *  waiting for us to send them a message.
   *
   *  NOTE: This check is critical because threads can block on
   *        send and receive and this ensures that we are broadcasting
   *        the message to threads waiting to receive -- not to send.
   */

  if ( the_message_queue->number_of_pending_messages != 0 ) {
    *count = 0;
    return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
  }

  /*
   *  There must be no pending messages if there is a thread waiting to
   *  receive a message.
   */
  number_broadcasted = 0;
  while ((the_thread =
          _Thread_queue_Dequeue(&the_message_queue->Wait_queue))) {
    waitp = &the_thread->Wait;
    number_broadcasted += 1;

    _CORE_message_queue_Copy_buffer(
      buffer,
      waitp->return_argument_second.mutable_object,
      size
    );

    *(size_t *) the_thread->Wait.return_argument = size;

    #if defined(RTEMS_MULTIPROCESSING)
      if ( !_Objects_Is_local_id( the_thread->Object.id ) )
        (*api_message_queue_mp_support) ( the_thread, id );
    #endif

  }
  *count = number_broadcasted;
  return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
}
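
From application code this path is normally reached through the Classic API broadcast directive. A short usage sketch, assuming queue_id comes from an earlier rtems_message_queue_create():

#include <rtems.h>

/* Sketch: application-level view of the broadcast path.  Every task
 * currently blocked in a receive on queue_id gets a copy of msg;
 * woken_count reports how many were satisfied. */
void wake_all_receivers( rtems_id queue_id )
{
  const char        msg[] = "go";
  uint32_t          woken_count;
  rtems_status_code sc;

  sc = rtems_message_queue_broadcast( queue_id, msg, sizeof( msg ), &woken_count );
  if ( sc != RTEMS_SUCCESSFUL ) {
    /* e.g. RTEMS_INVALID_SIZE if sizeof( msg ) exceeds the queue's
       maximum message size, mirroring the size check above */
  }
}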
Example #8
CORE_message_queue_Status _CORE_message_queue_Submit(
  CORE_message_queue_Control                *the_message_queue,
  void                                      *buffer,
  size_t                                     size,
  Objects_Id                                 id,
  CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
  CORE_message_queue_Submit_types            submit_type,
  boolean                                    wait,
  Watchdog_Interval                          timeout
)
{
  ISR_Level                            level;
  CORE_message_queue_Buffer_control   *the_message;
  Thread_Control                      *the_thread;

  if ( size > the_message_queue->maximum_message_size ) {
    return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
  }

  /*
   *  Is there a thread currently waiting on this message queue?
   */

  if ( the_message_queue->number_of_pending_messages == 0 ) {
    the_thread = _Thread_queue_Dequeue( &the_message_queue->Wait_queue );
    if ( the_thread ) {
      _CORE_message_queue_Copy_buffer(
        buffer,
        the_thread->Wait.return_argument,
        size
      );
      *(size_t *)the_thread->Wait.return_argument_1 = size;
      the_thread->Wait.count = submit_type;

#if defined(RTEMS_MULTIPROCESSING)
      if ( !_Objects_Is_local_id( the_thread->Object.id ) )
        (*api_message_queue_mp_support) ( the_thread, id );
#endif
      return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    }
  }

  /*
   *  No one waiting on the message queue at this time, so attempt to
   *  queue the message up for a future receive.
   */

  if ( the_message_queue->number_of_pending_messages <
       the_message_queue->maximum_pending_messages ) {

    the_message =
        _CORE_message_queue_Allocate_message_buffer( the_message_queue );

    /*
     *  NOTE: If the system is consistent, this error should never occur.
     */

    if ( !the_message ) {
      return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
    }

    _CORE_message_queue_Copy_buffer(
      buffer,
      the_message->Contents.buffer,
      size
    );
    the_message->Contents.size = size;
    the_message->priority  = submit_type;

    _CORE_message_queue_Insert_message(
       the_message_queue,
       the_message,
       submit_type
    );
    return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
  }

  /*
   *  No message buffers were available so we may need to return an
   *  overflow error or block the sender until the message is placed
   *  on the queue.
   */

  if ( !wait ) {
    return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
  }

  /*
   *  Do NOT block on a send if the caller is in an ISR.  It is
   *  deadly to block in an ISR.
   */

  if ( _ISR_Is_in_progress() ) {
    return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
  }

  /*
   *  WARNING!! executing should NOT be used prior to this point.
   *  Thus the unusual choice to open a new scope and declare
   *  it as a variable.  Doing this emphasizes how dangerous it
   *  would be to use this variable prior to here.
   */

  {
    Thread_Control  *executing = _Thread_Executing;

    _ISR_Disable( level );
    _Thread_queue_Enter_critical_section( &the_message_queue->Wait_queue );
    executing->Wait.queue              = &the_message_queue->Wait_queue;
    executing->Wait.id                 = id;
    executing->Wait.return_argument    = buffer;
    executing->Wait.option             = size;
    executing->Wait.count              = submit_type;
    _ISR_Enable( level );

    _Thread_queue_Enqueue( &the_message_queue->Wait_queue, timeout );
  }

  return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_WAIT;
}
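
Submit is normally reached through thin front ends that differ only in the submit_type they pass: CORE_MESSAGE_QUEUE_SEND_REQUEST appends while CORE_MESSAGE_QUEUE_URGENT_REQUEST prepends. A sketch of the send-side wrapper, mirroring the (older) Submit signature above; the urgent variant is identical apart from the request constant:

/* Sketch: conventional send front end to _CORE_message_queue_Submit(). */
CORE_message_queue_Status _CORE_message_queue_Send(
  CORE_message_queue_Control                *the_message_queue,
  void                                      *buffer,
  size_t                                     size,
  Objects_Id                                 id,
  CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
  boolean                                    wait,
  Watchdog_Interval                          timeout
)
{
  return _CORE_message_queue_Submit(
    the_message_queue,
    buffer,
    size,
    id,
    api_message_queue_mp_support,
    CORE_MESSAGE_QUEUE_SEND_REQUEST,   /* append; urgent would prepend */
    wait,
    timeout
  );
}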
Example #9
CORE_RWLock_Status _CORE_RWLock_Release(
  CORE_RWLock_Control *the_rwlock,
  Thread_Control      *executing
)
{
  ISR_Level       level;
  Thread_Control *next;

  /*
   *  If the rwlock is not locked, there is nothing to release.
   *  If it is locked for reading, the lock only becomes free when the
   *  last reader releases it.  Once the lock is free, hand it to the
   *  next waiting writer, or to as many waiting readers as possible.
   */

  _ISR_Disable( level );
    if ( the_rwlock->current_state == CORE_RWLOCK_UNLOCKED ) {
      _ISR_Enable( level );
      executing->Wait.return_code = CORE_RWLOCK_UNAVAILABLE;
      return CORE_RWLOCK_SUCCESSFUL;
    }
    if ( the_rwlock->current_state == CORE_RWLOCK_LOCKED_FOR_READING ) {
      the_rwlock->number_of_readers -= 1;
      if ( the_rwlock->number_of_readers != 0 ) {
        /* must be unlocked again */
        _ISR_Enable( level );
        return CORE_RWLOCK_SUCCESSFUL;
      }
    }

    /* CORE_RWLOCK_LOCKED_FOR_WRITING or READING with readers */
    executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;

    /*
     * Implicitly transition to "unlocked" and find another thread interested
     * in obtaining this rwlock.
     */
    the_rwlock->current_state = CORE_RWLOCK_UNLOCKED;
  _ISR_Enable( level );

  next = _Thread_queue_Dequeue( &the_rwlock->Wait_queue );

  if ( next ) {
    if ( next->Wait.option == CORE_RWLOCK_THREAD_WAITING_FOR_WRITE ) {
      the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_WRITING;
      return CORE_RWLOCK_SUCCESSFUL;
    }

    /*
     * Must be CORE_RWLOCK_THREAD_WAITING_FOR_READING
     */
    the_rwlock->number_of_readers += 1;
    the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_READING;

    /*
     * Now see if more readers can be let go.
     */
    while ( 1 ) {
      next = _Thread_queue_First( &the_rwlock->Wait_queue );
      if ( !next ||
           next->Wait.option == CORE_RWLOCK_THREAD_WAITING_FOR_WRITE )
        return CORE_RWLOCK_SUCCESSFUL;
      the_rwlock->number_of_readers += 1;
      _Thread_queue_Extract( &the_rwlock->Wait_queue, next );
    }
  }

  /* indentation is to match _ISR_Disable at top */

  return CORE_RWLOCK_SUCCESSFUL;
}
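
The POSIX unlock call is the usual way into this routine. A sketch of a local-only path; the wrapper name, lookup helper usage, and the errno mapping are simplified and illustrative rather than the real status translation:

/* Sketch: how pthread_rwlock_unlock() would typically reach
 * _CORE_RWLock_Release() for a local object. */
int example_rwlock_unlock( pthread_rwlock_t *rwlock )   /* hypothetical wrapper */
{
  POSIX_RWLock_Control *the_rwlock;
  Objects_Locations     location;
  CORE_RWLock_Status    status;

  the_rwlock = _POSIX_RWLock_Get( rwlock, &location );
  if ( location != OBJECTS_LOCAL )
    return EINVAL;

  status = _CORE_RWLock_Release( &the_rwlock->RWLock, _Thread_Executing );
  _Thread_Enable_dispatch();

  return ( status == CORE_RWLOCK_SUCCESSFUL ) ? 0 : EINVAL;
}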