Example #1
/**
 *  @brief Finalize a blocking operation.
 *
 *  This method is used to finalize a blocking operation that was
 *  satisfied. It may be used with thread queues or any other synchronization
 *  object that uses the blocking states and watchdog timers for timeouts.
 *
 *  This method will restore the previous ISR disable level during the cancel
 *  operation.  Thus it is an implicit _ISR_Enable().
 *
 *  @param[in] the_thread is the thread whose blocking is canceled
 *  @param[in] lock_context is the previous ISR disable level
 */
static void _Thread_blocking_operation_Finalize(
  Thread_Control   *the_thread,
  ISR_lock_Context *lock_context
)
{
  /*
   * The thread is not waiting on anything after this completes.
   */
  the_thread->Wait.queue = NULL;

  /*
   *  If the sync state is timed out, this is very likely not needed.
   *  But better safe than sorry when it comes to critical sections.
   */
  if ( _Watchdog_Is_active( &the_thread->Timer ) ) {
    _Watchdog_Deactivate( &the_thread->Timer );
    _Thread_queue_Release( lock_context );
    (void) _Watchdog_Remove( &the_thread->Timer );
  } else {
    _Thread_queue_Release( lock_context );
  }

  /*
   *  Global objects with thread queues should not be operated on from an
   *  ISR.  But the sync code still must allow short timeouts to be processed
   *  correctly.
   */

  _Thread_Unblock( the_thread );

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    _Thread_MP_Free_proxy( the_thread );
#endif
}
Example #2
rtems_id
rtems_monitor_object_canonical_next(
    const rtems_monitor_object_info_t *info,
    rtems_id                     id,
    void                        *canonical
)
{
  rtems_id    next_id;
  const void *raw_item;

#if defined(RTEMS_MULTIPROCESSING)
    if ( ! _Objects_Is_local_id(id) ) {
       next_id = rtems_monitor_object_canonical_next_remote(
         info->type,
         id,
         canonical
       );
    } else
#endif
    {
      next_id = id;

      raw_item = info->next(
        info->object_information,
        canonical,
        &next_id
      );

      if (raw_item) {
        info->canonical(canonical, raw_item);
        _Thread_Enable_dispatch();
      }
    }
  return next_id;
}
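Example #3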
void *
rtems_monitor_manager_next(
    void      *table_void,
    void      *canonical,
    rtems_id  *next_id
)
{
    Objects_Information     *table = table_void;
    rtems_monitor_generic_t *copy;
    Objects_Control         *object = 0;
    Objects_Locations        location;

    /*
     * When we are called, it must be local
     */

#if defined(RTEMS_MULTIPROCESSING)
    if ( ! _Objects_Is_local_id(*next_id) )
        goto done;
#endif

    object = _Objects_Get_next(table, *next_id, &location, next_id);

    if (object)
    {
        copy = (rtems_monitor_generic_t *) canonical;
        copy->id = object->id;
        copy->name = object->name.name_u32;
    }

#if defined(RTEMS_MULTIPROCESSING)
done:
#endif
    return object;
}
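Example #4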
Thread_Control *_Thread_queue_Dequeue_fifo(
  Thread_queue_Control *the_thread_queue
)
{
  ISR_Level       level;
  Thread_Control *the_thread;

  _ISR_Disable( level );
  if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) ) {

    the_thread = (Thread_Control *)
       _Chain_Get_first_unprotected( &the_thread_queue->Queues.Fifo );

    the_thread->Wait.queue = NULL;
    if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
      _ISR_Enable( level );
      _Thread_Unblock( the_thread );
    } else {
      _Watchdog_Deactivate( &the_thread->Timer );
      _ISR_Enable( level );
      (void) _Watchdog_Remove( &the_thread->Timer );
      _Thread_Unblock( the_thread );
    }

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) )
      _Thread_MP_Free_proxy( the_thread );
#endif

    return the_thread;
  }

  _ISR_Enable( level );
  return NULL;
}
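Example #5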
Status_Control _CORE_mutex_Surrender_slow(
  CORE_mutex_Control   *the_mutex,
  Thread_Control       *executing,
  Thread_queue_Heads   *heads,
  bool                  keep_priority,
  Thread_queue_Context *queue_context
)
{
  if ( heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *new_owner;
    bool                           unblock;

    operations = CORE_MUTEX_TQ_OPERATIONS;
    new_owner = ( *operations->first )( heads );

    _CORE_mutex_Set_owner( the_mutex, new_owner );

    unblock = _Thread_queue_Extract_locked(
      &the_mutex->Wait_queue.Queue,
      operations,
      new_owner,
      queue_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( _Objects_Is_local_id( new_owner->Object.id ) )
#endif
    {
      ++new_owner->resource_count;
      _Thread_queue_Boost_priority( &the_mutex->Wait_queue.Queue, new_owner );
    }

    _Thread_queue_Unblock_critical(
      unblock,
      &the_mutex->Wait_queue.Queue,
      new_owner,
      &queue_context->Lock_context
    );
  } else {
    _CORE_mutex_Release( the_mutex, queue_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }

  return STATUS_SUCCESSFUL;
}
Example #6
void _Thread_queue_Flush(
  Thread_queue_Control       *the_thread_queue,
#if defined(RTEMS_MULTIPROCESSING)
  Thread_queue_Flush_callout  remote_extract_callout,
#else
  Thread_queue_Flush_callout  remote_extract_callout RTEMS_UNUSED,
#endif
  uint32_t                    status
)
{
  ISR_lock_Context  lock_context;
  Thread_Control   *the_thread;

  _Thread_queue_Acquire( the_thread_queue, &lock_context );

  while ( (the_thread = _Thread_queue_First_locked( the_thread_queue ) ) ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( _Objects_Is_local_id( the_thread->Object.id ) )
#endif
      the_thread->Wait.return_code = status;

    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      the_thread_queue->operations,
      the_thread,
      &lock_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) )
      ( *remote_extract_callout )( the_thread );
#endif

    _Thread_queue_Acquire( the_thread_queue, &lock_context );
  }

  _Thread_queue_Release( the_thread_queue, &lock_context );
}
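For context, a hypothetical caller sketch (not taken from the RTEMS sources): waking every thread blocked on a queue that is about to be deleted. The wrapper name, the status value, and passing NULL for the remote extract callout (the parameter is unused on a uniprocessor build, as the signature above shows) are assumptions for illustration; the declarations come from the score headers, as in the examples above.
static void _Example_Flush_on_delete( Thread_queue_Control *the_thread_queue )
{
  /* Assumed status value meaning "object was deleted"; the owning object
   * defines the real code. */
  uint32_t deleted_status = 1;

  _Thread_queue_Flush(
    the_thread_queue,
    NULL,            /* no remote extract callout (uniprocessor assumption) */
    deleted_status
  );
}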
Example #7
bool _Thread_queue_Do_extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread
#if defined(RTEMS_MULTIPROCESSING)
  ,
  const Thread_queue_Context    *queue_context
#endif
)
{
  bool success;
  bool unblock;

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
    Thread_Proxy_control    *the_proxy;
    Thread_queue_MP_callout  mp_callout;

    the_proxy = (Thread_Proxy_control *) the_thread;
    mp_callout = queue_context->mp_callout;
    _Assert( mp_callout != NULL );
    the_proxy->thread_queue_callout = mp_callout;
  }
#endif

  ( *operations->extract )( queue, the_thread );

  /*
   * We must update the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );

  return unblock;
}
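Example #8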
int sem_unlink(
  const char *name
)
{
  int  status;
  register POSIX_Semaphore_Control *the_semaphore;
  sem_t                        the_semaphore_id;

  _Thread_Disable_dispatch();

  status = _POSIX_Semaphore_Name_to_id( name, &the_semaphore_id );
  if ( status != 0 ) {
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( status );
  }

  /*
   *  Don't support unlinking a remote semaphore.
   */

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Objects_Is_local_id((Objects_Id)the_semaphore_id) ) {
    _Thread_Enable_dispatch();
    rtems_set_errno_and_return_minus_one( ENOSYS );
  }
#endif

  the_semaphore = (POSIX_Semaphore_Control *) _Objects_Get_local_object(
    &_POSIX_Semaphore_Information,
    _Objects_Get_index( the_semaphore_id )
  );

#if defined(RTEMS_MULTIPROCESSING)
  if ( the_semaphore->process_shared == PTHREAD_PROCESS_SHARED ) {
    _Objects_MP_Close( &_POSIX_Semaphore_Information, the_semaphore_id );
  }
#endif

  the_semaphore->linked = FALSE;
  _POSIX_Semaphore_Namespace_remove( the_semaphore );
  _POSIX_Semaphore_Delete( the_semaphore );

  _Thread_Enable_dispatch();
  return 0;
}
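Since sem_unlink() implements the standard POSIX call, here is a minimal usage sketch at the application level; the semaphore name "/example_sem" and the wrapper function are arbitrary illustrations.
#include <fcntl.h>
#include <semaphore.h>
#include <stdio.h>

int example_unlink_semaphore( void )
{
  /* Create (or open) a named semaphore, then remove its name. */
  sem_t *sem = sem_open( "/example_sem", O_CREAT, 0644, 0 );

  if ( sem == SEM_FAILED ) {
    perror( "sem_open" );
    return -1;
  }

  sem_close( sem );

  if ( sem_unlink( "/example_sem" ) != 0 ) {
    perror( "sem_unlink" );
    return -1;
  }

  return 0;
}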
Example #9
static bool _Thread_queue_MP_set_callout(
    Thread_Control             *the_thread,
    const Thread_queue_Context *queue_context
)
{
    Thread_Proxy_control    *the_proxy;
    Thread_queue_MP_callout  mp_callout;

    if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
        return false;
    }

    the_proxy = (Thread_Proxy_control *) the_thread;
    mp_callout = queue_context->mp_callout;
    _Assert( mp_callout != NULL );
    the_proxy->thread_queue_callout = mp_callout;
    return true;
}
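A hedged sketch (an assumed refactoring, not the upstream code) of how a helper like _Thread_queue_MP_set_callout could replace the inline proxy block of Example #7; apart from the _Example_Extract_with_callout wrapper name, every identifier is taken from Examples #7 and #9.
static void _Example_Extract_with_callout(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  const Thread_queue_Context    *queue_context
)
{
#if defined(RTEMS_MULTIPROCESSING)
  /* For a remote thread, remember the MP callout on its proxy; for a
   * local thread the helper returns immediately (see Example #9). */
  _Thread_queue_MP_set_callout( the_thread, queue_context );
#else
  (void) queue_context;   /* unused without multiprocessing support */
#endif

  /* Remove the thread from the queue, as in Example #7. */
  ( *operations->extract )( queue, the_thread );

  /* The wait-flag transition and unblock decision of Example #7 would
   * follow here unchanged. */
}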
Example #10
void _Thread_queue_Flush(
  Thread_queue_Control       *the_thread_queue,
#if defined(RTEMS_MULTIPROCESSING)
  Thread_queue_Flush_callout  remote_extract_callout,
#else
  Thread_queue_Flush_callout  remote_extract_callout __attribute__((unused)),
#endif
  uint32_t                    status
)
{
  Thread_Control *the_thread;

  while ( (the_thread = _Thread_queue_Dequeue( the_thread_queue )) ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) )
      ( *remote_extract_callout )( the_thread );
    else
#endif
      the_thread->Wait.return_code = status;
  }
}
Example #11
CORE_mutex_Status _CORE_mutex_Surrender(
  CORE_mutex_Control                *the_mutex,
#if defined(RTEMS_MULTIPROCESSING)
  Objects_Id                         id,
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support
#else
  Objects_Id                         id __attribute__((unused)),
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support __attribute__((unused))
#endif
)
{
  Thread_Control *the_thread;
  Thread_Control *holder;

  holder = the_mutex->holder;

  /*
   *  The following code allows a thread (or ISR) other than the thread
   *  which acquired the mutex to release that mutex.  This is only
   *  allowed when the mutex in question is FIFO or simple Priority
   *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
   *  must be released by the thread which acquired them.
   */

  if ( the_mutex->Attributes.only_owner_release ) {
    if ( !_Thread_Is_executing( holder ) )
      return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
  }

  /* XXX already unlocked -- not right status */

  if ( !the_mutex->nest_count )
    return CORE_MUTEX_STATUS_SUCCESSFUL;

  the_mutex->nest_count--;

  if ( the_mutex->nest_count != 0 ) {
    /*
     *  All error checking is on the locking side, so if the lock was
     *  allowed to be acquired multiple times, then we should just deal with
     *  that.  The RTEMS_DEBUG is just a validation.
     */
    #if defined(RTEMS_DEBUG)
      switch ( the_mutex->Attributes.lock_nesting_behavior ) {
        case CORE_MUTEX_NESTING_ACQUIRES:
          return CORE_MUTEX_STATUS_SUCCESSFUL;
        #if defined(RTEMS_POSIX_API)
          case CORE_MUTEX_NESTING_IS_ERROR:
            /* should never occur */
            return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
        #endif
        case CORE_MUTEX_NESTING_BLOCKS:
          /* Currently no API exercises this behavior. */
          break;
      }
    #else
      /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
      return CORE_MUTEX_STATUS_SUCCESSFUL;
    #endif
  }

  /*
   *  Formally release the mutex before possibly transferring it to a
   *  blocked thread.
   */
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
    CORE_mutex_Status pop_status =
      _CORE_mutex_Pop_priority( the_mutex, holder );

    if ( pop_status != CORE_MUTEX_STATUS_SUCCESSFUL )
      return pop_status;

    holder->resource_count--;

    /*
     *  Whether or not someone is waiting for the mutex, an
     *  inherited priority must be lowered if this is the last
     *  mutex (i.e. resource) this task has.
     */
    if ( holder->resource_count == 0 &&
         holder->real_priority != holder->current_priority ) {
      _Thread_Change_priority( holder, holder->real_priority, true );
    }
  }
  the_mutex->holder    = NULL;
  the_mutex->holder_id = 0;

  /*
   *  Now we check if another thread was waiting for this mutex.  If so,
   *  transfer the mutex to that thread.
   */
  if ( ( the_thread = _Thread_queue_Dequeue( &the_mutex->Wait_queue ) ) ) {

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {

      the_mutex->holder     = NULL;
      the_mutex->holder_id  = the_thread->Object.id;
      the_mutex->nest_count = 1;

      ( *api_mutex_mp_support)( the_thread, id );

    } else
#endif
    {

      the_mutex->holder     = the_thread;
      the_mutex->holder_id  = the_thread->Object.id;
      the_mutex->nest_count = 1;

      switch ( the_mutex->Attributes.discipline ) {
        case CORE_MUTEX_DISCIPLINES_FIFO:
        case CORE_MUTEX_DISCIPLINES_PRIORITY:
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          if (the_mutex->Attributes.priority_ceiling <
              the_thread->current_priority){
              _Thread_Change_priority(
                the_thread,
                the_mutex->Attributes.priority_ceiling,
                false
              );
          }
          break;
      }
    }
  } else
    the_mutex->lock = CORE_MUTEX_UNLOCKED;

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
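Example #12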
CORE_message_queue_Status _CORE_message_queue_Broadcast(
  CORE_message_queue_Control                *the_message_queue,
  const void                                *buffer,
  size_t                                     size,
  #if defined(RTEMS_MULTIPROCESSING)
    Objects_Id                                 id,
    CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
  #else
    Objects_Id                                 id __attribute__((unused)),
    CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support __attribute__((unused)),
  #endif
  uint32_t                                  *count
)
{
  Thread_Control          *the_thread;
  uint32_t                 number_broadcasted;
  Thread_Wait_information *waitp;

  if ( size > the_message_queue->maximum_message_size ) {
    return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
  }

  /*
   *  If there are pending messages, then there can't be threads
   *  waiting for us to send them a message.
   *
   *  NOTE: This check is critical because threads can block on
   *        send and receive and this ensures that we are broadcasting
   *        the message to threads waiting to receive -- not to send.
   */

  if ( the_message_queue->number_of_pending_messages != 0 ) {
    *count = 0;
    return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
  }

  /*
   *  There must be no pending messages if there is a thread waiting to
   *  receive a message.
   */
  number_broadcasted = 0;
  while ((the_thread =
          _Thread_queue_Dequeue(&the_message_queue->Wait_queue))) {
    waitp = &the_thread->Wait;
    number_broadcasted += 1;

    _CORE_message_queue_Copy_buffer(
      buffer,
      waitp->return_argument_second.mutable_object,
      size
    );

    *(size_t *) the_thread->Wait.return_argument = size;

    #if defined(RTEMS_MULTIPROCESSING)
      if ( !_Objects_Is_local_id( the_thread->Object.id ) )
        (*api_message_queue_mp_support) ( the_thread, id );
    #endif

  }
  *count = number_broadcasted;
  return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
}
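_CORE_message_queue_Broadcast() sits underneath the Classic API directive rtems_message_queue_broadcast(); a minimal usage sketch follows, assuming queue_id names an existing message queue.
#include <rtems.h>

rtems_status_code example_broadcast( rtems_id queue_id )
{
  uint32_t   count;
  const char msg[] = "wake up";

  /* Deliver the message to every task currently waiting to receive;
   * count reports how many tasks were woken. */
  return rtems_message_queue_broadcast( queue_id, msg, sizeof( msg ), &count );
}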
Example #13
CORE_mutex_Status _CORE_mutex_Surrender(
  CORE_mutex_Control                *the_mutex,
#if defined(RTEMS_MULTIPROCESSING)
  Objects_Id                         id,
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support,
#else
  Objects_Id                         id __attribute__((unused)),
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support __attribute__((unused)),
#endif
  ISR_lock_Context                  *lock_context
)
{
  Thread_Control *the_thread;
  Thread_Control *holder;

  holder = the_mutex->holder;

  /*
   *  The following code allows a thread (or ISR) other than the thread
   *  which acquired the mutex to release that mutex.  This is only
   *  allowed when the mutex in question is FIFO or simple Priority
   *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
   *  must be released by the thread which acquired them.
   */

  if ( the_mutex->Attributes.only_owner_release ) {
    if ( !_Thread_Is_executing( holder ) ) {
      _ISR_lock_ISR_enable( lock_context );
      return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
    }
  }

  _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context );

  /* XXX already unlocked -- not right status */

  if ( !the_mutex->nest_count ) {
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
    return CORE_MUTEX_STATUS_SUCCESSFUL;
  }

  the_mutex->nest_count--;

  if ( the_mutex->nest_count != 0 ) {
    /*
     *  All error checking is on the locking side, so if the lock was
     *  allowed to be acquired multiple times, then we should just deal with
     *  that.  The RTEMS_DEBUG is just a validation.
     */
    #if defined(RTEMS_DEBUG)
      switch ( the_mutex->Attributes.lock_nesting_behavior ) {
        case CORE_MUTEX_NESTING_ACQUIRES:
          _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
          return CORE_MUTEX_STATUS_SUCCESSFUL;
        #if defined(RTEMS_POSIX_API)
          case CORE_MUTEX_NESTING_IS_ERROR:
            /* should never occur */
            _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
            return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
        #endif
        case CORE_MUTEX_NESTING_BLOCKS:
          /* Currently no API exercises this behavior. */
          break;
      }
    #else
      _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
      /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
      return CORE_MUTEX_STATUS_SUCCESSFUL;
    #endif
  }

  /*
   *  Formally release the mutex before possibly transferring it to a
   *  blocked thread.
   */
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
    CORE_mutex_Status pop_status =
      _CORE_mutex_Pop_priority( the_mutex, holder );

    if ( pop_status != CORE_MUTEX_STATUS_SUCCESSFUL ) {
      _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
      return pop_status;
    }

    holder->resource_count--;
  }
  the_mutex->holder = NULL;

  /*
   *  Now we check if another thread was waiting for this mutex.  If so,
   *  transfer the mutex to that thread.
   */
  if ( ( the_thread = _Thread_queue_First_locked( &the_mutex->Wait_queue ) ) ) {
    /*
     * We must extract the thread now since this will restore its default
     * thread lock.  This is necessary to avoid a deadlock in the
     * _Thread_Change_priority() below due to a recursive thread queue lock
     * acquire.
     */
    _Thread_queue_Extract_locked( &the_mutex->Wait_queue, the_thread );

#if defined(RTEMS_MULTIPROCESSING)
    _Thread_Dispatch_disable();

    if ( _Objects_Is_local_id( the_thread->Object.id ) )
#endif
    {
      the_mutex->holder     = the_thread;
      the_mutex->nest_count = 1;

      switch ( the_mutex->Attributes.discipline ) {
        case CORE_MUTEX_DISCIPLINES_FIFO:
        case CORE_MUTEX_DISCIPLINES_PRIORITY:
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
          _CORE_mutex_Push_priority( the_mutex, the_thread );
          the_thread->resource_count++;
          _Thread_Raise_priority(
            the_thread,
            the_mutex->Attributes.priority_ceiling
          );
          break;
      }
    }

    _Thread_queue_Unblock_critical(
      &the_mutex->Wait_queue,
      the_thread,
      lock_context
    );

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {

      the_mutex->holder     = NULL;
      the_mutex->nest_count = 1;

      ( *api_mutex_mp_support)( the_thread, id );

    }

    _Thread_Dispatch_enable( _Per_CPU_Get() );
#endif
  } else {
    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
  }

  /*
   *  Whether or not someone is waiting for the mutex, an
   *  inherited priority must be lowered if this is the last
   *  mutex (i.e. resource) this task has.
   */
  if ( !_Thread_Owns_resources( holder ) ) {
    /*
     * Ensure that the holder resource count is visible to all other processors
     * and that we read the latest priority restore hint.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

    if ( holder->priority_restore_hint ) {
      Per_CPU_Control *cpu_self;

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Restore_priority( holder );
      _Thread_Dispatch_enable( cpu_self );
    }
  }

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
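At the Classic API level, _CORE_mutex_Surrender() is typically reached through rtems_semaphore_release() on a binary semaphore configured as a mutex; a minimal usage sketch, with mutex_id assumed to identify such a semaphore:
#include <rtems.h>

rtems_status_code example_release_mutex( rtems_id mutex_id )
{
  /* Hand the mutex back; if another task is waiting, ownership is
   * transferred to it as shown in the surrender code above. */
  return rtems_semaphore_release( mutex_id );
}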
Example #14
Thread_Control *_Thread_queue_Dequeue_priority(
    Thread_queue_Control *the_thread_queue
)
{
    uint32_t        index;
    ISR_Level       level;
    Thread_Control *the_thread = NULL;  /* just to remove warnings */
    Thread_Control *new_first_thread;
    Chain_Node     *head;
    Chain_Node     *tail;
    Chain_Node     *new_first_node;
    Chain_Node     *new_second_node;
    Chain_Node     *last_node;
    Chain_Node     *next_node;
    Chain_Node     *previous_node;

    _ISR_Disable( level );
    for( index=0 ;
            index < TASK_QUEUE_DATA_NUMBER_OF_PRIORITY_HEADERS ;
            index++ ) {
        if ( !_Chain_Is_empty( &the_thread_queue->Queues.Priority[ index ] ) ) {
            the_thread = (Thread_Control *) _Chain_First(
                             &the_thread_queue->Queues.Priority[ index ]
                         );
            goto dequeue;
        }
    }

    /*
     * We did not find a thread to unblock.
     */
    _ISR_Enable( level );
    return NULL;

dequeue:
    the_thread->Wait.queue = NULL;
    new_first_node   = _Chain_First( &the_thread->Wait.Block2n );
    new_first_thread = (Thread_Control *) new_first_node;
    next_node        = the_thread->Object.Node.next;
    previous_node    = the_thread->Object.Node.previous;

    if ( !_Chain_Is_empty( &the_thread->Wait.Block2n ) ) {
        last_node       = _Chain_Last( &the_thread->Wait.Block2n );
        new_second_node = new_first_node->next;

        previous_node->next      = new_first_node;
        next_node->previous      = new_first_node;
        new_first_node->next     = next_node;
        new_first_node->previous = previous_node;

        if ( !_Chain_Has_only_one_node( &the_thread->Wait.Block2n ) ) {
            /* > two threads on 2-n */
            head = _Chain_Head( &new_first_thread->Wait.Block2n );
            tail = _Chain_Tail( &new_first_thread->Wait.Block2n );

            new_second_node->previous = head;
            head->next = new_second_node;
            tail->previous = last_node;
            last_node->next = tail;
        }
    } else {
        previous_node->next = next_node;
        next_node->previous = previous_node;
    }

    if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
        _ISR_Enable( level );
        _Thread_Unblock( the_thread );
    } else {
        _Watchdog_Deactivate( &the_thread->Timer );
        _ISR_Enable( level );
        (void) _Watchdog_Remove( &the_thread->Timer );
        _Thread_Unblock( the_thread );
    }

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) )
        _Thread_MP_Free_proxy( the_thread );
#endif
    return( the_thread );
}
Example #15
CORE_message_queue_Status _CORE_message_queue_Submit(
  CORE_message_queue_Control                *the_message_queue,
  Thread_Control                            *executing,
  const void                                *buffer,
  size_t                                     size,
  Objects_Id                                 id,
  #if defined(RTEMS_MULTIPROCESSING)
    CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
  #else
    CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support  RTEMS_UNUSED,
  #endif
  CORE_message_queue_Submit_types            submit_type,
  bool                                       wait,
  Watchdog_Interval                          timeout,
  ISR_lock_Context                          *lock_context
)
{
  CORE_message_queue_Buffer_control *the_message;
  Thread_Control                    *the_thread;

  if ( size > the_message_queue->maximum_message_size ) {
    _ISR_lock_ISR_enable( lock_context );
    return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
  }

  _CORE_message_queue_Acquire_critical( the_message_queue, lock_context );

  /*
   *  Is there a thread currently waiting on this message queue?
   */

  the_thread = _CORE_message_queue_Dequeue_receiver(
    the_message_queue,
    buffer,
    size,
    submit_type,
    lock_context
  );
  if ( the_thread != NULL ) {
    #if defined(RTEMS_MULTIPROCESSING)
      if ( !_Objects_Is_local_id( the_thread->Object.id ) )
        (*api_message_queue_mp_support) ( the_thread, id );

      _Thread_Dispatch_enable( _Per_CPU_Get() );
    #endif
    return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
  }

  /*
   *  No one waiting on the message queue at this time, so attempt to
   *  queue the message up for a future receive.
   */
  the_message =
      _CORE_message_queue_Allocate_message_buffer( the_message_queue );
  if ( the_message ) {
    the_message->Contents.size = size;
    _CORE_message_queue_Set_message_priority( the_message, submit_type );
    _CORE_message_queue_Copy_buffer(
      buffer,
      the_message->Contents.buffer,
      size
    );

    _CORE_message_queue_Insert_message(
       the_message_queue,
       the_message,
       submit_type
    );
    _CORE_message_queue_Release( the_message_queue, lock_context );
    return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
  }

  #if !defined(RTEMS_SCORE_COREMSG_ENABLE_BLOCKING_SEND)
    _CORE_message_queue_Release( the_message_queue, lock_context );
    return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
  #else
    /*
     *  No message buffers were available so we may need to return an
     *  overflow error or block the sender until the message is placed
     *  on the queue.
     */
    if ( !wait ) {
      _CORE_message_queue_Release( the_message_queue, lock_context );
      return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
    }

    /*
     *  Do NOT block on a send if the caller is in an ISR.  It is
     *  deadly to block in an ISR.
     */
    if ( _ISR_Is_in_progress() ) {
      _CORE_message_queue_Release( the_message_queue, lock_context );
      return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
    }

    /*
     *  WARNING!! executing should NOT be used prior to this point.
     */
    executing->Wait.id = id;
    executing->Wait.return_argument_second.immutable_object = buffer;
    executing->Wait.option = (uint32_t) size;
    executing->Wait.count = submit_type;

    _Thread_queue_Enqueue_critical(
      &the_message_queue->Wait_queue.Queue,
      the_message_queue->Wait_queue.operations,
      executing,
      STATES_WAITING_FOR_MESSAGE,
      timeout,
      CORE_MESSAGE_QUEUE_STATUS_TIMEOUT,
      lock_context
    );
    #if defined(RTEMS_MULTIPROCESSING)
      _Thread_Dispatch_enable( _Per_CPU_Get() );
    #endif

    return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_WAIT;
  #endif
}
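Example #16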
CORE_message_queue_Status _CORE_message_queue_Submit(
  CORE_message_queue_Control                *the_message_queue,
  void                                      *buffer,
  size_t                                     size,
  Objects_Id                                 id,
  CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
  CORE_message_queue_Submit_types            submit_type,
  boolean                                    wait,
  Watchdog_Interval                          timeout
)
{
  ISR_Level                            level;
  CORE_message_queue_Buffer_control   *the_message;
  Thread_Control                      *the_thread;

  if ( size > the_message_queue->maximum_message_size ) {
    return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
  }

  /*
   *  Is there a thread currently waiting on this message queue?
   */

  if ( the_message_queue->number_of_pending_messages == 0 ) {
    the_thread = _Thread_queue_Dequeue( &the_message_queue->Wait_queue );
    if ( the_thread ) {
      _CORE_message_queue_Copy_buffer(
        buffer,
        the_thread->Wait.return_argument,
        size
      );
      *(size_t *)the_thread->Wait.return_argument_1 = size;
      the_thread->Wait.count = submit_type;

#if defined(RTEMS_MULTIPROCESSING)
      if ( !_Objects_Is_local_id( the_thread->Object.id ) )
        (*api_message_queue_mp_support) ( the_thread, id );
#endif
      return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    }
  }

  /*
   *  No one waiting on the message queue at this time, so attempt to
   *  queue the message up for a future receive.
   */

  if ( the_message_queue->number_of_pending_messages <
       the_message_queue->maximum_pending_messages ) {

    the_message =
        _CORE_message_queue_Allocate_message_buffer( the_message_queue );

    /*
     *  NOTE: If the system is consistent, this error should never occur.
     */

    if ( !the_message ) {
      return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
    }

    _CORE_message_queue_Copy_buffer(
      buffer,
      the_message->Contents.buffer,
      size
    );
    the_message->Contents.size = size;
    the_message->priority  = submit_type;

    _CORE_message_queue_Insert_message(
       the_message_queue,
       the_message,
       submit_type
    );
    return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
  }

  /*
   *  No message buffers were available so we may need to return an
   *  overflow error or block the sender until the message is placed
   *  on the queue.
   */

  if ( !wait ) {
    return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
  }

  /*
   *  Do NOT block on a send if the caller is in an ISR.  It is
   *  deadly to block in an ISR.
   */

  if ( _ISR_Is_in_progress() ) {
    return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
  }

  /*
   *  WARNING!! executing should NOT be used prior to this point.
   *  Thus the unusual choice to open a new scope and declare
   *  it as a variable.  Doing this emphasizes how dangerous it
   *  would be to use this variable prior to here.
   */

  {
    Thread_Control  *executing = _Thread_Executing;

    _ISR_Disable( level );
    _Thread_queue_Enter_critical_section( &the_message_queue->Wait_queue );
    executing->Wait.queue              = &the_message_queue->Wait_queue;
    executing->Wait.id                 = id;
    executing->Wait.return_argument    = buffer;
    executing->Wait.option             = size;
    executing->Wait.count              = submit_type;
    _ISR_Enable( level );

    _Thread_queue_Enqueue( &the_message_queue->Wait_queue, timeout );
  }

  return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_WAIT;
}
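Both _CORE_message_queue_Submit() variants above back the Classic API send directives; a minimal usage sketch, assuming queue_id identifies an existing message queue:
#include <rtems.h>

rtems_status_code example_send( rtems_id queue_id )
{
  const char msg[] = "ping";

  /* Append the message at the tail of the queue; rtems_message_queue_urgent()
   * would prepend it instead. */
  return rtems_message_queue_send( queue_id, msg, sizeof( msg ) );
}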