Example #1
static Thread_queue_Heads *_Thread_queue_Queue_enqueue(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  void             ( *initialize )( Thread_queue_Heads *, Thread_Control * ),
  void             ( *enqueue )( Thread_queue_Heads *, Thread_Control * )
)
{
  Thread_queue_Heads *heads;
  Thread_queue_Heads *spare_heads;

  heads = queue->heads;
  spare_heads = the_thread->Wait.spare_heads;
  the_thread->Wait.spare_heads = NULL;

  if ( heads == NULL ) {
    _Assert( spare_heads != NULL );
    _Assert( _Chain_Is_empty( &spare_heads->Free_chain ) );

    heads = spare_heads;
    queue->heads = heads;
    _Chain_Prepend_unprotected( &heads->Free_chain, &spare_heads->Free_node );
    ( *initialize )( heads, the_thread );
  } else {
    _Chain_Prepend_unprotected( &heads->Free_chain, &spare_heads->Free_node );
    ( *enqueue )( heads, the_thread );
  }

  return heads;
}
Example #2
File: coremutex.c  Project: Fyleo/rtems
CORE_mutex_Status _CORE_mutex_Initialize(
  CORE_mutex_Control           *the_mutex,
  Thread_Control               *executing,
  const CORE_mutex_Attributes  *the_mutex_attributes,
  bool                          initially_locked
)
{

/* Add this to the RTEMS environment later ?????????
  rtems_assert( initial_lock == CORE_MUTEX_LOCKED ||
                initial_lock == CORE_MUTEX_UNLOCKED );
 */

  the_mutex->Attributes    = *the_mutex_attributes;

  if ( initially_locked ) {
    the_mutex->nest_count = 1;
    the_mutex->holder     = executing;
    if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
         _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
      Priority_Control ceiling = the_mutex->Attributes.priority_ceiling;

      /*
       * The mutex initialization is only protected by the allocator lock in
       * general.  Disable thread dispatching before the priority check to
       * prevent interference with priority inheritance.
       */
      _Thread_Disable_dispatch();

      if ( executing->current_priority < ceiling ) {
        _Thread_Enable_dispatch();
        return CORE_MUTEX_STATUS_CEILING_VIOLATED;
      }

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
       _Chain_Prepend_unprotected( &executing->lock_mutex,
                                   &the_mutex->queue.lock_queue );
       the_mutex->queue.priority_before = executing->current_priority;
#endif

      executing->resource_count++;

      _Thread_Change_priority( executing, ceiling, false );
      _Thread_Enable_dispatch();
    }
  } else {
    the_mutex->nest_count = 0;
    the_mutex->holder     = NULL;
  }

  _Thread_queue_Initialize(
    &the_mutex->Wait_queue,
    _CORE_mutex_Is_fifo( the_mutex_attributes ) ?
      THREAD_QUEUE_DISCIPLINE_FIFO : THREAD_QUEUE_DISCIPLINE_PRIORITY,
    STATES_WAITING_FOR_MUTEX,
    CORE_MUTEX_TIMEOUT
  );

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
Example #3
File: freechain.c  Project: gedare/rtems
void _Freechain_Put( Freechain_Control *freechain, void *node )
{
  if ( node != NULL ) {
    _Chain_Initialize_node( node );
    _Chain_Prepend_unprotected( &freechain->Free, node );
  }
}
Example #4
static inline void _CORE_mutex_Push_priority(
  CORE_mutex_Control *mutex,
  Thread_Control *thread
)
{
  _Chain_Prepend_unprotected(
    &thread->lock_mutex,
    &mutex->queue.lock_queue
  );
  mutex->queue.priority_before = thread->current_priority;
}
Example #5
File: chainsmp.c  Project: hazirguo/rtems
void rtems_chain_prepend(
    rtems_chain_control *chain,
    rtems_chain_node *node
)
{
    ISR_Level level;

    _ISR_lock_ISR_disable_and_acquire( &chain->Lock, level );
    _Chain_Prepend_unprotected( &chain->Chain, node );
    _ISR_lock_Release_and_ISR_enable( &chain->Lock, level );
}
Example #6
void rtems_chain_prepend(
  rtems_chain_control *chain,
  rtems_chain_node *node
)
{
  rtems_interrupt_lock_context lock_context;

  chain_acquire( &lock_context );
  _Chain_Prepend_unprotected( chain, node );
  chain_release( &lock_context );
}
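
Examples #5 and #6 are thin wrappers that only differ in how they lock around the same unprotected call, so the effect of the call itself is easiest to see in isolation. The following sketch is not taken from any of the projects above; it assumes the rtems/score/chainimpl.h include path and reuses only chain operations that already appear in these examples to show that prepending yields LIFO ordering.

#include <rtems/score/chainimpl.h>  /* assumed include path for the Chain API */
#include <assert.h>

static void prepend_order_sketch( void )
{
  Chain_Control chain;
  Chain_Node    a;
  Chain_Node    b;

  _Chain_Initialize_empty( &chain );
  _Chain_Initialize_node( &a );
  _Chain_Initialize_node( &b );

  /* Prepend inserts at the head, so the last node prepended comes out first. */
  _Chain_Prepend_unprotected( &chain, &a );
  _Chain_Prepend_unprotected( &chain, &b );

  assert( _Chain_Get_first_unprotected( &chain ) == &b );
  assert( _Chain_Get_first_unprotected( &chain ) == &a );
  assert( _Chain_Is_empty( &chain ) );
}
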
Example #7
File: coremutex.c  Project: epicsdeb/rtems
CORE_mutex_Status _CORE_mutex_Initialize(
  CORE_mutex_Control           *the_mutex,
  CORE_mutex_Attributes        *the_mutex_attributes,
  uint32_t                      initial_lock
)
{

/* Add this to the RTEMS environment later ?????????
  rtems_assert( initial_lock == CORE_MUTEX_LOCKED ||
                initial_lock == CORE_MUTEX_UNLOCKED );
 */

  the_mutex->Attributes    = *the_mutex_attributes;
  the_mutex->lock          = initial_lock;
  the_mutex->blocked_count = 0;

  if ( initial_lock == CORE_MUTEX_LOCKED ) {
    the_mutex->nest_count = 1;
    the_mutex->holder     = _Thread_Executing;
    the_mutex->holder_id  = _Thread_Executing->Object.id;
    if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
         _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {

      if ( _Thread_Executing->current_priority <
             the_mutex->Attributes.priority_ceiling )
       return CORE_MUTEX_STATUS_CEILING_VIOLATED;
#ifdef __RTEMS_STRICT_ORDER_MUTEX__
       _Chain_Prepend_unprotected( &_Thread_Executing->lock_mutex,
                                   &the_mutex->queue.lock_queue );
       the_mutex->queue.priority_before = _Thread_Executing->current_priority;
#endif

      _Thread_Executing->resource_count++;
    }
  } else {
    the_mutex->nest_count = 0;
    the_mutex->holder     = NULL;
    the_mutex->holder_id  = 0;
  }

  _Thread_queue_Initialize(
    &the_mutex->Wait_queue,
    _CORE_mutex_Is_fifo( the_mutex_attributes ) ?
      THREAD_QUEUE_DISCIPLINE_FIFO : THREAD_QUEUE_DISCIPLINE_PRIORITY,
    STATES_WAITING_FOR_MUTEX,
    CORE_MUTEX_TIMEOUT
  );

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
Example #8
void _SMP_Multicast_action(
  const size_t setsize,
  const cpu_set_t *cpus,
  SMP_Action_handler handler,
  void *arg
)
{
  SMP_Multicast_action node;
  Processor_mask       targets;
  SMP_lock_Context     lock_context;
  uint32_t             i;

  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
    ( *handler )( arg );
    return;
  }

  if ( cpus == NULL ) {
    _Processor_mask_Assign( &targets, _SMP_Get_online_processors() );
  } else {
    _Processor_mask_Zero( &targets );

    for ( i = 0; i < _SMP_Get_processor_count(); ++i ) {
      if ( CPU_ISSET_S( i, setsize, cpus ) ) {
        _Processor_mask_Set( &targets, i );
      }
    }
  }

  _Chain_Initialize_node( &node.Node );
  node.handler = handler;
  node.arg = arg;
  _Processor_mask_Assign( &node.targets, &targets );
  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );

  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
  _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );

  _SMP_Send_message_multicast( &targets, SMP_MESSAGE_MULTICAST_ACTION );
  _SMP_Multicasts_try_process();

  while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
    /* Wait */
  }
}
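
Example #8 publishes its action node with _Chain_Prepend_unprotected while holding the SMP lock and then spins on node.done. Below is a hedged call-site sketch, based only on the signature and the NULL check shown above (the handler and function names are hypothetical): with cpus == NULL the setsize argument is ignored and all online processors are targeted.

static void invalidate_local_caches( void *arg )
{
  (void) arg;
  /* Work that must run on each targeted processor. */
}

static void broadcast_cache_invalidate( void )
{
  /* NULL cpu set: the handler runs on every online processor; the call
     returns only after every target has processed the action. */
  _SMP_Multicast_action( 0, NULL, invalidate_local_caches, NULL );
}
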
Example #9
void _CORE_message_queue_Insert_message(
  CORE_message_queue_Control        *the_message_queue,
  CORE_message_queue_Buffer_control *the_message,
  CORE_message_queue_Submit_types    submit_type
)
{
  Chain_Control *pending_messages;
#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
  bool           notify;
#endif

  _CORE_message_queue_Set_message_priority( the_message, submit_type );
  pending_messages = &the_message_queue->Pending_messages;

#if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
  notify = ( the_message_queue->number_of_pending_messages == 0 );
#endif
  ++the_message_queue->number_of_pending_messages;

  if ( submit_type == CORE_MESSAGE_QUEUE_SEND_REQUEST ) {
    _Chain_Append_unprotected( pending_messages, &the_message->Node );
#if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY)
  } else if ( submit_type != CORE_MESSAGE_QUEUE_URGENT_REQUEST ) {
    _Chain_Insert_ordered_unprotected(
      pending_messages,
      &the_message->Node,
      _CORE_message_queue_Order
    );
#endif
  } else {
    _Chain_Prepend_unprotected( pending_messages, &the_message->Node );
  }

  #if defined(RTEMS_SCORE_COREMSG_ENABLE_NOTIFICATION)
    /*
     *  According to POSIX, does this happen before or after the message
     *  is actually enqueued?  It is logical to think afterwards, because
     *  the message is actually in the queue at this point.
     */
    if ( notify && the_message_queue->notify_handler )
      (*the_message_queue->notify_handler)(the_message_queue->notify_argument);
  #endif
}
Example #10
CORE_mutex_Status _CORE_mutex_Surrender(
  CORE_mutex_Control                *the_mutex,
#if defined(RTEMS_MULTIPROCESSING)
  Objects_Id                         id,
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support
#else
  Objects_Id                         id __attribute__((unused)),
  CORE_mutex_API_mp_support_callout  api_mutex_mp_support __attribute__((unused))
#endif
)
{
  Thread_Control *the_thread;
  Thread_Control *holder;
#ifdef __RTEMS_STRICT_ORDER_MUTEX__
  Chain_Node *first_node;
#endif
  holder    = the_mutex->holder;

  /*
   *  The following code allows a thread (or ISR) other than the thread
   *  which acquired the mutex to release that mutex.  This is only
   *  allowed when the mutex in question is FIFO or simple Priority
   *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
   *  must be released by the thread which acquired them.
   */

  if ( the_mutex->Attributes.only_owner_release ) {
    if ( !_Thread_Is_executing( holder ) )
      return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
  }

  /* XXX already unlocked -- not right status */

  if ( !the_mutex->nest_count )
    return CORE_MUTEX_STATUS_SUCCESSFUL;

  the_mutex->nest_count--;

  if ( the_mutex->nest_count != 0 ) {
    /*
     *  All error checking is on the locking side, so if the lock was
     *  allowed to be acquired multiple times, then we should just deal with
     *  that.  The RTEMS_DEBUG is just a validation.
     */
    #if defined(RTEMS_DEBUG)
      switch ( the_mutex->Attributes.lock_nesting_behavior ) {
        case CORE_MUTEX_NESTING_ACQUIRES:
          return CORE_MUTEX_STATUS_SUCCESSFUL;
        case CORE_MUTEX_NESTING_IS_ERROR:
          /* should never occur */
          return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
        case CORE_MUTEX_NESTING_BLOCKS:
          /* Currently no API exercises this behavior. */
          break;
      }
    #else
      /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
      return CORE_MUTEX_STATUS_SUCCESSFUL;
    #endif
  }

  /*
   *  Formally release the mutex before possibly transferring it to a
   *  blocked thread.
   */
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ){
#ifdef __RTEMS_STRICT_ORDER_MUTEX__
    /*
     * Check whether the holder releases the mutex in LIFO order;
     * if not, return an error code.
     */
    if ( holder->lock_mutex.first != &the_mutex->queue.lock_queue ) {
      the_mutex->nest_count++;
      return CORE_MUTEX_RELEASE_NOT_ORDER;
    }
    first_node = _Chain_Get_first_unprotected(&holder->lock_mutex);
#endif
    holder->resource_count--;
  }
  the_mutex->holder    = NULL;
  the_mutex->holder_id = 0;

  /*
   *  Whether or not someone is waiting for the mutex, an
   *  inherited priority must be lowered if this is the last
   *  mutex (i.e. resource) this task has.
   */
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
#ifdef __RTEMS_STRICT_ORDER_MUTEX__
    if ( the_mutex->queue.priority_before != holder->current_priority )
      _Thread_Change_priority( holder, the_mutex->queue.priority_before, true );
#endif
    if ( holder->resource_count == 0 &&
         holder->real_priority != holder->current_priority ) {
      _Thread_Change_priority( holder, holder->real_priority, true );
    }
  }

  /*
   *  Now we check if another thread was waiting for this mutex.  If so,
   *  transfer the mutex to that thread.
   */
  if ( ( the_thread = _Thread_queue_Dequeue( &the_mutex->Wait_queue ) ) ) {

#if defined(RTEMS_MULTIPROCESSING)
    if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {

      the_mutex->holder     = NULL;
      the_mutex->holder_id  = the_thread->Object.id;
      the_mutex->nest_count = 1;

      ( *api_mutex_mp_support)( the_thread, id );

    } else
#endif
    {

      the_mutex->holder     = the_thread;
      the_mutex->holder_id  = the_thread->Object.id;
      the_mutex->nest_count = 1;

      switch ( the_mutex->Attributes.discipline ) {
        case CORE_MUTEX_DISCIPLINES_FIFO:
        case CORE_MUTEX_DISCIPLINES_PRIORITY:
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
#ifdef __RTEMS_STRICT_ORDER_MUTEX__
          _Chain_Prepend_unprotected( &the_thread->lock_mutex,
                                      &the_mutex->queue.lock_queue );
          the_mutex->queue.priority_before = the_thread->current_priority;
#endif
          the_thread->resource_count++;
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
#ifdef __RTEMS_STRICT_ORDER_MUTEX__
          _Chain_Prepend_unprotected( &the_thread->lock_mutex,
                                      &the_mutex->queue.lock_queue );
          the_mutex->queue.priority_before = the_thread->current_priority;
#endif
          the_thread->resource_count++;
          if (the_mutex->Attributes.priority_ceiling <
              the_thread->current_priority){
              _Thread_Change_priority(
                the_thread,
                the_mutex->Attributes.priority_ceiling,
                false
              );
          }
          break;
      }
    }
  } else
    the_mutex->lock = CORE_MUTEX_UNLOCKED;

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}
Example #11
void _Thread_Change_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  bool              prepend_it
)
{
  ISR_Level      level;
  States_Control state, original_state;

  /*
   *  If this is a case where prepending the task to its priority is
   *  potentially desired, then we need to consider whether to do it.
   *  This usually occurs when a task lowers its priority implicitly as
   *  the result of losing inherited priority.  Normal explicit priority
   *  change calls (e.g. rtems_task_set_priority) should always do an
   *  append not a prepend.
   */
/*
  if ( prepend_it &&
       _Thread_Is_executing( the_thread ) &&
       new_priority >= the_thread->current_priority )
    prepend_it = true;
*/

  /*
   * Save original state
   */
  original_state = the_thread->current_state;

  /*
   * Set a transient state for the thread so it is pulled off the Ready chains.
   * This will prevent it from being scheduled no matter what happens in an
   * ISR.
   */
  _Thread_Set_transient( the_thread );

  /*
   *  Do not bother recomputing all the priority related information if
   *  we are not REALLY changing priority.
   */
  if ( the_thread->current_priority != new_priority )
    _Thread_Set_priority( the_thread, new_priority );

  _ISR_Disable( level );

  /*
   *  If the thread has more than STATES_TRANSIENT set, then it is blocked.
   *  If it is blocked on a thread queue, then we need to requeue it.
   */
  state = the_thread->current_state;
  if ( state != STATES_TRANSIENT ) {
    /* Only clear the transient state if it wasn't set already */
    if ( ! _States_Is_transient( original_state ) )
      the_thread->current_state = _States_Clear( STATES_TRANSIENT, state );
    _ISR_Enable( level );
    if ( _States_Is_waiting_on_thread_queue( state ) ) {
      _Thread_queue_Requeue( the_thread->Wait.queue, the_thread );
    }
    return;
  }

  /* Only clear the transient state if it wasn't set already */
  if ( ! _States_Is_transient( original_state ) ) {
    /*
     *  Interrupts are STILL disabled.
     *  We now know the thread will be in the READY state when we remove
     *  the TRANSIENT state.  So we have to place it on the appropriate
     *  Ready Queue with interrupts off.
     */
    the_thread->current_state = _States_Clear( STATES_TRANSIENT, state );

    _Priority_Add_to_bit_map( &the_thread->Priority_map );
    if ( prepend_it )
      _Chain_Prepend_unprotected( the_thread->ready, &the_thread->Object.Node );
    else
      _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );
  }

  _ISR_Flash( level );

  /*
   *  We altered the set of thread priorities.  So let's figure out
   *  who is the heir and if we need to switch to them.
   */
  _Thread_Calculate_heir();

  if ( !_Thread_Is_executing_also_the_heir() &&
       _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = true;
  _ISR_Enable( level );
}
Example #12
File: init.c  Project: Dipupo/rtems
static void test_chain_iterator( void )
{
  Chain_Control chain;
  Chain_Iterator_registry reg;
  Chain_Iterator fit;
  Chain_Iterator bit;
  Chain_Node a;
  Chain_Node b;
  Chain_Node c;

  puts( "INIT - Verify Chain_Iterator" );

  rtems_test_assert( _Chain_Is_empty( &static_reg.Iterators ));

  _Chain_Initialize_empty( &chain );
  _Chain_Iterator_registry_initialize( &reg );
  _Chain_Iterator_initialize( &chain, &reg, &fit, CHAIN_ITERATOR_FORWARD );
  _Chain_Iterator_initialize( &chain, &reg, &bit, CHAIN_ITERATOR_BACKWARD );

  rtems_test_assert( _Chain_Iterator_next( &fit ) == _Chain_Tail( &chain ));
  rtems_test_assert( _Chain_Iterator_next( &bit ) == _Chain_Head( &chain ));

  _Chain_Iterator_set_position( &fit, _Chain_Head( &chain ) );
  _Chain_Iterator_set_position( &bit, _Chain_Tail( &chain ) );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == _Chain_Tail( &chain ));
  rtems_test_assert( _Chain_Iterator_next( &bit ) == _Chain_Head( &chain ));

  _Chain_Append_unprotected( &chain, &a );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &a );

  _Chain_Append_unprotected( &chain, &b );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &b );

  _Chain_Append_unprotected( &chain, &c );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  update_registry_and_extract( &reg, &b );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  _Chain_Insert_unprotected( &a, &b );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  update_registry_and_extract( &reg, &c );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &b );

  _Chain_Append_unprotected( &chain, &c );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  update_registry_and_extract( &reg, &a );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &b );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  _Chain_Prepend_unprotected( &chain, &a );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  update_registry_and_extract( &reg, &a );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &b );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  update_registry_and_extract( &reg, &b );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &c );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  update_registry_and_extract( &reg, &c );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == _Chain_Tail( &chain ));
  rtems_test_assert( _Chain_Iterator_next( &bit ) == _Chain_Head( &chain ));

  _Chain_Append_unprotected( &chain, &a );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &a );

  _Chain_Append_unprotected( &chain, &b );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &b );

  _Chain_Append_unprotected( &chain, &c );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &c );

  update_registry_and_extract( &reg, &c );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &b );

  update_registry_and_extract( &reg, &b );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == &a );
  rtems_test_assert( _Chain_Iterator_next( &bit ) == &a );

  update_registry_and_extract( &reg, &a );
  rtems_test_assert( _Chain_Iterator_next( &fit ) == _Chain_Tail( &chain ));
  rtems_test_assert( _Chain_Iterator_next( &bit ) == _Chain_Head( &chain ));

  rtems_test_assert( !_Chain_Is_empty( &reg.Iterators ));
  _Chain_Iterator_destroy( &fit );
  rtems_test_assert( !_Chain_Is_empty( &reg.Iterators ));
  _Chain_Iterator_destroy( &bit );
  rtems_test_assert( _Chain_Is_empty( &reg.Iterators ));
}
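
Example #12 depends on two pieces outside the excerpt: the file-scope static_reg registry and the update_registry_and_extract() helper. A plausible reconstruction, assuming the CHAIN_ITERATOR_REGISTRY_INITIALIZER macro and the _Chain_Iterator_registry_update() and _Chain_Extract_unprotected() operations from chainimpl.h, could look like this (the real test may differ):

static Chain_Iterator_registry static_reg =
  CHAIN_ITERATOR_REGISTRY_INITIALIZER( static_reg );

static void update_registry_and_extract(
  Chain_Iterator_registry *reg,
  Chain_Node              *node
)
{
  /* Advance every registered iterator past the node, then unlink it. */
  _Chain_Iterator_registry_update( reg, node );
  _Chain_Extract_unprotected( node );
}
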
Example #13
CORE_mutex_Status _CORE_mutex_Initialize(
  CORE_mutex_Control           *the_mutex,
  Thread_Control               *executing,
  const CORE_mutex_Attributes  *the_mutex_attributes,
  bool                          initially_locked
)
{

/* Add this to the RTEMS environment later ?????????
  rtems_assert( initial_lock == CORE_MUTEX_LOCKED ||
                initial_lock == CORE_MUTEX_UNLOCKED );
 */

  the_mutex->Attributes    = *the_mutex_attributes;

  if ( initially_locked ) {
    bool is_priority_ceiling =
      _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes );

    the_mutex->nest_count = 1;
    the_mutex->holder     = executing;

    if ( is_priority_ceiling ||
         _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ) {
      Priority_Control ceiling = the_mutex->Attributes.priority_ceiling;
      Per_CPU_Control *cpu_self;

      /* The mutex initialization is only protected by the allocator lock */
      cpu_self = _Thread_Dispatch_disable();

      /*
       * The test to check for a ceiling violation is a bit arbitrary.  In case
       * this thread is the owner of a priority inheritance mutex, then it may
       * get a higher priority later or anytime on SMP configurations.
       */
      if ( is_priority_ceiling && executing->current_priority < ceiling ) {
        /*
         * There is no need to undo the previous work since this error aborts
         * the object creation.
         */
        _Thread_Dispatch_enable( cpu_self );
        return CORE_MUTEX_STATUS_CEILING_VIOLATED;
      }

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
       _Chain_Prepend_unprotected( &executing->lock_mutex,
                                   &the_mutex->queue.lock_queue );
       the_mutex->queue.priority_before = executing->current_priority;
#endif

      executing->resource_count++;

      if ( is_priority_ceiling ) {
        _Thread_Raise_priority( executing, ceiling );
      }

      _Thread_Dispatch_enable( cpu_self );
    }
  } else {
    the_mutex->nest_count = 0;
    the_mutex->holder     = NULL;
  }

  _Thread_queue_Initialize( &the_mutex->Wait_queue );

  if ( _CORE_mutex_Is_priority( the_mutex_attributes ) ) {
    the_mutex->operations = &_Thread_queue_Operations_priority;
  } else {
    the_mutex->operations = &_Thread_queue_Operations_FIFO;
  }

  return CORE_MUTEX_STATUS_SUCCESSFUL;
}