rtems_status_code rtems_message_queue_delete(
  rtems_id id
)
{
  register Message_queue_Control *the_message_queue;
  Objects_Locations               location;

  the_message_queue = _Message_queue_Get( id, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:
      _Objects_Close(
        &_Message_queue_Information,
        &the_message_queue->Object
      );

      _CORE_message_queue_Close(
        &the_message_queue->message_queue,
        #if defined(RTEMS_MULTIPROCESSING)
          _Message_queue_MP_Send_object_was_deleted,
        #else
          NULL,
        #endif
        CORE_MESSAGE_QUEUE_STATUS_WAS_DELETED
      );

      _Message_queue_Free( the_message_queue );

#if defined(RTEMS_MULTIPROCESSING)
      if ( _Attributes_Is_global( the_message_queue->attribute_set ) ) {
        _Objects_MP_Close(
          &_Message_queue_Information,
          the_message_queue->Object.id
        );

        _Message_queue_MP_Send_process_packet(
          MESSAGE_QUEUE_MP_ANNOUNCE_DELETE,
          the_message_queue->Object.id,
          0,                                 /* Not used */
          0
        );
      }
#endif

      _Thread_Enable_dispatch();
      return RTEMS_SUCCESSFUL;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
      _Thread_Dispatch();
      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
#endif

    case OBJECTS_ERROR:
      break;
  }

  return RTEMS_INVALID_ID;
}
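The following is a minimal application-side sketch (not part of the kernel sources) of how the Classic API message queue directives are typically used around the create/delete paths shown in this section. The object name, message sizes, and the surrounding function are assumptions made only for illustration.

#include <rtems.h>
#include <string.h>

void message_queue_example( void )
{
  rtems_id          queue_id;
  rtems_status_code sc;
  char              in[ 32 ];
  char              out[ 32 ];
  size_t            size;

  /* Room for 4 pending messages of up to 32 bytes each, FIFO task queueing. */
  sc = rtems_message_queue_create(
    rtems_build_name( 'M', 'Q', '0', '1' ),
    4,
    sizeof( out ),
    RTEMS_DEFAULT_ATTRIBUTES,
    &queue_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return;

  strcpy( out, "hello" );
  (void) rtems_message_queue_send( queue_id, out, strlen( out ) + 1 );

  /* Block until a message arrives; size returns the actual length received. */
  (void) rtems_message_queue_receive(
    queue_id, in, &size, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

  /* Waiting tasks, if any, are flushed with "was deleted" status (see above). */
  (void) rtems_message_queue_delete( queue_id );
}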
rtems_status_code rtems_partition_delete(
  rtems_id id
)
{
  register Partition_Control *the_partition;
  Objects_Locations           location;

  the_partition = _Partition_Get( id, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:
      if ( the_partition->number_of_used_blocks == 0 ) {
        _Objects_Close( &_Partition_Information, &the_partition->Object );
        _Partition_Free( the_partition );

#if defined(RTEMS_MULTIPROCESSING)
        if ( _Attributes_Is_global( the_partition->attribute_set ) ) {
          _Objects_MP_Close(
            &_Partition_Information,
            the_partition->Object.id
          );

          _Partition_MP_Send_process_packet(
            PARTITION_MP_ANNOUNCE_DELETE,
            the_partition->Object.id,
            0,                                 /* Not used */
            0                                  /* Not used */
          );
        }
#endif

        _Thread_Enable_dispatch();
        return RTEMS_SUCCESSFUL;
      }
      _Thread_Enable_dispatch();
      return RTEMS_RESOURCE_IN_USE;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
      _Thread_Dispatch();
      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
#endif

    case OBJECTS_ERROR:
      break;
  }

  return RTEMS_INVALID_ID;
}
rtems_status_code rtems_semaphore_delete(
  rtems_id id
)
{
  register Semaphore_Control *the_semaphore;
  Objects_Locations           location;

  the_semaphore = _Semaphore_Get( id, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:
      if ( !_Attributes_Is_counting_semaphore( the_semaphore->attribute_set ) ) {
        if ( _CORE_mutex_Is_locked( &the_semaphore->Core_control.mutex ) &&
             !_Attributes_Is_simple_binary_semaphore(
                the_semaphore->attribute_set ) ) {
          _Thread_Enable_dispatch();
          return RTEMS_RESOURCE_IN_USE;
        }
        _CORE_mutex_Flush(
          &the_semaphore->Core_control.mutex,
          SEMAPHORE_MP_OBJECT_WAS_DELETED,
          CORE_MUTEX_WAS_DELETED
        );
      } else {
        _CORE_semaphore_Flush(
          &the_semaphore->Core_control.semaphore,
          SEMAPHORE_MP_OBJECT_WAS_DELETED,
          CORE_SEMAPHORE_WAS_DELETED
        );
      }

      _Objects_Close( &_Semaphore_Information, &the_semaphore->Object );
      _Semaphore_Free( the_semaphore );

#if defined(RTEMS_MULTIPROCESSING)
      if ( _Attributes_Is_global( the_semaphore->attribute_set ) ) {
        _Objects_MP_Close( &_Semaphore_Information, the_semaphore->Object.id );

        _Semaphore_MP_Send_process_packet(
          SEMAPHORE_MP_ANNOUNCE_DELETE,
          the_semaphore->Object.id,
          0,                                 /* Not used */
          0                                  /* Not used */
        );
      }
#endif

      _Thread_Enable_dispatch();
      return RTEMS_SUCCESSFUL;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
      _Thread_Dispatch();
      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
#endif

    case OBJECTS_ERROR:
      break;
  }

  return RTEMS_INVALID_ID;
}
rtems_status_code rtems_partition_create(
  rtems_name       name,
  void            *starting_address,
  uint32_t         length,
  uint32_t         buffer_size,
  rtems_attribute  attribute_set,
  rtems_id        *id
)
{
  register Partition_Control *the_partition;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !starting_address )
    return RTEMS_INVALID_ADDRESS;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  if ( length == 0 || buffer_size == 0 || length < buffer_size ||
       !_Partition_Is_buffer_size_aligned( buffer_size ) )
    return RTEMS_INVALID_SIZE;

  if ( !_Addresses_Is_aligned( starting_address ) )
    return RTEMS_INVALID_ADDRESS;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) &&
       !_System_state_Is_multiprocessing )
    return RTEMS_MP_NOT_CONFIGURED;
#endif

  _Thread_Disable_dispatch();               /* prevents deletion */

  the_partition = _Partition_Allocate();

  if ( !the_partition ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) &&
       !( _Objects_MP_Allocate_and_open( &_Partition_Information, name,
                            the_partition->Object.id, false ) ) ) {
    _Partition_Free( the_partition );
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }
#endif

  the_partition->starting_address      = starting_address;
  the_partition->length                = length;
  the_partition->buffer_size           = buffer_size;
  the_partition->attribute_set         = attribute_set;
  the_partition->number_of_used_blocks = 0;

  _Chain_Initialize(
    &the_partition->Memory,
    starting_address,
    length / buffer_size,
    buffer_size
  );

  _Objects_Open(
    &_Partition_Information,
    &the_partition->Object,
    (Objects_Name) name
  );

  *id = the_partition->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) )
    _Partition_MP_Send_process_packet(
      PARTITION_MP_ANNOUNCE_CREATE,
      the_partition->Object.id,
      name,
      0                                  /* Not used */
    );
#endif

  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
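A minimal application-side sketch of the Partition Manager directives whose implementations appear above. The backing buffer area, object name, and buffer size are assumptions chosen only for this example.

#include <rtems.h>

/* Backing storage for the partition: 8 buffers of 256 bytes each.
 * Both the area and the buffer size must satisfy the alignment checks
 * made by rtems_partition_create(). */
static long partition_area[ 8 * 256 / sizeof( long ) ];

void partition_example( void )
{
  rtems_id          part_id;
  void             *buffer;
  rtems_status_code sc;

  sc = rtems_partition_create(
    rtems_build_name( 'P', 'T', '0', '1' ),
    partition_area,
    sizeof( partition_area ),
    256,                            /* buffer size in bytes */
    RTEMS_DEFAULT_ATTRIBUTES,       /* local object */
    &part_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return;

  /* Obtain one fixed-size buffer, use it, and hand it back. */
  sc = rtems_partition_get_buffer( part_id, &buffer );
  if ( sc == RTEMS_SUCCESSFUL )
    (void) rtems_partition_return_buffer( part_id, buffer );

  /* Deletion succeeds only when number_of_used_blocks is zero;
   * otherwise rtems_partition_delete() returns RTEMS_RESOURCE_IN_USE. */
  (void) rtems_partition_delete( part_id );
}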
epos_status_code epos_message_queue_create(
  epos_name       name,
  uint32_t        count,
  size_t          max_message_size,
  epos_attribute  attribute_set,
  epos_id        *id
)
{
  register Message_queue_Control *the_message_queue;
  CORE_message_queue_Attributes   the_msgq_attributes;
#if defined(RTEMS_MULTIPROCESSING)
  bool                            is_global;
#endif

  if ( !epos_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

#if defined(RTEMS_MULTIPROCESSING)
  if ( ( is_global = _Attributes_Is_global( attribute_set ) ) &&
       !_System_state_Is_multiprocessing )
    return RTEMS_MP_NOT_CONFIGURED;
#endif

  if ( count == 0 )
    return RTEMS_INVALID_NUMBER;

  if ( max_message_size == 0 )
    return RTEMS_INVALID_SIZE;

#if defined(RTEMS_MULTIPROCESSING)
#if 1
  /*
   * I am not 100% sure this should be an error.
   * It seems reasonable to create a queue with a large maximum size,
   * and then just send smaller messages from remote (or all) nodes.
   */
  if ( is_global && ( _MPCI_table->maximum_packet_size < max_message_size ) )
    return RTEMS_INVALID_SIZE;
#endif
#endif

  _Thread_Disable_dispatch();               /* protects object pointer */

  the_message_queue = _Message_queue_Allocate();

  if ( !the_message_queue ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( is_global &&
       !( _Objects_MP_Allocate_and_open( &_Message_queue_Information, name,
                            the_message_queue->Object.id, false ) ) ) {
    _Message_queue_Free( the_message_queue );
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }
#endif

  the_message_queue->attribute_set = attribute_set;

  if ( _Attributes_Is_priority( attribute_set ) )
    the_msgq_attributes.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_PRIORITY;
  else
    the_msgq_attributes.discipline = CORE_MESSAGE_QUEUE_DISCIPLINES_FIFO;

  if ( !_CORE_message_queue_Initialize(
           &the_message_queue->message_queue,
           &the_msgq_attributes,
           count,
           max_message_size ) ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( is_global )
      _Objects_MP_Close(
        &_Message_queue_Information,
        the_message_queue->Object.id
      );
#endif

    _Message_queue_Free( the_message_queue );
    _Thread_Enable_dispatch();
    return RTEMS_UNSATISFIED;
  }

  _Objects_Open(
    &_Message_queue_Information,
    &the_message_queue->Object,
    (Objects_Name) name
  );

  *id = the_message_queue->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  if ( is_global )
    _Message_queue_MP_Send_process_packet(
      MESSAGE_QUEUE_MP_ANNOUNCE_CREATE,
      the_message_queue->Object.id,
      name,
      0
    );
#endif

  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
rtems_status_code rtems_task_create(
  rtems_name           name,
  rtems_task_priority  initial_priority,
  size_t               stack_size,
  rtems_mode           initial_modes,
  rtems_attribute      attribute_set,
  rtems_id            *id
)
{
  Thread_Control     *the_thread;
  bool                is_fp;
#if defined(RTEMS_MULTIPROCESSING)
  Objects_MP_Control *the_global_object = NULL;
  bool                is_global;
#endif
  bool                status;
  rtems_attribute     the_attribute_set;
  Priority_Control    core_priority;
  RTEMS_API_Control  *api;
  ASR_Information    *asr;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  /*
   * Core Thread Initialize insures we get the minimum amount of
   * stack space.
   */

  /*
   * Fix the attribute set to match the attributes which
   * this processor (1) requires and (2) is able to support.
   * First add in the required flags for attribute_set.
   * Typically this might include FP if the platform
   * or application required all tasks to be fp aware.
   * Then turn off the requested bits which are not supported.
   */
  the_attribute_set = _Attributes_Set( attribute_set, ATTRIBUTES_REQUIRED );
  the_attribute_set =
    _Attributes_Clear( the_attribute_set, ATTRIBUTES_NOT_SUPPORTED );

  if ( _Attributes_Is_floating_point( the_attribute_set ) )
    is_fp = true;
  else
    is_fp = false;

  /*
   * Validate the RTEMS API priority and convert it to the core priority range.
   */
  if ( !_Attributes_Is_system_task( the_attribute_set ) ) {
    if ( !_RTEMS_tasks_Priority_is_valid( initial_priority ) )
      return RTEMS_INVALID_PRIORITY;
  }

  core_priority = _RTEMS_tasks_Priority_to_Core( initial_priority );

  /*
   * Make sure system is MP if this task is global.
   */
#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( the_attribute_set ) ) {
    is_global = true;

    if ( !_System_state_Is_multiprocessing )
      return RTEMS_MP_NOT_CONFIGURED;
  } else
    is_global = false;
#endif

  /*
   * Allocate the thread control block and -- if the task is global --
   * allocate a global object control block.
   *
   * NOTE: This routine does not use the combined allocate and open
   *       global object routine because this results in a lack of
   *       control over when memory is allocated and can be freed in
   *       the event of an error.
   */
  the_thread = _RTEMS_tasks_Allocate();

  if ( !the_thread ) {
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( is_global ) {
    the_global_object = _Objects_MP_Allocate_global_object();

    if ( _Objects_MP_Is_null_global_object( the_global_object ) ) {
      _RTEMS_tasks_Free( the_thread );
      _Objects_Allocator_unlock();
      return RTEMS_TOO_MANY;
    }
  }
#endif

  /*
   * Initialize the core thread for this task.
   */
  status = _Thread_Initialize(
    &_RTEMS_tasks_Information,
    the_thread,
    _Scheduler_Get_by_CPU_index( _SMP_Get_current_processor() ),
    NULL,
    stack_size,
    is_fp,
    core_priority,
    _Modes_Is_preempt( initial_modes ) ? true : false,
    _Modes_Is_timeslice( initial_modes ) ?
      THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE :
      THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,                                  /* no budget algorithm callout */
    _Modes_Get_interrupt_level( initial_modes ),
    (Objects_Name) name
  );

  if ( !status ) {
#if defined(RTEMS_MULTIPROCESSING)
    if ( is_global )
      _Objects_MP_Free_global_object( the_global_object );
#endif
    _RTEMS_tasks_Free( the_thread );
    _Objects_Allocator_unlock();
    return RTEMS_UNSATISFIED;
  }

  api = the_thread->API_Extensions[ THREAD_API_RTEMS ];
  asr = &api->Signal;

  asr->is_enabled = _Modes_Is_asr_disabled( initial_modes ) ? false : true;

  *id = the_thread->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  the_thread->is_global = is_global;

  if ( is_global ) {
    _Objects_MP_Open(
      &_RTEMS_tasks_Information.Objects,
      the_global_object,
      name,
      the_thread->Object.id
    );

    _RTEMS_tasks_MP_Send_process_packet(
      RTEMS_TASKS_MP_ANNOUNCE_CREATE,
      the_thread->Object.id,
      name
    );
  }
#endif

  _Objects_Allocator_unlock();
  return RTEMS_SUCCESSFUL;
}
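A minimal application-side sketch showing how rtems_task_create() is typically paired with rtems_task_start(); the task name, priority, and the entry routine body are assumptions made only for illustration.

#include <rtems.h>

/* Entry point for the example task; the body is a placeholder. */
static rtems_task worker_entry( rtems_task_argument argument )
{
  (void) argument;

  while ( true ) {
    /* ... application work ... */
    (void) rtems_task_wake_after( 10 );
  }
}

void task_example( void )
{
  rtems_id          task_id;
  rtems_status_code sc;

  sc = rtems_task_create(
    rtems_build_name( 'W', 'R', 'K', '1' ),
    10,                              /* initial priority (1 is highest) */
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,             /* preemptible, no timeslice, ASR enabled */
    RTEMS_DEFAULT_ATTRIBUTES,        /* local, no floating point */
    &task_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return;

  /* Creation only allocates and initializes the TCB; the task becomes
   * ready to run once it is started. */
  (void) rtems_task_start( task_id, worker_entry, 0 );
}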
rtems_status_code rtems_semaphore_create(
  rtems_name           name,
  uint32_t             count,
  rtems_attribute      attribute_set,
  rtems_task_priority  priority_ceiling,
  rtems_id            *id
)
{
  register Semaphore_Control *the_semaphore;
  CORE_mutex_Attributes       the_mutex_attr;
  CORE_semaphore_Attributes   the_semaphore_attr;
  CORE_mutex_Status           mutex_status;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) ) {

    if ( !_System_state_Is_multiprocessing )
      return RTEMS_MP_NOT_CONFIGURED;

    if ( _Attributes_Is_inherit_priority( attribute_set ) ||
         _Attributes_Is_priority_ceiling( attribute_set ) )
      return RTEMS_NOT_DEFINED;

  } else
#endif

  if ( _Attributes_Is_inherit_priority( attribute_set ) ||
       _Attributes_Is_priority_ceiling( attribute_set ) ) {

    if ( ! ( _Attributes_Is_binary_semaphore( attribute_set ) &&
             _Attributes_Is_priority( attribute_set ) ) )
      return RTEMS_NOT_DEFINED;
  }

  if ( _Attributes_Is_inherit_priority( attribute_set ) &&
       _Attributes_Is_priority_ceiling( attribute_set ) )
    return RTEMS_NOT_DEFINED;

  if ( !_Attributes_Is_counting_semaphore( attribute_set ) && ( count > 1 ) )
    return RTEMS_INVALID_NUMBER;

  _Thread_Disable_dispatch();               /* prevents deletion */

  the_semaphore = _Semaphore_Allocate();

  if ( !the_semaphore ) {
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) &&
       !( _Objects_MP_Allocate_and_open( &_Semaphore_Information, name,
                            the_semaphore->Object.id, false ) ) ) {
    _Semaphore_Free( the_semaphore );
    _Thread_Enable_dispatch();
    return RTEMS_TOO_MANY;
  }
#endif

  the_semaphore->attribute_set = attribute_set;

  /*
   * Initialize it as a counting semaphore.
   */
  if ( _Attributes_Is_counting_semaphore( attribute_set ) ) {

    /*
     * This effectively disables limit checking.
     */
    the_semaphore_attr.maximum_count = 0xFFFFFFFF;

    if ( _Attributes_Is_priority( attribute_set ) )
      the_semaphore_attr.discipline = CORE_SEMAPHORE_DISCIPLINES_PRIORITY;
    else
      the_semaphore_attr.discipline = CORE_SEMAPHORE_DISCIPLINES_FIFO;

    /*
     * The following are just to make Purify happy.
     */
    the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_ACQUIRES;
    the_mutex_attr.priority_ceiling      = PRIORITY_MINIMUM;

    _CORE_semaphore_Initialize(
      &the_semaphore->Core_control.semaphore,
      &the_semaphore_attr,
      count
    );
  } else {
    /*
     * It is either simple binary semaphore or a more powerful mutex
     * style binary semaphore.  This is the mutex style.
     */
    if ( _Attributes_Is_priority( attribute_set ) )
      the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY;
    else
      the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_FIFO;

    if ( _Attributes_Is_binary_semaphore( attribute_set ) ) {
      the_mutex_attr.priority_ceiling      = priority_ceiling;
      the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_ACQUIRES;
      the_mutex_attr.only_owner_release    = false;

      if ( the_mutex_attr.discipline == CORE_MUTEX_DISCIPLINES_PRIORITY ) {
        if ( _Attributes_Is_inherit_priority( attribute_set ) ) {
          the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT;
          the_mutex_attr.only_owner_release = true;
        } else if ( _Attributes_Is_priority_ceiling( attribute_set ) ) {
          the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING;
          the_mutex_attr.only_owner_release = true;
        }
      }
    } else /* must be simple binary semaphore */ {
      the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_BLOCKS;
      the_mutex_attr.only_owner_release    = false;
    }

    mutex_status = _CORE_mutex_Initialize(
      &the_semaphore->Core_control.mutex,
      &the_mutex_attr,
      ( count == 1 ) ? CORE_MUTEX_UNLOCKED : CORE_MUTEX_LOCKED
    );

    if ( mutex_status == CORE_MUTEX_STATUS_CEILING_VIOLATED ) {
      _Semaphore_Free( the_semaphore );
      _Thread_Enable_dispatch();
      return RTEMS_INVALID_PRIORITY;
    }
  }

  /*
   * Whether we initialized it as a mutex or counting semaphore, it is
   * now ready to be "offered" for use as a Classic API Semaphore.
   */
  _Objects_Open(
    &_Semaphore_Information,
    &the_semaphore->Object,
    (Objects_Name) name
  );

  *id = the_semaphore->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) )
    _Semaphore_MP_Send_process_packet(
      SEMAPHORE_MP_ANNOUNCE_CREATE,
      the_semaphore->Object.id,
      name,
      0                                  /* Not used */
    );
#endif

  _Thread_Enable_dispatch();
  return RTEMS_SUCCESSFUL;
}
rtems_status_code rtems_semaphore_create(
  rtems_name           name,
  uint32_t             count,
  rtems_attribute      attribute_set,
  rtems_task_priority  priority_ceiling,
  rtems_id            *id
)
{
  Semaphore_Control          *the_semaphore;
  CORE_mutex_Attributes       the_mutex_attr;
  CORE_semaphore_Attributes   the_semaphore_attr;
  CORE_mutex_Status           mutex_status;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) ) {

    if ( !_System_state_Is_multiprocessing )
      return RTEMS_MP_NOT_CONFIGURED;

    if ( _Attributes_Is_inherit_priority( attribute_set ) ||
         _Attributes_Is_priority_ceiling( attribute_set ) ||
         _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) )
      return RTEMS_NOT_DEFINED;

  } else
#endif

  if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) &&
       !( _Attributes_Is_binary_semaphore( attribute_set ) &&
          !_Attributes_Is_priority( attribute_set ) ) ) {
    return RTEMS_NOT_DEFINED;
  }

  if ( _Attributes_Is_inherit_priority( attribute_set ) ||
       _Attributes_Is_priority_ceiling( attribute_set ) ) {

    if ( ! ( _Attributes_Is_binary_semaphore( attribute_set ) &&
             _Attributes_Is_priority( attribute_set ) ) )
      return RTEMS_NOT_DEFINED;
  }

  if ( !_Attributes_Has_at_most_one_protocol( attribute_set ) )
    return RTEMS_NOT_DEFINED;

  if ( !_Attributes_Is_counting_semaphore( attribute_set ) && ( count > 1 ) )
    return RTEMS_INVALID_NUMBER;

#if !defined(RTEMS_SMP)
  /*
   * On uni-processor configurations the Multiprocessor Resource Sharing
   * Protocol is equivalent to the Priority Ceiling Protocol.
   */
  if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
    attribute_set |= RTEMS_PRIORITY_CEILING | RTEMS_PRIORITY;
  }
#endif

  the_semaphore = _Semaphore_Allocate();

  if ( !the_semaphore ) {
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) &&
       !( _Objects_MP_Allocate_and_open( &_Semaphore_Information, name,
                            the_semaphore->Object.id, false ) ) ) {
    _Semaphore_Free( the_semaphore );
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }
#endif

  the_semaphore->attribute_set = attribute_set;

  /*
   * Initialize it as a counting semaphore.
   */
  if ( _Attributes_Is_counting_semaphore( attribute_set ) ) {

    /*
     * This effectively disables limit checking.
     */
    the_semaphore_attr.maximum_count = 0xFFFFFFFF;

    if ( _Attributes_Is_priority( attribute_set ) )
      the_semaphore_attr.discipline = CORE_SEMAPHORE_DISCIPLINES_PRIORITY;
    else
      the_semaphore_attr.discipline = CORE_SEMAPHORE_DISCIPLINES_FIFO;

    /*
     * The following are just to make Purify happy.
     */
    the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_ACQUIRES;
    the_mutex_attr.priority_ceiling      = PRIORITY_MINIMUM;

    _CORE_semaphore_Initialize(
      &the_semaphore->Core_control.semaphore,
      &the_semaphore_attr,
      count
    );
#if defined(RTEMS_SMP)
  } else if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
    MRSP_Status mrsp_status = _MRSP_Initialize(
      &the_semaphore->Core_control.mrsp,
      priority_ceiling,
      _Thread_Get_executing(),
      count != 1
    );

    if ( mrsp_status != MRSP_SUCCESSFUL ) {
      _Semaphore_Free( the_semaphore );
      _Objects_Allocator_unlock();
      return _Semaphore_Translate_MRSP_status_code( mrsp_status );
    }
#endif
  } else {
    /*
     * It is either simple binary semaphore or a more powerful mutex
     * style binary semaphore.  This is the mutex style.
     */
    if ( _Attributes_Is_priority( attribute_set ) )
      the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY;
    else
      the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_FIFO;

    if ( _Attributes_Is_binary_semaphore( attribute_set ) ) {
      the_mutex_attr.priority_ceiling      =
        _RTEMS_tasks_Priority_to_Core( priority_ceiling );
      the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_ACQUIRES;
      the_mutex_attr.only_owner_release    = false;

      if ( the_mutex_attr.discipline == CORE_MUTEX_DISCIPLINES_PRIORITY ) {
        if ( _Attributes_Is_inherit_priority( attribute_set ) ) {
          the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT;
          the_mutex_attr.only_owner_release = true;
        } else if ( _Attributes_Is_priority_ceiling( attribute_set ) ) {
          the_mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING;
          the_mutex_attr.only_owner_release = true;
        }
      }
    } else /* must be simple binary semaphore */ {
      the_mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_BLOCKS;
      the_mutex_attr.only_owner_release    = false;
    }

    mutex_status = _CORE_mutex_Initialize(
      &the_semaphore->Core_control.mutex,
      _Thread_Get_executing(),
      &the_mutex_attr,
      count != 1
    );

    if ( mutex_status == CORE_MUTEX_STATUS_CEILING_VIOLATED ) {
      _Semaphore_Free( the_semaphore );
      _Objects_Allocator_unlock();
      return RTEMS_INVALID_PRIORITY;
    }
  }

  /*
   * Whether we initialized it as a mutex or counting semaphore, it is
   * now ready to be "offered" for use as a Classic API Semaphore.
   */
  _Objects_Open(
    &_Semaphore_Information,
    &the_semaphore->Object,
    (Objects_Name) name
  );

  *id = the_semaphore->Object.id;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Attributes_Is_global( attribute_set ) )
    _Semaphore_MP_Send_process_packet(
      SEMAPHORE_MP_ANNOUNCE_CREATE,
      the_semaphore->Object.id,
      name,
      0                                  /* Not used */
    );
#endif

  _Objects_Allocator_unlock();
  return RTEMS_SUCCESSFUL;
}
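A minimal application-side sketch exercising the mutex-style branch of the rtems_semaphore_create() variants above; the object name and the surrounding function are assumptions made only for illustration.

#include <rtems.h>

void semaphore_example( void )
{
  rtems_id          sem_id;
  rtems_status_code sc;

  /* A binary semaphore with priority task queueing and priority
   * inheritance, i.e. the mutex-style initialization path shown above. */
  sc = rtems_semaphore_create(
    rtems_build_name( 'M', 'T', 'X', '1' ),
    1,                                             /* initially available */
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    0,                                             /* ceiling unused here */
    &sem_id
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return;

  /* Typical critical section: obtain, work, release. */
  sc = rtems_semaphore_obtain( sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  if ( sc == RTEMS_SUCCESSFUL ) {
    /* ... access the shared resource ... */
    (void) rtems_semaphore_release( sem_id );
  }

  /* rtems_semaphore_delete() returns RTEMS_RESOURCE_IN_USE while a
   * mutex-style semaphore is still held (see rtems_semaphore_delete above). */
  (void) rtems_semaphore_delete( sem_id );
}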